diff --git a/brokerapi/brokers/name_generator/name_generator.go b/brokerapi/brokers/name_generator/name_generator.go index ec7bb71bc..d02f82a5d 100644 --- a/brokerapi/brokers/name_generator/name_generator.go +++ b/brokerapi/brokers/name_generator/name_generator.go @@ -56,8 +56,8 @@ func (bng *BasicNameGenerator) InstanceNameWithSeparator(sep string) string { return bng.newNameWithSeperator(sep) } -func (bng *SqlNameGenerator) InstanceName() string { - return bng.newNameWithSeperator("-") +func (sng *SqlNameGenerator) InstanceName() string { + return sng.newNameWithSeperator("-") } func (sng *SqlNameGenerator) DatabaseName() string { diff --git a/fakes/fake_env_vars.go b/fakes/fake_env_vars.go index c404e80c5..757c1691d 100644 --- a/fakes/fake_env_vars.go +++ b/fakes/fake_env_vars.go @@ -77,3 +77,71 @@ const Services string = `[ "tags": ["gcp", "ml"] } ]` + +const PreconfiguredPlans = `[ + { + "service_id": "b9e4332e-b42b-4680-bda5-ea1506797474", + "name": "standard", + "display_name": "Standard", + "description": "Standard storage class", + "features": {"storage_class": "STANDARD"} + }, + { + "service_id": "b9e4332e-b42b-4680-bda5-ea1506797474", + "name": "nearline", + "display_name": "Nearline", + "description": "Nearline storage class", + "features": {"storage_class": "NEARLINE"} + }, + { + "service_id": "b9e4332e-b42b-4680-bda5-ea1506797474", + "name": "reduced_availability", + "display_name": "Durable Reduced Availability", + "description": "Durable Reduced Availability storage class", + "features": {"storage_class": "DURABLE_REDUCED_AVAILABILITY"} + }, + { + "service_id": "628629e3-79f5-4255-b981-d14c6c7856be", + "name": "default", + "display_name": "Default", + "description": "PubSub Default plan", + "features": "" + }, + { "service_id": "f80c0a3e-bd4d-4809-a900-b4e33a6450f1", + "name": "default", + "display_name": "Default", + "description": "BigQuery default plan", + "features": "" + }, + { + "service_id": "5ad2dce0-51f7-4ede-8b46-293d6df1e8d4", + "name": "default", + "display_name": "Default", + "description": "Machine Learning api default plan", + "features": "" + } + ]` + +const TestCloudSQLPlan = `{ + "test_cloudsql_plan": { + "guid": "test_cloudsql_plan", + "name": "test_cloudsql_plan", + "description": "test-cloudsql-plan", + "tier": "D4", + "pricing_plan": "PER_USE", + "max_disk_size": "20", + "display_name": "test_cloudsql_plan", + "service": "4bc59b9a-8520-409f-85da-1c7552315863" + } + }` +const TestBigtablePlan = `{ + "test_bigtable_plan": { + "guid": "test_bigtable_plan", + "name": "test_bigtable_plan", + "description": "test-bigtable-plan", + "storage_type": "SSD", + "num_nodes": "3", + "display_name": "test_bigtable_plan", + "service": "b8e19880-ac58-42ef-b033-f7cd9c94d1fe" + } + }` diff --git a/fakes/fake_name_generator.go b/fakes/fake_name_generator.go index c1c7ddfdb..7705cc21b 100644 --- a/fakes/fake_name_generator.go +++ b/fakes/fake_name_generator.go @@ -15,3 +15,23 @@ func (sg *StaticNameGenerator) InstanceNameWithSeparator(sep string) string { func (sg *StaticNameGenerator) DatabaseName() string { return sg.Val } + +type StaticSQLNameGenerator struct { + StaticNameGenerator +} + +func (sng *StaticSQLNameGenerator) InstanceName() string { + return sng.Val +} + +func (sng *StaticSQLNameGenerator) DatabaseName() string { + return sng.Val +} + +func (sng *StaticSQLNameGenerator) GenerateUsername(instanceID, bindingID string) (string, error) { + return sng.Val[:16], nil +} + +func (sng *StaticSQLNameGenerator) GeneratePassword() (string, error) { + return sng.Val, 
nil +} diff --git a/glide.lock b/glide.lock index 677f7132d..67b7f141b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,8 @@ -hash: 47433d0284bcdacaddfafc7e0861c6372d61907e5c25bcd3616e3ed29aa22222 -updated: 2017-02-03T11:20:55.413887464-08:00 +hash: 65bc06e5fe15156aa97a1826d7fc56f6fa1391b72ae0e5bc9a5687191ab00ddc +updated: 2017-03-03T15:47:24.224813167-08:00 imports: - name: cloud.google.com/go - version: 3258e6905e9694db9bcd41910c65ecc30ae6dbbe + version: 78582c9da1f74d3e1e999e675923bd17d55e0639 subpackages: - bigquery - bigtable @@ -12,16 +12,19 @@ imports: - iam - internal - internal/optional + - internal/version - longrunning - pubsub - pubsub/apiv1 + - spanner/... + - spanner/admin/instance/apiv1 - storage - name: code.cloudfoundry.org/lager - version: 6cfe365fb6bb84343f0034b4a53cd250d173b61f + version: de8e9c6c6e474e5e3668aea1a9817bdb4ceeceb0 - name: github.com/go-sql-driver/mysql version: a0583e0143b1624142adab07e0e97fe106d99561 - name: github.com/golang/protobuf - version: 8ee79997227bf9b34611aee7946ae64735e6fd93 + version: 69b215d01a5606c843240eab4937eab3acee6530 subpackages: - proto - protoc-gen-go/descriptor @@ -46,9 +49,11 @@ imports: - name: github.com/leonelquinteros/gorand version: aac5da62f38c2d2b492bee83183ca80dd209b875 - name: github.com/mattn/go-sqlite3 - version: ce9149a3c941c30de51a01dbc5bc414ddaa52927 + version: acf4ae44299454c98d3a39ea15d444b5a6708710 +- name: github.com/pivotal-golang/lager + version: de8e9c6c6e474e5e3668aea1a9817bdb4ceeceb0 - name: golang.org/x/net - version: 007e530097ad7f954752df63046b4036f98ba6a6 + version: d379faa25cbdc04d653984913a2ceb43b0bc46d7 subpackages: - context - context/ctxhttp @@ -59,14 +64,14 @@ imports: - lex/httplex - trace - name: golang.org/x/oauth2 - version: 314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd + version: efb10a30610e617dbb17fc243f4cc61a8cfa2903 subpackages: - google - internal - jws - jwt - name: google.golang.org/api - version: c55f685368bb1cdfed2c20283e2878a83b929730 + version: f786854525c2e5b0b49c2a301b0ff076d2ae20df subpackages: - bigquery/v2 - cloudresourcemanager/v1 @@ -83,7 +88,7 @@ imports: - support/bundler - transport - name: google.golang.org/appengine - version: a2c54d2174c17540446e0ced57d9d459af61bc1c + version: 5403c08c6e8fb3b2dc1209d2d833d8e8ac8240de subpackages: - internal - internal/app_identity @@ -97,17 +102,26 @@ imports: - socket - urlfetch - name: google.golang.org/genproto - version: b3e7c2fb04031add52c4817f53f43757ccbf9c18 + version: 1e95789587db7d93ebbaa5eb65da17d3dbf8ab64 subpackages: - googleapis/api/annotations + - googleapis/api/label + - googleapis/api/metric + - googleapis/api/monitoredres + - googleapis/api/serviceconfig - googleapis/bigtable/admin/v2 - googleapis/bigtable/v2 - googleapis/iam/v1 - googleapis/longrunning - googleapis/pubsub/v1 - googleapis/rpc/status + - googleapis/spanner/admin/instance/v1 + - protobuf/api + - protobuf/field_mask + - protobuf/ptype + - protobuf/source_context - name: google.golang.org/grpc - version: 21f8ed309495401e6fd79b3a9fd549582aed1b4c + version: 1dab93372523195731c738b0f0cb4e452228e959 subpackages: - codes - credentials diff --git a/glide.yaml b/glide.yaml index f70d10d26..3c773a043 100644 --- a/glide.yaml +++ b/glide.yaml @@ -29,6 +29,8 @@ import: - bigtable - bigquery - storage + - spanner/admin/instance/apiv1 + - spanner/... 
testImport: - package: github.com/onsi/ginkgo version: ~1.2.0 diff --git a/integration_tests/async_integration_test.go b/integration_tests/async_integration_test.go new file mode 100644 index 000000000..cc7618759 --- /dev/null +++ b/integration_tests/async_integration_test.go @@ -0,0 +1,179 @@ +// Copyright the Service Broker Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//////////////////////////////////////////////////////////////////////////////// +// + +package integration_tests + +import ( + "code.cloudfoundry.org/lager" + "fmt" + "gcp-service-broker/brokerapi/brokers" + . "gcp-service-broker/brokerapi/brokers" + "gcp-service-broker/brokerapi/brokers/models" + "gcp-service-broker/brokerapi/brokers/name_generator" + "gcp-service-broker/db_service" + "gcp-service-broker/fakes" + "github.com/jinzhu/gorm" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + googlecloudsql "google.golang.org/api/sqladmin/v1beta4" + "os" + "time" +) + +func pollForMaxFiveMins(gcpb *GCPAsyncServiceBroker, instanceId string) error { + var err error + timeout := time.After(5 * time.Minute) + tick := time.Tick(30 * time.Second) + + // Keep trying until we're timed out or got a result or got an error + for { + select { + case <-timeout: + return err + case <-tick: + done, err := gcpb.LastOperation(instanceId) + if err != nil { + return err + } else if done.State == models.Succeeded { + return nil + } + } + } +} + +var _ = Describe("AsyncIntegrationTests", func() { + var ( + gcpBroker *GCPAsyncServiceBroker + err error + logger lager.Logger + serviceNameToId map[string]string = make(map[string]string) + serviceNameToPlanId map[string]string = make(map[string]string) + instance_name string + cloudsqlInstanceName string + ) + + BeforeEach(func() { + logger = lager.NewLogger("brokers_test") + logger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG)) + + testDb, _ := gorm.Open("sqlite3", "test.db") + testDb.CreateTable(models.ServiceInstanceDetails{}) + testDb.CreateTable(models.ServiceBindingCredentials{}) + testDb.CreateTable(models.PlanDetails{}) + testDb.CreateTable(models.ProvisionRequestDetails{}) + testDb.CreateTable(models.CloudOperation{}) + + db_service.DbConnection = testDb + + os.Setenv("SECURITY_USER_NAME", "username") + os.Setenv("SECURITY_USER_PASSWORD", "password") + os.Setenv("SERVICES", fakes.Services) + os.Setenv("PRECONFIGURED_PLANS", fakes.PreconfiguredPlans) + + os.Setenv("CLOUDSQL_CUSTOM_PLANS", fakes.TestCloudSQLPlan) + os.Setenv("BIGTABLE_CUSTOM_PLANS", fakes.TestBigtablePlan) + + var creds models.GCPCredentials + creds, err = brokers.GetCredentialsFromEnv() + if err != nil { + logger.Error("error", err) + } + instance_name = generateInstanceName(creds.ProjectId, "-") + cloudsqlInstanceName = fmt.Sprintf("pcf-sb-test-%d", time.Now().UnixNano()) + name_generator.Basic = &fakes.StaticNameGenerator{Val: instance_name} + name_generator.Sql = &fakes.StaticSQLNameGenerator{ + StaticNameGenerator: fakes.StaticNameGenerator{Val: cloudsqlInstanceName}, 
+ } + + gcpBroker, err = brokers.New(logger) + if err != nil { + logger.Error("error", err) + } + + for _, service := range *gcpBroker.Catalog { + serviceNameToId[service.Name] = service.ID + serviceNameToPlanId[service.Name] = service.Plans[0].ID + } + }) + + Describe("Cloud SQL", func() { + + var dbService *googlecloudsql.InstancesService + var sslService *googlecloudsql.SslCertsService + BeforeEach(func() { + sqlService, err := googlecloudsql.New(gcpBroker.GCPClient) + Expect(err).NotTo(HaveOccurred()) + dbService = googlecloudsql.NewInstancesService(sqlService) + sslService = googlecloudsql.NewSslCertsService(sqlService) + }) + + It("can provision/bind/unbind/deprovision", func() { + provisionDetails := models.ProvisionDetails{ + ServiceID: serviceNameToId[models.CloudsqlName], + PlanID: serviceNameToPlanId[models.CloudsqlName], + } + _, err = gcpBroker.Provision("integration_test_instance", provisionDetails, true) + Expect(err).NotTo(HaveOccurred()) + pollForMaxFiveMins(gcpBroker, "integration_test_instance") + + var count int + db_service.DbConnection.Model(&models.ServiceInstanceDetails{}).Where("id = ?", "integration_test_instance").Count(&count) + Expect(count).To(Equal(1)) + + clouddb, err := dbService.Get(gcpBroker.RootGCPCredentials.ProjectId, cloudsqlInstanceName).Do() + Expect(err).NotTo(HaveOccurred()) + Expect(clouddb.Name).To(Equal(cloudsqlInstanceName)) + + bindDetails := models.BindDetails{ + ServiceID: serviceNameToId[models.CloudsqlName], + PlanID: serviceNameToPlanId[models.CloudsqlName], + } + creds, err := gcpBroker.Bind("integration_test_instance", "binding_id", bindDetails) + Expect(err).NotTo(HaveOccurred()) + credsMap := creds.Credentials.(map[string]string) + + Expect(credsMap["Username"]).ToNot(Equal("")) + _, err = sslService.Get(gcpBroker.RootGCPCredentials.ProjectId, cloudsqlInstanceName, credsMap["Sha1Fingerprint"]).Do() + Expect(err).NotTo(HaveOccurred()) + + unBindDetails := models.UnbindDetails{ + ServiceID: serviceNameToId[models.CloudsqlName], + PlanID: serviceNameToPlanId[models.CloudsqlName], + } + err = gcpBroker.Unbind("integration_test_instance", "binding_id", unBindDetails) + Expect(err).NotTo(HaveOccurred()) + certsList, err := sslService.List(gcpBroker.RootGCPCredentials.ProjectId, cloudsqlInstanceName).Do() + Expect(len(certsList.Items)).To(Equal(0)) + + deprovisionDetails := models.DeprovisionDetails{ + ServiceID: serviceNameToId[models.CloudsqlName], + PlanID: serviceNameToPlanId[models.CloudsqlName], + } + _, err = gcpBroker.Deprovision("integration_test_instance", deprovisionDetails, true) + Expect(err).NotTo(HaveOccurred()) + pollForMaxFiveMins(gcpBroker, "integration_test_instance") + _, err = dbService.Get(gcpBroker.RootGCPCredentials.ProjectId, cloudsqlInstanceName).Do() + Expect(err).To(HaveOccurred()) + }) + + }) + + AfterEach(func() { + os.Remove(models.AppCredsFileName) + os.Remove("test.db") + }) +}) diff --git a/integration_tests/integration_test.go b/integration_tests/integration_test.go index 16c784184..3f0922f20 100644 --- a/integration_tests/integration_test.go +++ b/integration_tests/integration_test.go @@ -308,30 +308,8 @@ var _ = Describe("LiveIntegrationTests", func() { } ]`) - os.Setenv("CLOUDSQL_CUSTOM_PLANS", `{ - "test_cloudsql_plan": { - "guid": "foo", - "name": "bar", - "description": "test-cloudsql-plan", - "tier": "D4", - "pricing_plan": "PER_USE", - "max_disk_size": "20", - "display_name": "FOOBAR", - "service": "4bc59b9a-8520-409f-85da-1c7552315863" - } - }`) - - os.Setenv("BIGTABLE_CUSTOM_PLANS", `{ - 
"test_bigtable_plan": { - "guid": "foo2", - "name": "bar2", - "description": "test-bigtable-plan", - "storage_type": "SSD", - "num_nodes": "3", - "display_name": "FOOBAR2", - "service": "b8e19880-ac58-42ef-b033-f7cd9c94d1fe" - } - }`) + os.Setenv("CLOUDSQL_CUSTOM_PLANS", fakes.TestCloudSQLPlan) + os.Setenv("BIGTABLE_CUSTOM_PLANS", fakes.TestBigtablePlan) var creds models.GCPCredentials creds, err = brokers.GetCredentialsFromEnv() diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index e81780d25..2b4bac995 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -37,28 +37,30 @@ run the against the actual APIs. - **GCLOUD_TESTS_API_KEY**: Your API key. Install the [gcloud command-line tool][gcloudcli] to your machine and use it -to create the indexes used in the datastore integration tests with indexes -found in `datastore/testdata/index.yaml`: +to create some resources used in integration tests. From the project's root directory: ``` sh -# Set the default project in your env +# Set the default project in your env. $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID -# Authenticate the gcloud tool with your account +# Authenticate the gcloud tool with your account. $ gcloud auth login -# Create the indexes +# Create the indexes used in the datastore integration tests. $ gcloud preview datastore create-indexes datastore/testdata/index.yaml -``` -The Sink integration tests in preview/logging require a Google Cloud storage -bucket with the same name as your test project, and with the Stackdriver Logging -service account as owner: -``` sh +# Create a Google Cloud storage bucket with the same name as your test project, +# and with the Stackdriver Logging service account as owner, for the sink +# integration tests in logging. $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Create a Spanner instance for the spanner integration tests. +$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' +# NOTE: Spanner instances are priced by the node-hour, so you may want to delete +# the instance after testing with 'gcloud beta spanner instances delete'. ``` Once you've set the environment variables, you can run the integration tests by diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 7d8d64c50..11fb0aa84 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -29,10 +29,19 @@ backwards-incompatible changes. * [Cloud Pub/Sub](#cloud-pub-sub-) * [Cloud BigQuery](#cloud-bigquery-) * [Stackdriver Logging](#stackdriver-logging-) + * [Cloud Spanner](#cloud-spanner-) ## News +_February 14, 2017_ + +Release of a client library for Spanner. See +the +[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). + +Note that although the Spanner service is beta, the Go client library is alpha. + _December 12, 2016_ Beta release of BigQuery, DataStore, Logging and Storage. 
See the @@ -172,6 +181,7 @@ Google API | Status | Package [Vision][cloud-vision] | alpha | [`cloud.google.com/go/vision`][cloud-vision-ref] [Language][cloud-language] | alpha | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] [Speech][cloud-speech] | alpha | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref] +[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref] > **Alpha status**: the API is still being actively developed. As a @@ -430,6 +440,41 @@ if err != nil { } ``` + +## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) + +- [About Cloud Spanner][cloud-spanner] +- [API documentation][cloud-spanner-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) + +### Example Usage + +First create a `spanner.Client` to use throughout your application: + +```go +client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") +if err != nil { + log.Fatal(err) +} +``` + +```go +// Simple Reads And Writes +_, err := client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) +if err != nil { + log.Fatal(err) +} +row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) +if err != nil { + log.Fatal(err) +} +``` + + ## Contributing Contributions are welcome. Please, see the @@ -476,4 +521,8 @@ for more information. [cloud-speech]: https://cloud.google.com/speech [cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1 +[cloud-spanner]: https://cloud.google.com/spanner/ +[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner +[cloud-spanner-docs]: https://cloud.google.com/spanner/docs + [default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/authexample_test.go b/vendor/cloud.google.com/go/authexample_test.go index 528be133b..fe75467f9 100644 --- a/vendor/cloud.google.com/go/authexample_test.go +++ b/vendor/cloud.google.com/go/authexample_test.go @@ -21,40 +21,29 @@ import ( ) func Example_applicationDefaultCredentials() { - ctx := context.Background() - // Use Google Application Default Credentials to authorize and authenticate the client. - // More information about Application Default Credentials and how to enable is at - // https://developers.google.com/identity/protocols/application-default-credentials. - // - // This is the recommended way of authorizing and authenticating. + // Google Application Default Credentials is the recommended way to authorize + // and authenticate clients. // - // Note: The example uses the datastore client, but the same steps apply to - // the other client libraries underneath this package. - client, err := datastore.NewClient(ctx, "project-id") + // See the following link on how to create and obtain Application Default Credentials: + // https://developers.google.com/identity/protocols/application-default-credentials. + client, err := datastore.NewClient(context.Background(), "project-id") if err != nil { // TODO: handle error. } - // Use the client. - _ = client + _ = client // Use the client. } func Example_serviceAccountFile() { - // Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS - // and use the Application Default Credentials. 
- ctx := context.Background() // Use a JSON key file associated with a Google service account to - // authenticate and authorize. - // Go to https://console.developers.google.com/permissions/serviceaccounts to create - // and download a service account key for your project. + // authenticate and authorize. Service Account keys can be created and + // downloaded from https://console.developers.google.com/permissions/serviceaccounts. // - // Note: The example uses the datastore client, but the same steps apply to + // Note: This example uses the datastore client, but the same steps apply to // the other client libraries underneath this package. - client, err := datastore.NewClient(ctx, - "project-id", - option.WithServiceAccountFile("/path/to/service-account-key.json")) + client, err := datastore.NewClient(context.Background(), + "project-id", option.WithServiceAccountFile("/path/to/service-account-key.json")) if err != nil { // TODO: handle error. } - // Use the client. - _ = client + _ = client // Use the client. } diff --git a/vendor/cloud.google.com/go/bigquery/doc.go b/vendor/cloud.google.com/go/bigquery/doc.go index 996cd3b2f..f644792bc 100644 --- a/vendor/cloud.google.com/go/bigquery/doc.go +++ b/vendor/cloud.google.com/go/bigquery/doc.go @@ -286,5 +286,10 @@ Extractor, then optionally configure it, and lastly call its Run method. extractor.DisableHeader = true job, err = extractor.Run(ctx) // Poll the job for completion if desired, as above. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. */ package bigquery // import "cloud.google.com/go/bigquery" diff --git a/vendor/cloud.google.com/go/bigquery/integration_test.go b/vendor/cloud.google.com/go/bigquery/integration_test.go index 3fbf87bc0..fd215c8b3 100644 --- a/vendor/cloud.google.com/go/bigquery/integration_test.go +++ b/vendor/cloud.google.com/go/bigquery/integration_test.go @@ -274,7 +274,7 @@ func TestIntegration_UploadAndRead(t *testing.T) { }) } if err := upl.Put(ctx, saverRows); err != nil { - t.Fatal(err) + t.Fatal(putError(err)) } // Wait until the data has been uploaded. This can take a few seconds, according @@ -355,7 +355,13 @@ type TestStruct struct { Subs []*Sub } -type Sub struct{ B bool } +type Sub struct { + B bool + SubSub SubSub + SubSubs []*SubSub +} + +type SubSub struct{ Count int } func TestIntegration_UploadAndReadStructs(t *testing.T) { if client == nil { @@ -365,24 +371,29 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) // Populate the table. 
upl := table.Uploader() - structs := []*TestStruct{ - {Name: "a", Nums: []int{1, 2}, Sub: Sub{B: true}, Subs: []*Sub{{false}, {true}}}, - {Name: "b", Nums: []int{1}, Subs: []*Sub{{false}, nil, {true}}}, - nil, + want := []*TestStruct{ + {Name: "a", Nums: []int{1, 2}, Sub: Sub{B: true}, Subs: []*Sub{{B: false}, {B: true}}}, + {Name: "b", Nums: []int{1}, Subs: []*Sub{{B: false}, {B: false}, {B: true}}}, {Name: "c", Sub: Sub{B: true}}, + { + Name: "d", + Sub: Sub{SubSub: SubSub{12}, SubSubs: []*SubSub{{1}, {2}, {3}}}, + Subs: []*Sub{{B: false, SubSub: SubSub{4}}, {B: true, SubSubs: []*SubSub{{5}, {6}}}}, + }, } var savers []*StructSaver - for _, s := range structs { + for _, s := range want { savers = append(savers, &StructSaver{Schema: schema, Struct: s}) } if err := upl.Put(ctx, savers); err != nil { - t.Fatal(err) + t.Fatal(putError(err)) } // Wait until the data has been uploaded. This can take a few seconds, according @@ -407,13 +418,7 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) { } sort.Sort(byName(got)) - // BigQuery elides nils, both at top level and in nested structs. - // This may be surprising, but the client library is faithfully - // rendering these nils into JSON, so we should not change it. - // structs[1].Subs[1] and structs[2] are nil. - want := []*TestStruct{structs[0], structs[1], structs[3]} - want[1].Subs = []*Sub{want[1].Subs[0], want[1].Subs[2]} - + // BigQuery does not elide nils. It reports an error for nil fields. for i, g := range got { if i >= len(want) { t.Errorf("%d: got %v, past end of want", i, pretty.Value(g)) @@ -601,20 +606,22 @@ func TestIntegration_TimeTypes(t *testing.T) { {Name: "d", Type: DateFieldType}, {Name: "t", Type: TimeFieldType}, {Name: "dt", Type: DateTimeFieldType}, + {Name: "ts", Type: TimestampFieldType}, } table := newTable(t, dtSchema) defer table.Delete(ctx) d := civil.Date{2016, 3, 20} tm := civil.Time{12, 30, 0, 0} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) wantRows := [][]Value{ - []Value{d, tm, civil.DateTime{d, tm}}, + []Value{d, tm, civil.DateTime{d, tm}, ts}, } upl := table.Uploader() if err := upl.Put(ctx, []*ValuesSaver{ {Schema: dtSchema, Row: wantRows[0]}, }); err != nil { - t.Fatal(err) + t.Fatal(putError(err)) } if err := waitForRow(ctx, table); err != nil { t.Fatal(err) @@ -622,9 +629,9 @@ func TestIntegration_TimeTypes(t *testing.T) { // SQL wants DATETIMEs with a space between date and time, but the service // returns them in RFC3339 form, with a "T" between. 
- query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt) "+ - "VALUES ('%s', '%s', '%s %s')", - table.TableID, d, tm, d, tm) + query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+ + "VALUES ('%s', '%s', '%s %s', '%s')", + table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05")) q := client.Query(query) q.UseStandardSQL = true // necessary for DML job, err := q.Run(ctx) @@ -733,3 +740,15 @@ func waitForRow(ctx context.Context, table *Table) error { time.Sleep(1 * time.Second) } } + +func putError(err error) string { + pme, ok := err.(PutMultiError) + if !ok { + return err.Error() + } + var msgs []string + for _, err := range pme { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "\n") +} diff --git a/vendor/cloud.google.com/go/bigquery/params.go b/vendor/cloud.google.com/go/bigquery/params.go index f36169252..2b97f3a2c 100644 --- a/vendor/cloud.google.com/go/bigquery/params.go +++ b/vendor/cloud.google.com/go/bigquery/params.go @@ -49,7 +49,7 @@ func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{} return "", true, nil, nil } -var fieldCache = fields.NewCache(bqTagParser, nil) +var fieldCache = fields.NewCache(bqTagParser, nil, nil) var ( int64ParamType = &bq.QueryParameterType{Type: "INT64"} diff --git a/vendor/cloud.google.com/go/bigquery/schema.go b/vendor/cloud.google.com/go/bigquery/schema.go index 4d0da28b8..d6d88a5d5 100644 --- a/vendor/cloud.google.com/go/bigquery/schema.go +++ b/vendor/cloud.google.com/go/bigquery/schema.go @@ -140,7 +140,7 @@ var typeOfByteSlice = reflect.TypeOf([]byte{}) // // Recursively defined structs are also disallowed. func InferSchema(st interface{}) (Schema, error) { - return inferSchemaReflect(reflect.TypeOf(st)) + return inferSchemaReflectCached(reflect.TypeOf(st)) } var schemaCache atomiccache.Cache @@ -150,19 +150,26 @@ type cacheVal struct { err error } -func inferSchemaReflect(t reflect.Type) (Schema, error) { +func inferSchemaReflectCached(t reflect.Type) (Schema, error) { cv := schemaCache.Get(t, func() interface{} { - s, err := inferStruct(t, map[reflect.Type]bool{}) + s, err := inferSchemaReflect(t) return cacheVal{s, err} }).(cacheVal) return cv.schema, cv.err } -func inferStruct(t reflect.Type, seen map[reflect.Type]bool) (Schema, error) { - if seen[t] { +func inferSchemaReflect(t reflect.Type) (Schema, error) { + rec, err := hasRecursiveType(t, nil) + if err != nil { + return nil, err + } + if rec { return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t) } - seen[t] = true + return inferStruct(t) +} + +func inferStruct(t reflect.Type) (Schema, error) { switch t.Kind() { case reflect.Ptr: if t.Elem().Kind() != reflect.Struct { @@ -172,14 +179,14 @@ func inferStruct(t reflect.Type, seen map[reflect.Type]bool) (Schema, error) { fallthrough case reflect.Struct: - return inferFields(t, seen) + return inferFields(t) default: return nil, errNoStruct } } // inferFieldSchema infers the FieldSchema for a Go type -func inferFieldSchema(rt reflect.Type, seen map[reflect.Type]bool) (*FieldSchema, error) { +func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) { switch rt { case typeOfByteSlice: return &FieldSchema{Required: true, Type: BytesFieldType}, nil @@ -203,7 +210,7 @@ func inferFieldSchema(rt reflect.Type, seen map[reflect.Type]bool) (*FieldSchema return nil, errUnsupportedFieldType } - f, err := inferFieldSchema(et, seen) + f, err := inferFieldSchema(et) if err != nil { return nil, err } @@ -211,7 +218,7 @@ func inferFieldSchema(rt 
reflect.Type, seen map[reflect.Type]bool) (*FieldSchema f.Required = false return f, nil case reflect.Struct, reflect.Ptr: - nested, err := inferStruct(rt, seen) + nested, err := inferStruct(rt) if err != nil { return nil, err } @@ -228,14 +235,14 @@ func inferFieldSchema(rt reflect.Type, seen map[reflect.Type]bool) (*FieldSchema } // inferFields extracts all exported field types from struct type. -func inferFields(rt reflect.Type, seen map[reflect.Type]bool) (Schema, error) { +func inferFields(rt reflect.Type) (Schema, error) { var s Schema fields, err := fieldCache.Fields(rt) if err != nil { return nil, err } for _, field := range fields { - f, err := inferFieldSchema(field.Type, seen) + f, err := inferFieldSchema(field.Type) if err != nil { return nil, err } @@ -256,3 +263,50 @@ func isSupportedIntType(t reflect.Type) bool { return false } } + +// typeList is a linked list of reflect.Types. +type typeList struct { + t reflect.Type + next *typeList +} + +func (l *typeList) has(t reflect.Type) bool { + for l != nil { + if l.t == t { + return true + } + l = l.next + } + return false +} + +// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly, +// via exported fields. (Schema inference ignores unexported fields.) +func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return false, nil + } + if seen.has(t) { + return true, nil + } + fields, err := fieldCache.Fields(t) + if err != nil { + return false, err + } + seen = &typeList{t, seen} + // Because seen is a linked list, additions to it from one field's + // recursive call will not affect the value for subsequent fields' calls. + for _, field := range fields { + ok, err := hasRecursiveType(field.Type, seen) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + return false, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/schema_test.go b/vendor/cloud.google.com/go/bigquery/schema_test.go index c3eef556f..88e3dda93 100644 --- a/vendor/cloud.google.com/go/bigquery/schema_test.go +++ b/vendor/cloud.google.com/go/bigquery/schema_test.go @@ -306,13 +306,13 @@ func TestSimpleInference(t *testing.T) { }, }, } - for i, tc := range testCases { + for _, tc := range testCases { got, err := InferSchema(tc.in) if err != nil { - t.Fatalf("%d: error inferring TableSchema: %v", i, err) + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) } if !reflect.DeepEqual(got, tc.want) { - t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, pretty.Value(got), pretty.Value(tc.want)) } } @@ -339,6 +339,10 @@ type ptrNested struct { Ptr *struct{ Inside int } } +type dup struct { // more than one field of the same struct type + A, B allBoolean +} + func TestNestedInference(t *testing.T) { testCases := []struct { in interface{} @@ -386,15 +390,32 @@ func TestNestedInference(t *testing.T) { }, }, }, + { + in: dup{}, + want: Schema{ + &FieldSchema{ + Name: "A", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + &FieldSchema{ + Name: "B", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + }, + }, } - for i, tc := range testCases { + for _, tc := range testCases { got, err := InferSchema(tc.in) if err != nil { - t.Fatalf("%d: error inferring TableSchema: %v", i, err) + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) } 
if !reflect.DeepEqual(got, tc.want) { - t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, pretty.Value(got), pretty.Value(tc.want)) } } @@ -730,3 +751,42 @@ func TestSchemaErrors(t *testing.T) { } } } + +func TestHasRecursiveType(t *testing.T) { + type ( + nonStruct int + nonRec struct{ A string } + dup struct{ A, B nonRec } + rec struct { + A int + B *rec + } + recUnexported struct { + A int + b *rec + } + hasRec struct { + A int + R *rec + } + ) + for _, test := range []struct { + in interface{} + want bool + }{ + {nonStruct(0), false}, + {nonRec{}, false}, + {dup{}, false}, + {rec{}, true}, + {recUnexported{}, false}, + {hasRec{}, true}, + } { + got, err := hasRecursiveType(reflect.TypeOf(test.in), nil) + if err != nil { + t.Fatal(err) + } + if got != test.want { + t.Errorf("%T: got %t, want %t", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/value.go b/vendor/cloud.google.com/go/bigquery/value.go index 46f626a37..509853a8c 100644 --- a/vendor/cloud.google.com/go/bigquery/value.go +++ b/vendor/cloud.google.com/go/bigquery/value.go @@ -485,9 +485,19 @@ func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) // structFieldToUploadValue is careful to return a true nil interface{} when needed, so its // caller can easily identify a nil value. func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) { - // A non-nested field, repeated or not, can be represented by its Go value. + if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) { + return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", + schemaField.Name, vfield.Type()) + } + + // A non-nested field can be represented by its Go value. if schemaField.Type != RecordFieldType { - return vfield.Interface(), nil + if !schemaField.Repeated || vfield.Len() > 0 { + return vfield.Interface(), nil + } + // The service treats a null repeated field as an error. Return + // nil to omit the field entirely. + return nil, nil } // A non-repeated nested field is converted into a map[string]Value. if !schemaField.Repeated { @@ -501,10 +511,6 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i return m, nil } // A repeated nested field is converted into a slice of maps. 
- if vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array { - return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", - schemaField.Name, vfield.Type()) - } if vfield.Len() == 0 { return nil, nil } @@ -618,7 +624,7 @@ func convertBasicType(val string, typ FieldType) (Value, error) { return strconv.ParseBool(val) case TimestampFieldType: f, err := strconv.ParseFloat(val, 64) - return Value(time.Unix(0, int64(f*1e9))), err + return Value(time.Unix(0, int64(f*1e9)).UTC()), err case DateFieldType: return civil.ParseDate(val) case TimeFieldType: diff --git a/vendor/cloud.google.com/go/bigquery/value_test.go b/vendor/cloud.google.com/go/bigquery/value_test.go index b1e2d8779..01accd226 100644 --- a/vendor/cloud.google.com/go/bigquery/value_test.go +++ b/vendor/cloud.google.com/go/bigquery/value_test.go @@ -73,6 +73,9 @@ func TestConvertTime(t *testing.T) { if !got[0].(time.Time).Equal(thyme) { t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme) } + if got[0].(time.Time).Location() != time.UTC { + t.Errorf("expected time zone UTC: got:\n%v", got) + } } func TestConvertNullValues(t *testing.T) { @@ -474,10 +477,7 @@ func TestStructSaver(t *testing.T) { } check("all values", in, want) check("all values, ptr", &in, want) - check("empty struct", T{}, map[string]Value{ - "s": "", - "r": []int(nil), - }) + check("empty struct", T{}, map[string]Value{"s": ""}) // Missing and extra fields ignored. type T2 struct { @@ -490,7 +490,6 @@ func TestStructSaver(t *testing.T) { check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}}, map[string]Value{ "s": "", - "r": []int(nil), "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}}, }) } diff --git a/vendor/cloud.google.com/go/bigtable/bigtable_test.go b/vendor/cloud.google.com/go/bigtable/bigtable_test.go index 8d54a59a2..b3257659e 100644 --- a/vendor/cloud.google.com/go/bigtable/bigtable_test.go +++ b/vendor/cloud.google.com/go/bigtable/bigtable_test.go @@ -20,7 +20,6 @@ import ( "fmt" "math/rand" "reflect" - "sort" "strings" "sync" "testing" @@ -157,9 +156,6 @@ func TestClientIntegration(t *testing.T) { {Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")}, }, } - for _, ris := range row { - sort.Sort(byColumn(ris)) - } if !reflect.DeepEqual(row, wantRow) { t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) } @@ -173,7 +169,7 @@ func TestClientIntegration(t *testing.T) { limit ReadOption // may be nil // We do the read, grab all the cells, turn them into "--", - // sort that list, and join with a comma. + // and join with a comma. 
want string }{ { @@ -294,6 +290,24 @@ func TestClientIntegration(t *testing.T) { filter: ValueRangeFilter([]byte("3"), []byte("5")), // matches nothing want: "", }, + { + desc: "read with InterleaveFilter, no matches on all filters", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*")), + want: "", + }, + { + desc: "read with InterleaveFilter, no duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*j.*")), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InterleaveFilter, with duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")), + want: "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1", + }, } for _, tc := range readTests { var opts []ReadOption @@ -316,7 +330,6 @@ func TestClientIntegration(t *testing.T) { t.Errorf("%s: %v", tc.desc, err) continue } - sort.Strings(elt) if got := strings.Join(elt, ","); got != tc.want { t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) } @@ -337,7 +350,6 @@ func TestClientIntegration(t *testing.T) { t.Errorf("read RowList: %v", err) } - sort.Strings(elt) if got := strings.Join(elt, ","); got != want { t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want) } @@ -491,6 +503,37 @@ func TestClientIntegration(t *testing.T) { if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow) } + // Delete non-existing cells, no such column family in this row + // Should not delete anything + if err := adminClient.CreateColumnFamily(ctx, table, "non-existing"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + mut = NewMutation() + mut.DeleteTimestampRange("non-existing", "col", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) + } + // Delete non-existing cells, no such column in this column family + // Should not delete anything + mut = NewMutation() + mut.DeleteTimestampRange("ts", "non-existing", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) + } // Delete the cell with timestamp 2000 and repeat the last read, // checking that we get ts 3000 and ts 1000. 
mut = NewMutation() @@ -511,7 +554,7 @@ func TestClientIntegration(t *testing.T) { } checkpoint("tested multiple versions in a cell") - // Check DeleteColumnFamily + // Check DeleteCellsInFamily if err := adminClient.CreateColumnFamily(ctx, table, "status"); err != nil { t.Fatalf("Creating column family: %v", err) } @@ -555,12 +598,85 @@ func TestClientIntegration(t *testing.T) { {Row: "row2", Column: "ts:col", Timestamp: 0, Value: []byte("3")}, }, "status": []ReadItem{ - {Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")}, {Row: "row2", Column: "status:end", Timestamp: 0, Value: []byte("2")}, + {Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")}, }, } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow) + } checkpoint("tested family delete") + // Check DeleteCellsInColumn + mut = NewMutation() + mut.Set("status", "start", 0, []byte("1")) + mut.Set("status", "middle", 0, []byte("2")) + mut.Set("status", "end", 0, []byte("3")) + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "middle") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{ + "status": []ReadItem{ + {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, + {Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")}, + }, + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "start") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{ + "status": []ReadItem{ + {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, + }, + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "end") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if len(r) != 0 { + t.Errorf("Delete column: got %v, want empty row", r) + } + // Add same cell after delete + mut = NewMutation() + mut.Set("status", "end", 0, []byte("3")) + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow) + } + checkpoint("tested column delete") + // Do highly concurrent reads/writes. // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. 
const maxConcurrency = 100 @@ -688,9 +804,6 @@ func TestClientIntegration(t *testing.T) { if err != nil { t.Fatalf("Reading a bulk row: %v", err) } - for _, ris := range row { - sort.Sort(byColumn(ris)) - } var wantItems []ReadItem for _, val := range ss { wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")}) @@ -732,12 +845,6 @@ func fill(b, sub []byte) { } } -type byColumn []ReadItem - -func (b byColumn) Len() int { return len(b) } -func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } - func clearTimestamps(r Row) { for _, ris := range r { for i := range ris { diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go index a4b55ccef..89717db1a 100644 --- a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go @@ -187,8 +187,10 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu } newcf := &columnFamily{ name: req.Name + "/columnFamilies/" + mod.Id, + order: tbl.counter, gcRule: create.GcRule, } + tbl.counter++ tbl.families[mod.Id] = newcf } else if mod.GetDrop() { if _, ok := tbl.families[mod.Id]; !ok { @@ -358,21 +360,23 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) ( } rrr := &btpb.ReadRowsResponse{} - for col, cells := range r.cells { - i := strings.Index(col, ":") // guaranteed to exist - fam, col := col[:i], col[i+1:] - if len(cells) == 0 { - continue - } - // TODO(dsymonds): Apply transformers. - for _, cell := range cells { - rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{ - RowKey: []byte(r.key), - FamilyName: &wrappers.StringValue{Value: fam}, - Qualifier: &wrappers.BytesValue{Value: []byte(col)}, - TimestampMicros: cell.ts, - Value: cell.value, - }) + families := r.sortedFamilies() + for _, fam := range families { + for _, colName := range fam.colNames { + cells := fam.cells[colName] + if len(cells) == 0 { + continue + } + // TODO(dsymonds): Apply transformers. + for _, cell := range cells { + rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(r.key), + FamilyName: &wrappers.StringValue{Value: fam.name}, + Qualifier: &wrappers.BytesValue{Value: []byte(colName)}, + TimestampMicros: cell.ts, + Value: cell.value, + }) + } } } // We can't have a cell with just COMMIT set, which would imply a new empty cell. @@ -406,21 +410,28 @@ func filterRow(f *btpb.RowFilter, r *row) bool { } // merge // TODO(dsymonds): is this correct? - r.cells = make(map[string][]cell) + r.families = make(map[string]*family) for _, sr := range srs { - for col, cs := range sr.cells { - r.cells[col] = append(r.cells[col], cs...) + for _, fam := range sr.families { + f := r.getOrCreateFamily(fam.name, fam.order) + for colName, cs := range fam.cells { + f.cells[colName] = append(f.cellsByColumn(colName), cs...) 
+ } } } - for _, cs := range r.cells { - sort.Sort(byDescTS(cs)) + for _, fam := range r.families { + for _, cs := range fam.cells { + sort.Sort(byDescTS(cs)) + } } return true case *btpb.RowFilter_CellsPerColumnLimitFilter: lim := int(f.CellsPerColumnLimitFilter) - for col, cs := range r.cells { - if len(cs) > lim { - r.cells[col] = cs[:lim] + for _, fam := range r.families { + for col, cs := range fam.cells { + if len(cs) > lim { + fam.cells[col] = cs[:lim] + } } } return true @@ -449,11 +460,11 @@ func filterRow(f *btpb.RowFilter, r *row) bool { // Any other case, operate on a per-cell basis. cellCount := 0 - for key, cs := range r.cells { - i := strings.Index(key, ":") // guaranteed to exist - fam, col := key[:i], key[i+1:] - r.cells[key] = filterCells(f, fam, col, cs) - cellCount += len(r.cells[key]) + for _, fam := range r.families { + for colName, cs := range fam.cells { + fam.cells[colName] = filterCells(f, fam.name, colName, cs) + cellCount += len(fam.cells[colName]) + } } return cellCount > 0 } @@ -579,7 +590,7 @@ func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*bt return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) } - fs := tbl.columnFamiliesSet() + fs := tbl.columnFamilies() r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() defer r.mu.Unlock() @@ -600,7 +611,7 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_Mu res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))} - fs := tbl.columnFamiliesSet() + fs := tbl.columnFamilies() for i, entry := range req.Entries { r := tbl.mutableRow(string(entry.RowKey)) @@ -630,7 +641,7 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate res := &btpb.CheckAndMutateRowResponse{} - fs := tbl.columnFamiliesSet() + fs := tbl.columnFamilies() r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() @@ -640,18 +651,13 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate whichMut := false if req.PredicateFilter == nil { // Use true_mutations iff row contains any cells. - whichMut = len(r.cells) > 0 + whichMut = !r.isEmpty() } else { // Use true_mutations iff any cells in the row match the filter. // TODO(dsymonds): This could be cheaper. nr := r.copy() filterRow(req.PredicateFilter, nr) - for _, cs := range nr.cells { - if len(cs) > 0 { - whichMut = true - break - } - } + whichMut = !nr.isEmpty() // TODO(dsymonds): Figure out if this is supposed to be set // even when there's no predicate filter. res.PredicateMatched = whichMut @@ -670,14 +676,14 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate // applyMutations applies a sequence of mutations to a row. // fam should be a snapshot of the keys of tbl.families. // It assumes r.mu is locked. 
-func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]bool) error { +func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]*columnFamily) error { for _, mut := range muts { switch mut := mut.Mutation.(type) { default: return fmt.Errorf("can't handle mutation type %T", mut) case *btpb.Mutation_SetCell_: set := mut.SetCell - if !fs[set.FamilyName] { + if _, ok := fs[set.FamilyName]; !ok { return fmt.Errorf("unknown family %q", set.FamilyName) } ts := set.TimestampMicros @@ -687,54 +693,65 @@ func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]boo if !tbl.validTimestamp(ts) { return fmt.Errorf("invalid timestamp %d", ts) } - col := fmt.Sprintf("%s:%s", set.FamilyName, set.ColumnQualifier) + fam := set.FamilyName + col := string(set.ColumnQualifier) newCell := cell{ts: ts, value: set.Value} - r.cells[col] = appendOrReplaceCell(r.cells[col], newCell) + f := r.getOrCreateFamily(fam, fs[fam].order) + f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) case *btpb.Mutation_DeleteFromColumn_: del := mut.DeleteFromColumn - col := fmt.Sprintf("%s:%s", del.FamilyName, del.ColumnQualifier) - - cs := r.cells[col] - if del.TimeRange != nil { - tsr := del.TimeRange - if !tbl.validTimestamp(tsr.StartTimestampMicros) { - return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros) - } - if !tbl.validTimestamp(tsr.EndTimestampMicros) { - return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros) - } - // Find half-open interval to remove. - // Cells are in descending timestamp order, - // so the predicates to sort.Search are inverted. - si, ei := 0, len(cs) - if tsr.StartTimestampMicros > 0 { - ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) - } - if tsr.EndTimestampMicros > 0 { - si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) + if _, ok := fs[del.FamilyName]; !ok { + return fmt.Errorf("unknown family %q", del.FamilyName) + } + fam := del.FamilyName + col := string(del.ColumnQualifier) + if _, ok := r.families[fam]; ok { + cs := r.families[fam].cells[col] + if del.TimeRange != nil { + tsr := del.TimeRange + if !tbl.validTimestamp(tsr.StartTimestampMicros) { + return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros) + } + if !tbl.validTimestamp(tsr.EndTimestampMicros) { + return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros) + } + // Find half-open interval to remove. + // Cells are in descending timestamp order, + // so the predicates to sort.Search are inverted. + si, ei := 0, len(cs) + if tsr.StartTimestampMicros > 0 { + ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) + } + if tsr.EndTimestampMicros > 0 { + si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) + } + if si < ei { + copy(cs[si:], cs[ei:]) + cs = cs[:len(cs)-(ei-si)] + } + } else { + cs = nil } - if si < ei { - copy(cs[si:], cs[ei:]) - cs = cs[:len(cs)-(ei-si)] + if len(cs) == 0 { + delete(r.families[fam].cells, col) + colNames := r.families[fam].colNames + i := sort.Search(len(colNames), func(i int) bool { return colNames[i] >= col }) + if i < len(colNames) && colNames[i] == col { + r.families[fam].colNames = append(colNames[:i], colNames[i+1:]...) 
+ } + if len(r.families[fam].cells) == 0 { + delete(r.families, fam) + } + } else { + r.families[fam].cells[col] = cs } - } else { - cs = nil - } - if len(cs) == 0 { - delete(r.cells, col) - } else { - r.cells[col] = cs } case *btpb.Mutation_DeleteFromRow_: - r.cells = make(map[string][]cell) + r.families = make(map[string]*family) case *btpb.Mutation_DeleteFromFamily_: - fampre := mut.DeleteFromFamily.FamilyName + ":" - for col, _ := range r.cells { - if strings.HasPrefix(col, fampre) { - delete(r.cells, col) - } - } + fampre := mut.DeleteFromFamily.FamilyName + delete(r.families, fampre) } } return nil @@ -779,7 +796,7 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri updates := make(map[string]cell) // copy of updated cells; keyed by full column name - fs := tbl.columnFamiliesSet() + fs := tbl.columnFamilies() r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() @@ -787,17 +804,21 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri // Assume all mutations apply to the most recent version of the cell. // TODO(dsymonds): Verify this assumption and document it in the proto. for _, rule := range req.Rules { - if !fs[rule.FamilyName] { + if _, ok := fs[rule.FamilyName]; !ok { return nil, fmt.Errorf("unknown family %q", rule.FamilyName) } - key := fmt.Sprintf("%s:%s", rule.FamilyName, rule.ColumnQualifier) + fam := rule.FamilyName + col := string(rule.ColumnQualifier) + isEmpty := false + f := r.getOrCreateFamily(fam, fs[fam].order) + cs := f.cells[col] + isEmpty = len(cs) == 0 - cells := r.cells[key] ts := newTimestamp() var newCell, prevCell cell - isEmpty := len(cells) == 0 if !isEmpty { + cells := r.families[fam].cells[col] prevCell = cells[0] // ts is the max of now or the prev cell's timestamp in case the @@ -824,8 +845,9 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri binary.BigEndian.PutUint64(val[:], uint64(v)) newCell = cell{ts: ts, value: val[:]} } + key := strings.Join([]string{fam, col}, ":") updates[key] = newCell - r.cells[key] = appendOrReplaceCell(r.cells[key], newCell) + f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) } res := &btpb.Row{ @@ -925,6 +947,7 @@ func (s *server) gcloop(done <-chan int) { type table struct { mu sync.RWMutex + counter uint64 // increment by 1 when a new family is created families map[string]*columnFamily // keyed by plain family name rows []*row // sorted by row key rowIndex map[string]*row // indexed by row key @@ -932,16 +955,20 @@ type table struct { func newTable(ctr *btapb.CreateTableRequest) *table { fams := make(map[string]*columnFamily) + c := uint64(0) if ctr.Table != nil { for id, cf := range ctr.Table.ColumnFamilies { fams[id] = &columnFamily{ name: ctr.Parent + "/columnFamilies/" + id, + order: c, gcRule: cf.GcRule, } + c++ } } return &table{ families: fams, + counter: c, rowIndex: make(map[string]*row), } } @@ -961,14 +988,6 @@ func (t *table) columnFamilies() map[string]*columnFamily { return cp } -func (t *table) columnFamiliesSet() map[string]bool { - fs := make(map[string]bool) - for fam := range t.columnFamilies() { - fs[fam] = true - } - return fs -} - func (t *table) mutableRow(row string) *row { // Try fast path first. 
t.mu.RLock() @@ -1023,14 +1042,14 @@ func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key } type row struct { key string - mu sync.Mutex - cells map[string][]cell // keyed by full column name; cells are in descending timestamp order + mu sync.Mutex + families map[string]*family // keyed by family name } func newRow(key string) *row { return &row{ - key: key, - cells: make(map[string][]cell), + key: key, + families: make(map[string]*family), } } @@ -1038,36 +1057,78 @@ func newRow(key string) *row { // Cell values are aliased. // r.mu should be held. func (r *row) copy() *row { - nr := &row{ - key: r.key, - cells: make(map[string][]cell, len(r.cells)), - } - for col, cs := range r.cells { - // Copy the []cell slice, but not the []byte inside each cell. - nr.cells[col] = append([]cell(nil), cs...) + nr := newRow(r.key) + for _, fam := range r.families { + nr.families[fam.name] = &family{ + name: fam.name, + order: fam.order, + colNames: fam.colNames, + cells: make(map[string][]cell), + } + for col, cs := range fam.cells { + // Copy the []cell slice, but not the []byte inside each cell. + nr.families[fam.name].cells[col] = append([]cell(nil), cs...) + } } return nr } +// isEmpty returns true if a row doesn't contain any cell +func (r *row) isEmpty() bool { + for _, fam := range r.families { + for _, cs := range fam.cells { + if len(cs) > 0 { + return false + } + } + } + return true +} + +// sortedFamilies returns a column family set +// sorted in ascending creation order in a row. +func (r *row) sortedFamilies() []*family { + var families []*family + for _, fam := range r.families { + families = append(families, fam) + } + sort.Sort(byCreationOrder(families)) + return families +} + +func (r *row) getOrCreateFamily(name string, order uint64) *family { + if _, ok := r.families[name]; !ok { + r.families[name] = &family{ + name: name, + order: order, + cells: make(map[string][]cell), + } + } + return r.families[name] +} + // gc applies the given GC rules to the row. // r.mu should be held. func (r *row) gc(rules map[string]*btapb.GcRule) { - for col, cs := range r.cells { - fam := col[:strings.Index(col, ":")] - rule, ok := rules[fam] + for _, fam := range r.families { + rule, ok := rules[fam.name] if !ok { continue } - r.cells[col] = applyGC(cs, rule) + for col, cs := range fam.cells { + r.families[fam.name].cells[col] = applyGC(cs, rule) + } } } // size returns the total size of all cell values in the row. 
func (r *row) size() int {
	size := 0
-	for _, cells := range r.cells {
-		for _, cell := range cells {
-			size += len(cell.value)
+	for _, fam := range r.families {
+		for _, cells := range fam.cells {
+			for _, cell := range cells {
+				size += len(cell.value)
+			}
		}
	}
	return size
@@ -1114,6 +1175,29 @@ func applyGC(cells []cell, rule *btapb.GcRule) []cell {
	return cells
}
+type family struct {
+	name     string            // Column family name
+	order    uint64            // Creation order of column family
+	colNames []string          // Column names are sorted in lexicographical ascending order
+	cells    map[string][]cell // Keyed by column name; cells are in descending timestamp order
+}
+
+type byCreationOrder []*family
+
+func (b byCreationOrder) Len() int           { return len(b) }
+func (b byCreationOrder) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byCreationOrder) Less(i, j int) bool { return b[i].order < b[j].order }
+
+// cellsByColumn adds the column name to the colNames set if it is not already present,
+// and returns all cells within that column.
+func (f *family) cellsByColumn(name string) []cell {
+	if _, ok := f.cells[name]; !ok {
+		f.colNames = append(f.colNames, name)
+		sort.Strings(f.colNames)
+	}
+	return f.cells[name]
+}
+
type cell struct {
	ts    int64
	value []byte
@@ -1127,6 +1211,7 @@ func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }
type columnFamily struct {
	name   string
+	order  uint64 // Creation order of column family
	gcRule *btapb.GcRule
}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
index d0caf3013..0e837bc6b 100644
--- a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
+++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
@@ -355,3 +355,163 @@ func TestDropRowRange(t *testing.T) {
		t.Errorf("Row count after drop range: got %d, want %d", got, want)
	}
}
+
+type MockReadRowsServer struct {
+	responses []*btpb.ReadRowsResponse
+	grpc.ServerStream
+}
+
+func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error {
+	s.responses = append(s.responses, resp)
+	return nil
+}
+
+func TestReadRowsOrder(t *testing.T) {
+	s := &server{
+		tables: make(map[string]*table),
+	}
+	ctx := context.Background()
+	newTbl := btapb.Table{
+		ColumnFamilies: map[string]*btapb.ColumnFamily{
+			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
+		},
+	}
+	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
+	if err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+	count := 3
+	mcf := func(i int) *btapb.ModifyColumnFamiliesRequest {
+		return &btapb.ModifyColumnFamiliesRequest{
+			Name: tblInfo.Name,
+			Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
+				Id:  "cf" + strconv.Itoa(i),
+				Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
+			}},
+		}
+	}
+	for i := 1; i <= count; i++ {
+		_, err = s.ModifyColumnFamilies(ctx, mcf(i))
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	// Populate the table
+	for fc := 0; fc < count; fc++ {
+		for cc := count; cc > 0; cc-- {
+			for tc := 0; tc < count; tc++ {
+				req := &btpb.MutateRowRequest{
+					TableName: tblInfo.Name,
+					RowKey:    []byte("row"),
+					Mutations: []*btpb.Mutation{{
+						Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+							FamilyName:      "cf" + strconv.Itoa(fc),
+							ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
+							TimestampMicros: int64((tc + 1) * 1000),
+							Value:           []byte{},
+						}},
+					}},
+				}
+				if _, err := s.MutateRow(ctx, req); err != nil {
+					t.Fatalf("Populating 
table: %v", err) + } + } + } + } + req := &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock := &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 27 { + t.Fatal("Chunk count: got %d, want 27", len(mock.responses[0].Chunks)) + } + testOrder := func(ms *MockReadRowsServer) { + var prevFam, prevCol string + var prevTime int64 + for _, cc := range ms.responses[0].Chunks { + if prevFam == "" { + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + continue + } + if cc.FamilyName.Value < prevFam { + t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam) + } else if cc.FamilyName.Value == prevFam { + if string(cc.Qualifier.Value) < prevCol { + t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol) + } else if string(cc.Qualifier.Value) == prevCol { + if cc.TimestampMicros > prevTime { + t.Errorf("cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime) + } + } + } + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + } + } + testOrder(mock) + + // Read with interleave filter + inter := &btpb.RowFilter_Interleave{} + fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}} + cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}} + inter.Filters = append(inter.Filters, fnr, cqr) + req = &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + Filter: &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{inter}, + }, + } + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 18 { + t.Fatal("Chunk count: got %d, want 18", len(mock.responses[0].Chunks)) + } + testOrder(mock) + + // Check order after ReadModifyWriteRow + rmw := func(i int) *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Rules: []*btpb.ReadModifyWriteRule{{ + FamilyName: "cf3", + ColumnQualifier: []byte("col" + strconv.Itoa(i)), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, + }}, + } + } + for i := count; i > 0; i-- { + s.ReadModifyWriteRow(ctx, rmw(i)) + } + req = &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 30 { + t.Fatal("Chunk count: got %d, want 30", len(mock.responses[0].Chunks)) + } + testOrder(mock) +} diff --git a/vendor/cloud.google.com/go/bigtable/doc.go b/vendor/cloud.google.com/go/bigtable/doc.go index 449680559..0d7706f07 100644 --- a/vendor/cloud.google.com/go/bigtable/doc.go +++ b/vendor/cloud.google.com/go/bigtable/doc.go @@ -92,6 +92,12 @@ If a read or write operation encounters a transient error it will be retried unt response, an unretryable error or the context deadline is reached. 
Non-idempotent writes (where the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls will not re-scan rows that have already been processed. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. + */ package bigtable // import "cloud.google.com/go/bigtable" diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go index 6949758cb..6a8702c77 100644 --- a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go @@ -84,15 +84,16 @@ func main() { log.Print("Reading source context file: ", err) } var ts oauth2.TokenSource + ctx := context.Background() if *serviceAccountFile != "" { - if ts, err = serviceAcctTokenSource(context.Background(), *serviceAccountFile, cd.CloudDebuggerScope); err != nil { + if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil { log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err) } - } else if ts, err = google.DefaultTokenSource(context.Background(), cd.CloudDebuggerScope); err != nil { + } else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil { log.Print("Error getting application default credentials for Cloud Debugger:", err) os.Exit(103) } - c, err := debuglet.NewController(debuglet.Options{ + c, err := debuglet.NewController(ctx, debuglet.Options{ ProjectNumber: *projectNumber, ProjectID: *projectID, AppModule: *appModule, @@ -134,14 +135,14 @@ func main() { ch := make(chan bool) // Start a goroutine that sends List requests to the Debuglet Controller, and // sets any breakpoints it gets back. - go breakpointListLoop(c, bs, ch) + go breakpointListLoop(ctx, c, bs, ch) // Wait until 5 seconds have passed or breakpointListLoop has closed ch. select { case <-time.After(5 * time.Second): case <-ch: } // Run the debuggee. - programLoop(c, bs, prog) + programLoop(ctx, c, bs, prog) } // usage prints a usage message to stderr and exits. @@ -181,7 +182,7 @@ func readSourceContextFile(filename string) ([]*cd.SourceContext, error) { // in the program. // // After the first List call finishes, ch is closed. -func breakpointListLoop(c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) { +func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) { const ( avgTimeBetweenCalls = time.Second errorDelay = 5 * time.Second @@ -196,7 +197,7 @@ func breakpointListLoop(c *debuglet.Controller, bs *breakpoints.BreakpointStore, for { callStart := time.Now() - resp, err := c.List() + resp, err := c.List(ctx) if err != nil && err != debuglet.ErrListUnchanged { log.Printf("Debuglet controller server error: %v", err) } @@ -216,7 +217,7 @@ func breakpointListLoop(c *debuglet.Controller, bs *breakpoints.BreakpointStore, errorBps := bs.ErrorBreakpoints() for _, bp := range errorBps { go func(bp *cd.Breakpoint) { - if err := c.Update(bp.Id, bp); err != nil { + if err := c.Update(ctx, bp.Id, bp); err != nil { log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) } }(bp) @@ -247,7 +248,7 @@ func breakpointListLoop(c *debuglet.Controller, bs *breakpoints.BreakpointStore, // programLoop runs the program being debugged to completion. When a breakpoint's // conditions are satisfied, it sends an Update RPC to the Debuglet Controller. 
// The function returns when the program exits and all Update RPCs have finished. -func programLoop(c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) { +func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) { var wg sync.WaitGroup for { // Run the program until it hits a breakpoint or exits. @@ -318,7 +319,7 @@ func programLoop(c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog d bp.VariableTable = variableTable bp.Status = stackFramesStatusMessage } - if err := c.Update(bp.Id, bp); err != nil { + if err := c.Update(ctx, bp.Id, bp); err != nil { log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) } }(bp) diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go index 97ae7a7c7..1bc2c982c 100644 --- a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go @@ -82,14 +82,13 @@ type Options struct { } type serviceInterface interface { - Register(req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) - Update(debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) - List(debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) + Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) + Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) + List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) } -var newService = func(tokenSource oauth2.TokenSource) (serviceInterface, error) { - httpClient, endpoint, err := transport.NewHTTPClient(context.Background(), - []option.ClientOption{option.WithTokenSource(tokenSource)}...) 
+var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) { + httpClient, endpoint, err := transport.NewHTTPClient(ctx, option.WithTokenSource(tokenSource)) if err != nil { return nil, err } @@ -107,25 +106,27 @@ type service struct { s *cd.Service } -func (s *service) Register(req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { - return cd.NewControllerDebuggeesService(s.s).Register(req).Do() +func (s service) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { + call := cd.NewControllerDebuggeesService(s.s).Register(req) + return call.Context(ctx).Do() } -func (s *service) Update(debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { - return cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req).Do() +func (s service) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req) + return call.Context(ctx).Do() } -func (s *service) List(debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { - listCall := cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID) - listCall.WaitToken(waitToken) - return listCall.Do() +func (s service) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID) + call.WaitToken(waitToken) + return call.Context(ctx).Do() } // NewController connects to the Debuglet Controller server using the given options, // and returns a Controller for that connection. // Google Application Default Credentials are used to connect to the Debuglet Controller; // see https://developers.google.com/identity/protocols/application-default-credentials -func NewController(o Options) (*Controller, error) { +func NewController(ctx context.Context, o Options) (*Controller, error) { // We build a JSON encoding of o.SourceContexts so we can hash it. scJSON, err := json.Marshal(o.SourceContexts) if err != nil { @@ -152,7 +153,7 @@ func NewController(o Options) (*Controller, error) { description += "-" + o.AppVersion } - s, err := newService(o.TokenSource) + s, err := newService(ctx, o.TokenSource) if err != nil { return nil, err } @@ -170,14 +171,14 @@ func NewController(o Options) (*Controller, error) { return c, nil } -func (c *Controller) getDebuggeeID() (string, error) { +func (c *Controller) getDebuggeeID(ctx context.Context) (string, error) { c.mu.Lock() defer c.mu.Unlock() if c.debuggeeID != "" { return c.debuggeeID, nil } // The debuglet hasn't been registered yet, or it is disabled and we should try registering again. - if err := c.register(); err != nil { + if err := c.register(ctx); err != nil { return "", err } return c.debuggeeID, nil @@ -188,12 +189,12 @@ func (c *Controller) getDebuggeeID() (string, error) { // the previous call to List, the server can delay responding until it changes, // and return an error instead if no change occurs before a time limit the // server sets. List can't be called concurrently with itself. 
-func (c *Controller) List() (*cd.ListActiveBreakpointsResponse, error) { - id, err := c.getDebuggeeID() +func (c *Controller) List(ctx context.Context) (*cd.ListActiveBreakpointsResponse, error) { + id, err := c.getDebuggeeID(ctx) if err != nil { return nil, err } - resp, err := c.s.List(id, c.waitToken) + resp, err := c.s.List(ctx, id, c.waitToken) if err != nil { if isAbortedError(err) { return nil, ErrListUnchanged @@ -201,7 +202,7 @@ func (c *Controller) List() (*cd.ListActiveBreakpointsResponse, error) { // For other errors, the protocol requires that we attempt to re-register. c.mu.Lock() defer c.mu.Unlock() - if regError := c.register(); regError != nil { + if regError := c.register(ctx); regError != nil { return nil, regError } return nil, err @@ -231,23 +232,23 @@ func isAbortedError(err error) bool { // Update reports information to the server about a breakpoint that was hit. // Update can be called concurrently with List and Update. -func (c *Controller) Update(breakpointID string, bp *cd.Breakpoint) error { +func (c *Controller) Update(ctx context.Context, breakpointID string, bp *cd.Breakpoint) error { req := &cd.UpdateActiveBreakpointRequest{Breakpoint: bp} if c.verbose { log.Printf("sending update for %s: %v", breakpointID, req) } - id, err := c.getDebuggeeID() + id, err := c.getDebuggeeID(ctx) if err != nil { return err } - _, err = c.s.Update(id, breakpointID, req) + _, err = c.s.Update(ctx, id, breakpointID, req) return err } // register calls the Debuglet Controller Register method, and sets c.debuggeeID. // c.mu should be locked while calling this function. List and Update can't // make progress until it returns. -func (c *Controller) register() error { +func (c *Controller) register(ctx context.Context) error { req := cd.RegisterDebuggeeRequest{ Debuggee: &cd.Debuggee{ AgentVersion: agentVersionString, @@ -257,7 +258,7 @@ func (c *Controller) register() error { Uniquifier: c.uniquifier, }, } - resp, err := c.s.Register(&req) + resp, err := c.s.Register(ctx, &req) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go index 06ecfcbf1..fb439c9dc 100644 --- a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go @@ -17,6 +17,7 @@ package controller import ( "testing" + "golang.org/x/net/context" "golang.org/x/oauth2" cd "google.golang.org/api/clouddebugger/v2" @@ -91,7 +92,7 @@ type mockService struct { registerCallsSeen int } -func (s *mockService) Register(req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { +func (s *mockService) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { s.registerCallsSeen++ if req.Debuggee == nil { s.t.Errorf("missing debuggee") @@ -114,7 +115,7 @@ func (s *mockService) Register(req *cd.RegisterDebuggeeRequest) (*cd.RegisterDeb }, nil } -func (s *mockService) Update(id, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { +func (s *mockService) Update(ctx context.Context, id, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { if id != testDebuggeeID { s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) } @@ -127,7 +128,7 @@ func (s *mockService) Update(id, breakpointID 
string, req *cd.UpdateActiveBreakp return nil, nil } -func (s *mockService) List(id, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { +func (s *mockService) List(ctx context.Context, id, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { if id != testDebuggeeID { s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) } @@ -156,46 +157,47 @@ func TestDebugletControllerClientLibrary(t *testing.T) { err error ) m = &mockService{t: t} - newService = func(_ oauth2.TokenSource) (serviceInterface, error) { return m, nil } + newService = func(context.Context, oauth2.TokenSource) (serviceInterface, error) { return m, nil } opts := Options{ ProjectNumber: "5", ProjectID: "p1", AppModule: "mod1", AppVersion: "v1", } - if c, err = NewController(opts); err != nil { + ctx := context.Background() + if c, err = NewController(ctx, opts); err != nil { t.Fatal("Initializing Controller client:", err) } - if list, err = c.List(); err != nil { + if list, err = c.List(ctx); err != nil { t.Fatal("List:", err) } if m.registerCallsSeen != 1 { t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) } - if list, err = c.List(); err != nil { + if list, err = c.List(ctx); err != nil { t.Fatal("List:", err) } if len(list.Breakpoints) != 1 { t.Fatalf("got %d breakpoints, want 1", len(list.Breakpoints)) } - if err = c.Update(list.Breakpoints[0].Id, &cd.Breakpoint{Id: testBreakpointID, IsFinalState: true}); err != nil { + if err = c.Update(ctx, list.Breakpoints[0].Id, &cd.Breakpoint{Id: testBreakpointID, IsFinalState: true}); err != nil { t.Fatal("Update:", err) } - if list, err = c.List(); err != nil { + if list, err = c.List(ctx); err != nil { t.Fatal("List:", err) } if m.registerCallsSeen != 1 { t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) } // The next List call produces an error that should cause a Register call. - if list, err = c.List(); err == nil { + if list, err = c.List(ctx); err == nil { t.Fatal("List should have returned an error") } if m.registerCallsSeen != 2 { t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) } // The next List call produces an error that should not cause a Register call. - if list, err = c.List(); err == nil { + if list, err = c.List(ctx); err == nil { t.Fatal("List should have returned an error") } if m.registerCallsSeen != 2 { diff --git a/vendor/cloud.google.com/go/container/container.go b/vendor/cloud.google.com/go/container/container.go index 7cee88acb..684984eb1 100644 --- a/vendor/cloud.google.com/go/container/container.go +++ b/vendor/cloud.google.com/go/container/container.go @@ -16,6 +16,11 @@ // // For more information about the API, // see https://cloud.google.com/container-engine/docs +// +// Authentication +// +// See examples of authorization and authentication at +// https://godoc.org/cloud.google.com/go#pkg-examples. 
package container // import "cloud.google.com/go/container" import ( diff --git a/vendor/cloud.google.com/go/datastore/datastore.go b/vendor/cloud.google.com/go/datastore/datastore.go index deb27cd8d..eaf80ea00 100644 --- a/vendor/cloud.google.com/go/datastore/datastore.go +++ b/vendor/cloud.google.com/go/datastore/datastore.go @@ -21,6 +21,8 @@ import ( "os" "reflect" + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/api/option" @@ -57,8 +59,10 @@ type datastoreClient struct { func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient { return &datastoreClient{ - c: pb.NewDatastoreClient(conn), - md: metadata.Pairs(resourcePrefixHeader, "projects/"+projectID), + c: pb.NewDatastoreClient(conn), + md: metadata.Pairs( + resourcePrefixHeader, "projects/"+projectID, + "x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)), } } diff --git a/vendor/cloud.google.com/go/datastore/datastore_test.go b/vendor/cloud.google.com/go/datastore/datastore_test.go index f81feb82b..0b22b564c 100644 --- a/vendor/cloud.google.com/go/datastore/datastore_test.go +++ b/vendor/cloud.google.com/go/datastore/datastore_test.go @@ -73,6 +73,8 @@ var ( testGeoPt0 = GeoPoint{Lat: 1.2, Lng: 3.4} testGeoPt1 = GeoPoint{Lat: 5, Lng: 10} testBadGeoPt = GeoPoint{Lat: 1000, Lng: 34} + + ts = time.Unix(1e9, 0).UTC() ) type B0 struct { @@ -405,6 +407,14 @@ type PtrToInt struct { I *int } +type EmbeddedTime struct { + time.Time +} + +type SpecialTime struct { + MyTime EmbeddedTime +} + type Doubler struct { S string I int64 @@ -1874,6 +1884,22 @@ var testCases = []testCase{ "duplicate Property", "", }, + { + "embedded time field", + &SpecialTime{MyTime: EmbeddedTime{ts}}, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, + { + "embedded time load", + &PropertyList{ + Property{Name: "MyTime.Time", Value: ts}, + }, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, } // checkErr returns the empty string if either both want and err are zero, @@ -1915,12 +1941,16 @@ func TestRoundTrip(t *testing.T) { // Sort by name to make sure we have a deterministic order. sortPL(*pl) } + equal := false - if gotT, ok := got.(*T); ok { - // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. - // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. - equal = gotT.T.Equal(tc.want.(*T).T) - } else { + switch v := got.(type) { + // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. + // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. 
+ case *T: + equal = v.T.Equal(tc.want.(*T).T) + case *SpecialTime: + equal = v.MyTime.Equal(tc.want.(*SpecialTime).MyTime.Time) + default: equal = reflect.DeepEqual(got, tc.want) } if !equal { @@ -1930,6 +1960,183 @@ func TestRoundTrip(t *testing.T) { } } +type aPtrPLS struct { + Count int +} + +func (pls *aPtrPLS) Load([]Property) error { + pls.Count += 1 + return nil +} + +func (pls *aPtrPLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 4}}, nil +} + +type aValuePLS struct { + Count int +} + +func (pls aValuePLS) Load([]Property) error { + pls.Count += 2 + return nil +} + +func (pls aValuePLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 8}}, nil +} + +type aNotPLS struct { + Count int +} + +type plsString string + +func (s *plsString) Load([]Property) error { + *s = "LOADED" + return nil +} + +func (s *plsString) Save() ([]Property, error) { + return []Property{{Name: "SS", Value: "SAVED"}}, nil +} + +type aSubPLS struct { + Foo string + Bar *aPtrPLS +} + +type aSubNotPLS struct { + Foo string + Bar *aNotPLS + S plsString `datastore:",omitempty"` +} + +type aSubPLSErr struct { + Foo string + Bar aValuePLS +} + +func TestLoadSaveNestedStructPLS(t *testing.T) { + type testCase struct { + desc string + src interface{} + wantSave *pb.Entity + wantLoad interface{} + loadErr string + } + + testCases := []testCase{ + { + desc: "substruct (ptr) does implement PLS", + src: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 2}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{4}}, + }, + }, + }}, + }, + }, + // PLS impl for 'S' not used, not entity. + wantLoad: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 1}}, + }, + { + desc: "substruct (ptr) does implement PLS, nil valued substruct", + src: &aSubPLS{Foo: "foo"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + }, + }, + wantLoad: &aSubPLS{Foo: "foo"}, + }, + { + desc: "substruct (ptr) does not implement PLS", + src: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}, S: "something"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + // PLS impl for 'S' not used, not entity. 
+ "S": {ValueType: &pb.Value_StringValue{"something"}}, + }, + }, + wantLoad: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}, S: "something"}, + }, + { + desc: "substruct (value) does implement PLS, error", + src: &aSubPLSErr{Foo: "foo", Bar: aValuePLS{Count: 3}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{8}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLSErr{}, + loadErr: "PropertyLoadSaver methods must be implemented on a pointer", + }, + } + + for _, tc := range testCases { + e, err := saveEntity(testKey0, tc.src) + if err != nil { + t.Errorf("%s: save: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(e, tc.wantSave) { + t.Errorf("%s: save: got: %#v, want: %#v", tc.desc, e, tc.wantSave) + continue + } + + gota := reflect.New(reflect.TypeOf(tc.wantLoad).Elem()).Interface() + err = loadEntity(gota, e) + switch tc.loadErr { + case "": + if err != nil { + t.Errorf("%s: load: %v", tc.desc, err) + continue + } + default: + if err == nil { + t.Errorf("%s: load: want err", tc.desc) + continue + } + if !strings.Contains(err.Error(), tc.loadErr) { + t.Errorf("%s: load: want err '%s', got '%s'", tc.desc, err.Error(), tc.loadErr) + } + continue + } + + if !reflect.DeepEqual(tc.wantLoad, gota) { + t.Errorf("%s: load: got: %#v, want: %#v", tc.desc, gota, tc.wantLoad) + continue + } + } + +} + func TestQueryConstruction(t *testing.T) { tests := []struct { q, exp *Query diff --git a/vendor/cloud.google.com/go/datastore/doc.go b/vendor/cloud.google.com/go/datastore/doc.go index 854d12e6a..5e3a6a82a 100644 --- a/vendor/cloud.google.com/go/datastore/doc.go +++ b/vendor/cloud.google.com/go/datastore/doc.go @@ -411,5 +411,10 @@ directed to the emulator instead of the production Datastore service. To install and set up the emulator and its environment variables, see the documentation at https://cloud.google.com/datastore/docs/tools/datastore-emulator. +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. + */ package datastore // import "cloud.google.com/go/datastore" diff --git a/vendor/cloud.google.com/go/datastore/example_test.go b/vendor/cloud.google.com/go/datastore/example_test.go index c6f4ff667..c6f81e13b 100644 --- a/vendor/cloud.google.com/go/datastore/example_test.go +++ b/vendor/cloud.google.com/go/datastore/example_test.go @@ -24,22 +24,6 @@ import ( "google.golang.org/api/iterator" ) -// TODO(jbd): Document other authorization methods and refer to them here. -func Example_auth() { - ctx := context.Background() - // Use Google Application Default Credentials to authorize and authenticate the client. - // More information about Application Default Credentials and how to enable is at - // https://developers.google.com/identity/protocols/application-default-credentials. - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - // TODO: Handle error. - } - // Use the client (see other examples). - - // Close the client when finished. 
- client.Close() -} - func ExampleNewClient() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") diff --git a/vendor/cloud.google.com/go/datastore/load.go b/vendor/cloud.google.com/go/datastore/load.go index 2168dc264..7a70ff2a3 100644 --- a/vendor/cloud.google.com/go/datastore/load.go +++ b/vendor/cloud.google.com/go/datastore/load.go @@ -89,36 +89,30 @@ func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.V var v reflect.Value name := p.Name - for name != "" { - // First we try to find a field with name matching - // the value of 'name' exactly (though case-insensitively). - field := codec.Match(name) - if field != nil { - name = "" - } else { - // Now try for legacy flattened nested field (named eg. "A.B.C.D"). - - parent := name - child := "" - - // Cut off the last field (delimited by ".") and find its parent - // in the codec. - // eg. for name "A.B.C.D", split off "A.B.C" and try to - // find a field in the codec with this name. - // Loop again with "A.B", etc. - for field == nil { - i := strings.LastIndex(parent, ".") - if i < 0 { - return "no such struct field" - } - if i == len(name)-1 { - return "field name cannot end with '.'" - } - parent, child = name[:i], name[i+1:] - field = codec.Match(parent) + fieldNames := strings.Split(name, ".") + + for len(fieldNames) > 0 { + var field *fields.Field + + // Start by trying to find a field with name. If none found, + // cut off the last field (delimited by ".") and find its parent + // in the codec. + // eg. for name "A.B.C.D", split off "A.B.C" and try to + // find a field in the codec with this name. + // Loop again with "A.B", etc. + for i := len(fieldNames); i > 0; i-- { + parent := strings.Join(fieldNames[:i], ".") + field = codec.Match(parent) + if field != nil { + fieldNames = fieldNames[i:] + break } + } - name = child + // If we never found a matching field in the codec, return + // error message. + if field == nil { + return "no such struct field" } v = initField(structValue, field.Index) @@ -236,19 +230,28 @@ func setVal(v reflect.Value, p Property) string { return "" } - switch pValue.(type) { + switch x := pValue.(type) { case *Key: if _, ok := v.Interface().(*Key); !ok { return typeMismatchReason(p, v) } - v.Set(reflect.ValueOf(pValue.(*Key))) + v.Set(reflect.ValueOf(x)) case *Entity: - if v.Type().Elem().Kind() != reflect.Struct { - return typeMismatchReason(p, v) - } if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } + // Check if v implements PropertyLoadSaver. + if pls, ok := v.Interface().(PropertyLoadSaver); ok { + err := pls.Load(x.Properties) + if err != nil { + return err.Error() + } + return "" + } + if v.Type().Elem().Kind() != reflect.Struct { + return typeMismatchReason(p, v) + } + return setVal(v.Elem(), p) default: return typeMismatchReason(p, v) @@ -273,7 +276,12 @@ func setVal(v reflect.Value, p Property) string { return typeMismatchReason(p, v) } - // Recursively load nested struct + // Check if v implements PropertyLoadSaver. + if _, ok := v.Interface().(PropertyLoadSaver); ok { + return fmt.Sprintf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface()) + } + + // Recursively load nested struct. 
pls, err := newStructPLS(v.Addr().Interface()) if err != nil { return err.Error() diff --git a/vendor/cloud.google.com/go/datastore/prop.go b/vendor/cloud.google.com/go/datastore/prop.go index c148b2faa..69e424b37 100644 --- a/vendor/cloud.google.com/go/datastore/prop.go +++ b/vendor/cloud.google.com/go/datastore/prop.go @@ -223,8 +223,14 @@ func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool return nil } +// isLeafType determines whether or not a type is a 'leaf type' +// and should not be recursed into, but considered one field. +func isLeafType(t reflect.Type) bool { + return t == typeOfTime || t == typeOfGeoPoint +} + // structCache collects the structs whose fields have already been calculated. -var structCache = fields.NewCache(parseTag, validateType) +var structCache = fields.NewCache(parseTag, validateType, isLeafType) // structPLS adapts a struct to be a PropertyLoadSaver. type structPLS struct { @@ -249,6 +255,11 @@ func newStructPLS(p interface{}) (*structPLS, error) { // LoadStruct loads the properties from p to dst. // dst must be a struct pointer. +// +// The values of dst's unmatched struct fields are not modified, +// and matching slice-typed fields are not reset before appending to +// them. In particular, it is recommended to pass a pointer to a zero +// valued struct on each LoadStruct call. func LoadStruct(dst interface{}, p []Property) error { x, err := newStructPLS(dst) if err != nil { diff --git a/vendor/cloud.google.com/go/datastore/save.go b/vendor/cloud.google.com/go/datastore/save.go index 75fa6385a..70bfaaa63 100644 --- a/vendor/cloud.google.com/go/datastore/save.go +++ b/vendor/cloud.google.com/go/datastore/save.go @@ -57,6 +57,9 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect return nil } + // Check if v implements PropertyLoadSaver. + pls, isPLS := v.Interface().(PropertyLoadSaver) + switch x := v.Interface().(type) { case *Key, time.Time, GeoPoint: p.Value = x @@ -86,6 +89,15 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect v = v.Elem() fallthrough case reflect.Struct: + if isPLS { + subProps, err := pls.Save() + if err != nil { + return err + } + p.Value = &Entity{Properties: subProps} + break + } + if !v.CanAddr() { return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") } @@ -107,6 +119,7 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect if err != nil { return err } + p.Value = &Entity{ Key: subKey, Properties: subProps, diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go index ec2bf5bba..58044e688 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,4 +22,14 @@ // errors and read access to error groups and their associated errors. 
package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go index dddfd5ed4..ca5ff85e6 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,10 +18,9 @@ package errorreporting import ( "fmt" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" @@ -29,7 +28,6 @@ import ( clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -84,7 +82,7 @@ type ErrorGroupClient struct { CallOptions *ErrorGroupCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewErrorGroupClient creates a new error group service client. @@ -101,7 +99,7 @@ func NewErrorGroupClient(ctx context.Context, opts ...option.ClientOption) (*Err errorGroupClient: clouderrorreportingpb.NewErrorGroupServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -119,10 +117,8 @@ func (c *ErrorGroupClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *ErrorGroupClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *ErrorGroupClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // ErrorGroupGroupPath returns the path for the group resource. @@ -139,8 +135,7 @@ func ErrorGroupGroupPath(project, group string) string { // GetGroup get the specified group. func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *clouderrorreportingpb.ErrorGroup err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -156,8 +151,7 @@ func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportin // UpdateGroup replace the data for the specified group. // Fails if the group does not exist. 
func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *clouderrorreportingpb.ErrorGroup err := gax.Invoke(ctx, func(ctx context.Context) error { var err error diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go index a2c5beaa0..bc8619fff 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go index d9ef5b37a..268a8ee5c 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ package errorreporting import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -31,7 +30,6 @@ import ( clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -88,7 +86,7 @@ type ErrorStatsClient struct { CallOptions *ErrorStatsCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewErrorStatsClient creates a new error stats service client. @@ -106,7 +104,7 @@ func NewErrorStatsClient(ctx context.Context, opts ...option.ClientOption) (*Err errorStatsClient: clouderrorreportingpb.NewErrorStatsServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -124,10 +122,8 @@ func (c *ErrorStatsClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *ErrorStatsClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *ErrorStatsClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // ErrorStatsProjectPath returns the path for the project resource. 
@@ -143,8 +139,7 @@ func ErrorStatsProjectPath(project string) string { // ListGroupStats lists the specified groups. func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) *ErrorGroupStatsIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &ErrorGroupStatsIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) { var resp *clouderrorreportingpb.ListGroupStatsResponse @@ -178,8 +173,7 @@ func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorre // ListEvents lists the specified events. func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest) *ErrorEventIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &ErrorEventIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) { var resp *clouderrorreportingpb.ListEventsResponse @@ -213,8 +207,7 @@ func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreport // DeleteEvents deletes all error events of a given project. func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *clouderrorreportingpb.DeleteEventsResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go index d5bce0d25..6f7e9e60b 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go index dc518f103..491f7b63e 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go index 8bc4f00c7..c6455a252 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,16 +18,14 @@ package errorreporting import ( "fmt" - "runtime" - "strings" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" "google.golang.org/grpc" - "google.golang.org/grpc/metadata" ) var ( @@ -67,7 +65,7 @@ type ReportErrorsClient struct { CallOptions *ReportErrorsCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewReportErrorsClient creates a new report errors service client. @@ -84,7 +82,7 @@ func NewReportErrorsClient(ctx context.Context, opts ...option.ClientOption) (*R reportErrorsClient: clouderrorreportingpb.NewReportErrorsServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -102,10 +100,8 @@ func (c *ReportErrorsClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *ReportErrorsClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *ReportErrorsClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // ReportErrorsProjectPath returns the path for the project resource. @@ -128,8 +124,7 @@ func ReportErrorsProjectPath(project string) string { // a `key` parameter. For example: //
POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *clouderrorreportingpb.ReportErrorEventResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go index 0249988ad..ed4cfc441 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/errors/errors.go b/vendor/cloud.google.com/go/errors/errors.go index 3d85c4cdd..88a1be81b 100644 --- a/vendor/cloud.google.com/go/errors/errors.go +++ b/vendor/cloud.google.com/go/errors/errors.go @@ -88,6 +88,7 @@ import ( "time" api "cloud.google.com/go/errorreporting/apiv1beta1" + "cloud.google.com/go/internal/version" "cloud.google.com/go/logging" "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" @@ -105,7 +106,11 @@ type apiInterface interface { var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) { client, err := api.NewReportErrorsClient(ctx, opts...) - return client, err + if err != nil { + return nil, err + } + client.SetGoogleClientInfo("gccl", version.Repo) + return client, nil } type loggerInterface interface { diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go index e9e67bbf5..a976ac099 100644 --- a/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,4 +22,14 @@ // authenticate to Google and make API calls. package admin // import "cloud.google.com/go/iam/admin/apiv1" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go index e11aa30f7..a516a3558 100644 --- a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,10 +19,9 @@ package admin import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -32,7 +31,6 @@ import ( iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -114,7 +112,7 @@ type IamClient struct { CallOptions *IamCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewIamClient creates a new iam client. @@ -146,7 +144,7 @@ func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, iamClient: adminpb.NewIAMClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -164,10 +162,8 @@ func (c *IamClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *IamClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *IamClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // IamProjectPath returns the path for the project resource. @@ -208,8 +204,7 @@ func IamKeyPath(project, serviceAccount, key string) string { // ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project. func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest) *ServiceAccountIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &ServiceAccountIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*adminpb.ServiceAccount, string, error) { var resp *adminpb.ListServiceAccountsResponse @@ -243,8 +238,7 @@ func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListSe // GetServiceAccount gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -260,8 +254,7 @@ func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServi // CreateServiceAccount creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount] // and returns it. func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -280,8 +273,7 @@ func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.Creat // `display_name` . 
// The `etag` is mandatory. func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -296,8 +288,7 @@ func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.Servi // DeleteServiceAccount deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.iamClient.DeleteServiceAccount(ctx, req) @@ -308,8 +299,7 @@ func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.Delet // ListServiceAccountKeys lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey]. func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.ListServiceAccountKeysResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -325,8 +315,7 @@ func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.Lis // GetServiceAccountKey gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] // by key id. func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.ServiceAccountKey err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -342,8 +331,7 @@ func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetSe // CreateServiceAccountKey creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] // and returns it. func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.ServiceAccountKey err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -358,8 +346,7 @@ func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.Cr // DeleteServiceAccountKey deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]. func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.iamClient.DeleteServiceAccountKey(ctx, req) @@ -370,8 +357,7 @@ func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.De // SignBlob signs a blob using a service account's system-managed private key. 
func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.SignBlobResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -387,8 +373,7 @@ func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) // getIamPolicy returns the IAM access control policy for a // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -404,8 +389,7 @@ func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyReq // setIamPolicy sets the IAM access control policy for a // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -421,8 +405,7 @@ func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReq // TestIamPermissions tests the specified permissions against the IAM access control policy // for a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -439,8 +422,7 @@ func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPe // A role is grantable if it can be used as the role in a binding for a policy // for that resource. func (c *IamClient) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *adminpb.QueryGrantableRolesResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go index c9e299a2f..983d48e2f 100644 --- a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go index 2561bad77..5d8a1fd8e 100644 --- a/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/internal/fields/fields.go b/vendor/cloud.google.com/go/internal/fields/fields.go index 9f10d41db..4f5516eae 100644 --- a/vendor/cloud.google.com/go/internal/fields/fields.go +++ b/vendor/cloud.google.com/go/internal/fields/fields.go @@ -29,11 +29,22 @@ // // func validate(t reflect.Type) error { ... } // +// Then, if necessary, define a function to specify leaf types - types +// which should be considered one field and not be recursed into: +// +// func isLeafType(t reflect.Type) bool { ... } +// +// eg: +// +// func isLeafType(t reflect.Type) bool { +// return t == reflect.TypeOf(time.Time{}) +// } +// // Next, construct a Cache, passing your functions. As its name suggests, a // Cache remembers validation and field information for a type, so subsequent // calls with the same type are very fast. // -// cache := fields.NewCache(parseTag, validate) +// cache := fields.NewCache(parseTag, validate, isLeafType) // // To get the fields of a struct type as determined by the above rules, call // the Fields method: @@ -76,13 +87,16 @@ type ParseTagFunc func(reflect.StructTag) (name string, keep bool, other interfa type ValidateFunc func(reflect.Type) error +type LeafTypesFunc func(reflect.Type) bool + // A Cache records information about the fields of struct types. // // A Cache is safe for use by multiple goroutines. type Cache struct { - parseTag ParseTagFunc - validate ValidateFunc - cache atomiccache.Cache // from reflect.Type to cacheValue + parseTag ParseTagFunc + validate ValidateFunc + leafTypes LeafTypesFunc + cache atomiccache.Cache // from reflect.Type to cacheValue } // NewCache constructs a Cache. @@ -97,7 +111,7 @@ type Cache struct { // returns an error if the struct type is invalid in any way. For example, it // may check that all of the struct field tags are valid, or that all fields // are of an appropriate type. -func NewCache(parseTag ParseTagFunc, validate ValidateFunc) *Cache { +func NewCache(parseTag ParseTagFunc, validate ValidateFunc, leafTypes LeafTypesFunc) *Cache { if parseTag == nil { parseTag = func(reflect.StructTag) (string, bool, interface{}, error) { return "", true, nil, nil @@ -108,7 +122,17 @@ func NewCache(parseTag ParseTagFunc, validate ValidateFunc) *Cache { return nil } } - return &Cache{parseTag: parseTag, validate: validate} + if leafTypes == nil { + leafTypes = func(reflect.Type) bool { + return false + } + } + + return &Cache{ + parseTag: parseTag, + validate: validate, + leafTypes: leafTypes, + } } // A fieldScan represents an item on the fieldByNameFunc scan work list. @@ -270,6 +294,7 @@ func (c *Cache) listFields(t reflect.Type) ([]Field, error) { visited[t] = true for i := 0; i < t.NumField(); i++ { f := t.Field(i) + exported := (f.PkgPath == "") // If a named field is unexported, ignore it. 
An anonymous @@ -287,6 +312,10 @@ func (c *Cache) listFields(t reflect.Type) ([]Field, error) { if !keep { continue } + if c.leafTypes(f.Type) { + fields = append(fields, newField(f, tagName, other, scan.index, i)) + continue + } var ntyp reflect.Type if f.Anonymous { diff --git a/vendor/cloud.google.com/go/internal/fields/fields_test.go b/vendor/cloud.google.com/go/internal/fields/fields_test.go index 74a4c579e..904d8b855 100644 --- a/vendor/cloud.google.com/go/internal/fields/fields_test.go +++ b/vendor/cloud.google.com/go/internal/fields/fields_test.go @@ -21,6 +21,7 @@ import ( "reflect" "strings" "testing" + "time" ) type embed1 struct { @@ -62,6 +63,10 @@ type S1 struct { Anonymous } +type Time struct { + time.Time +} + var intType = reflect.TypeOf(int(0)) func field(name string, tval interface{}, index ...int) *Field { @@ -82,7 +87,7 @@ func tfield(name string, tval interface{}, index ...int) *Field { } func TestFieldsNoTags(t *testing.T) { - c := NewCache(nil, nil) + c := NewCache(nil, nil, nil) got, err := c.Fields(reflect.TypeOf(S1{})) if err != nil { t.Fatal(err) @@ -132,7 +137,7 @@ func TestAgainstJSONEncodingNoTags(t *testing.T) { jsonRoundTrip(t, s1, &want) var got S1 got.embed2 = &embed2{} // need this because reflection won't create it - fields, err := NewCache(nil, nil).Fields(reflect.TypeOf(got)) + fields, err := NewCache(nil, nil, nil).Fields(reflect.TypeOf(got)) if err != nil { t.Fatal(err) } @@ -142,6 +147,30 @@ func TestAgainstJSONEncodingNoTags(t *testing.T) { } } +// Tests use of LeafTypes parameter to NewCache +func TestAgainstJSONEncodingEmbeddedTime(t *testing.T) { + timeLeafFn := func(t reflect.Type) bool { + return t == reflect.TypeOf(time.Time{}) + } + // Demonstrates that this package can produce the same set of + // fields as encoding/json for a struct with an embedded time.Time. 
+ now := time.Now().UTC() + myt := Time{ + now, + } + var want Time + jsonRoundTrip(t, myt, &want) + var got Time + fields, err := NewCache(nil, nil, timeLeafFn).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, myt) + if !reflect.DeepEqual(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + type S2 struct { NoTag int XXX int `json:"tag"` // tag name takes precedence @@ -196,7 +225,7 @@ func validateFunc(t reflect.Type) (err error) { } func TestFieldsWithTags(t *testing.T) { - got, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(S2{})) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) if err != nil { t.Fatal(err) } @@ -235,7 +264,7 @@ func TestAgainstJSONEncodingWithTags(t *testing.T) { var want S2 jsonRoundTrip(t, s2, &want) var got S2 - fields, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(got)) + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(got)) if err != nil { t.Fatal(err) } @@ -259,7 +288,7 @@ func TestUnexportedAnonymousNonStruct(t *testing.T) { } ) - got, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(S{})) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) if err != nil { t.Fatal(err) } @@ -278,7 +307,7 @@ func TestUnexportedAnonymousStruct(t *testing.T) { s1 `json:"Y"` } ) - got, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(S2{})) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) if err != nil { t.Fatal(err) } @@ -320,7 +349,7 @@ func TestIgnore(t *testing.T) { type S struct { X int `json:"-"` } - got, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(S{})) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) if err != nil { t.Fatal(err) } @@ -333,7 +362,7 @@ func TestParsedTag(t *testing.T) { type S struct { X int `json:"name,omitempty"` } - got, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(S{})) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) if err != nil { t.Fatal(err) } @@ -352,7 +381,7 @@ func TestValidateFunc(t *testing.T) { B []int } - _, err := NewCache(nil, validateFunc).Fields(reflect.TypeOf(MyInvalidStruct{})) + _, err := NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyInvalidStruct{})) if err == nil { t.Fatal("expected error, got nil") } @@ -361,7 +390,7 @@ func TestValidateFunc(t *testing.T) { A string B int } - _, err = NewCache(nil, validateFunc).Fields(reflect.TypeOf(MyValidStruct{})) + _, err = NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyValidStruct{})) if err != nil { t.Fatalf("expected nil, got error: %s\n", err) } @@ -430,7 +459,7 @@ type S4 struct { } func TestMatchingField(t *testing.T) { - fields, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(S3{})) + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) if err != nil { t.Fatal(err) } @@ -472,7 +501,7 @@ func TestAgainstJSONMatchingField(t *testing.T) { var want S3 jsonRoundTrip(t, s3, &want) v := reflect.ValueOf(want) - fields, err := NewCache(jsonTagParser, nil).Fields(reflect.TypeOf(S3{})) + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) if err != nil { t.Fatal(err) } @@ -506,7 +535,7 @@ func TestTagErrors(t *testing.T) { return "", false, nil, errors.New("error") } return s, true, nil, nil - }, nil) + }, nil, nil) type T struct { X int `f:"ok"` diff --git a/vendor/cloud.google.com/go/internal/kokoro/build.sh 
b/vendor/cloud.google.com/go/internal/kokoro/build.sh index 3a868a9b9..9b93ac4c4 100755 --- a/vendor/cloud.google.com/go/internal/kokoro/build.sh +++ b/vendor/cloud.google.com/go/internal/kokoro/build.sh @@ -1,11 +1,14 @@ #!/bin/bash # Fail on any error -set -e +set -eo pipefail # Display commands being run set -x +# cd to project dir on Kokoro instance +cd git/gocloud + go version # Set $GOPATH @@ -18,5 +21,21 @@ cp -R ./* $GOCLOUD_HOME cd $GOCLOUD_HOME go get -v ./... -# Run tests -GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" go test -race -v ./... \ No newline at end of file +# # Don't run integration tests until we can protect against code from +# # untrusted forks reading and storing our service account key. +# cd internal/kokoro +# # Don't print out encryption keys, etc +# set +x +# key=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_key) +# iv=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_iv) +# pass=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_pass) + +# openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d +# set -x + +# export GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" +# export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" +# cd $GOCLOUD_HOME + +# Run tests and tee output to log file, to be pushed to GCS as artifact. + go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_REVISION.log \ No newline at end of file diff --git a/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/continuous.cfg b/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/continuous.cfg deleted file mode 100644 index 0620d6c43..000000000 --- a/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/continuous.cfg +++ /dev/null @@ -1,4 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Location of the continuous bash script in Git. -build_file: "gocloud/internal/kokoro/gcp_ubuntu/continuous.sh" \ No newline at end of file diff --git a/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/continuous.sh b/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/continuous.sh deleted file mode 100755 index a082044d5..000000000 --- a/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/continuous.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# Fail on any error. -set -e - -cd git/gocloud -./internal/kokoro/build.sh diff --git a/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/presubmit.cfg b/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/presubmit.cfg deleted file mode 100644 index 0620d6c43..000000000 --- a/vendor/cloud.google.com/go/internal/kokoro/gcp_ubuntu/presubmit.cfg +++ /dev/null @@ -1,4 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Location of the continuous bash script in Git. 
-build_file: "gocloud/internal/kokoro/gcp_ubuntu/continuous.sh" \ No newline at end of file diff --git a/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc b/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc new file mode 100644 index 000000000..b23885469 Binary files /dev/null and b/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc differ diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index 9ff132236..0d7f05d70 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -19,22 +19,31 @@ package version import ( - "regexp" + "bytes" "runtime" + "unicode" ) // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20161214" +const Repo = "20170210" // Go returns the Go runtime version. The returned string // has no whitespace. func Go() string { - return removeWhitespace(runtime.Version()) + return goVersion } -var whitespace = regexp.MustCompile(`\s`) +var goVersion = removeWhitespace(runtime.Version()) func removeWhitespace(s string) string { - return whitespace.ReplaceAllString(s, "_") + var buf bytes.Buffer + for _, r := range s { + if unicode.IsSpace(r) { + buf.WriteByte('_') + } else { + buf.WriteRune(r) + } + } + return buf.String() } diff --git a/vendor/cloud.google.com/go/internal/version/version_test.go b/vendor/cloud.google.com/go/internal/version/version_test.go index c5b937296..a7bc4db64 100644 --- a/vendor/cloud.google.com/go/internal/version/version_test.go +++ b/vendor/cloud.google.com/go/internal/version/version_test.go @@ -21,7 +21,7 @@ import ( func TestGo(t *testing.T) { got := Go() - want := `^go1\.\S+$` + want := `^\S+$` match, err := regexp.MatchString(want, got) if err != nil { t.Fatal(err) diff --git a/vendor/cloud.google.com/go/language/apiv1/doc.go b/vendor/cloud.google.com/go/language/apiv1/doc.go index eeea774bb..8c8cb2fcf 100644 --- a/vendor/cloud.google.com/go/language/apiv1/doc.go +++ b/vendor/cloud.google.com/go/language/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,4 +22,14 @@ // recognition, and text annotations. package language // import "cloud.google.com/go/language/apiv1" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client.go b/vendor/cloud.google.com/go/language/apiv1/language_client.go index 9762a00b1..d7f826977 100644 --- a/vendor/cloud.google.com/go/language/apiv1/language_client.go +++ b/vendor/cloud.google.com/go/language/apiv1/language_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,10 +18,9 @@ package language import ( "fmt" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" @@ -29,7 +28,6 @@ import ( languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) // CallOptions contains the retry settings for each method of Client. @@ -84,7 +82,7 @@ type Client struct { CallOptions *CallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewClient creates a new language service client. @@ -102,7 +100,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error client: languagepb.NewLanguageServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -120,16 +118,13 @@ func (c *Client) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *Client) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *Client) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // AnalyzeSentiment analyzes the sentiment of the provided text. func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *languagepb.AnalyzeSentimentResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -145,8 +140,7 @@ func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSe // AnalyzeEntities finds named entities (currently finds proper names) in the text, // entity types, salience, mentions for each entity, and other properties. func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *languagepb.AnalyzeEntitiesResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -163,8 +157,7 @@ func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEnt // tokenization along with part of speech tags, dependency trees, and other // properties. 
func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *languagepb.AnalyzeSyntaxResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -180,8 +173,7 @@ func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSynta // AnnotateText a convenience method that provides all the features that analyzeSentiment, // analyzeEntities, and analyzeSyntax provide in one call. func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *languagepb.AnnotateTextResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go index c1ed7dc42..f1fcf1476 100644 --- a/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go +++ b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/language/apiv1/mock_test.go b/vendor/cloud.google.com/go/language/apiv1/mock_test.go index f4f590ccc..adf11e34f 100644 --- a/vendor/cloud.google.com/go/language/apiv1/mock_test.go +++ b/vendor/cloud.google.com/go/language/apiv1/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -186,7 +186,7 @@ func TestLanguageServiceAnalyzeEntities(t *testing.T) { mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} - var encodingType languagepb.EncodingType = 0 + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitiesRequest{ Document: document, EncodingType: encodingType, @@ -217,7 +217,7 @@ func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) { mockLanguage.err = grpc.Errorf(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} - var encodingType languagepb.EncodingType = 0 + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitiesRequest{ Document: document, EncodingType: encodingType, @@ -247,7 +247,7 @@ func TestLanguageServiceAnalyzeSyntax(t *testing.T) { mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} - var encodingType languagepb.EncodingType = 0 + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeSyntaxRequest{ Document: document, EncodingType: encodingType, @@ -278,7 +278,7 @@ func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) { mockLanguage.err = grpc.Errorf(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} - var encodingType languagepb.EncodingType = 0 + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeSyntaxRequest{ Document: document, EncodingType: encodingType, @@ -309,7 +309,7 @@ func TestLanguageServiceAnnotateText(t *testing.T) { var document *languagepb.Document = &languagepb.Document{} var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} - var encodingType languagepb.EncodingType = 0 + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnnotateTextRequest{ Document: document, Features: features, @@ -342,7 +342,7 @@ func TestLanguageServiceAnnotateTextError(t *testing.T) { var document *languagepb.Document = &languagepb.Document{} var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} - var encodingType languagepb.EncodingType = 0 + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnnotateTextRequest{ Document: document, Features: features, diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go index d67be97e0..119e693a3 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/config_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,10 +19,9 @@ package logging import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -31,7 +30,6 @@ import ( loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -97,7 +95,7 @@ type ConfigClient struct { CallOptions *ConfigCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewConfigClient creates a new config service v2 client. @@ -115,7 +113,7 @@ func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigC configClient: loggingpb.NewConfigServiceV2Client(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -133,10 +131,8 @@ func (c *ConfigClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *ConfigClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *ConfigClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // ConfigProjectPath returns the path for the project resource. @@ -164,8 +160,7 @@ func ConfigSinkPath(project, sink string) string { // ListSinks lists sinks. func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &LogSinkIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { var resp *loggingpb.ListSinksResponse @@ -199,8 +194,7 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe // GetSink gets a sink. func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -219,8 +213,7 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques // `writer_identity` is not permitted to write to the destination. A sink can // export log entries only from the resource owning the sink. func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -242,8 +235,7 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink // The updated filter might also have a new `writer_identity`; see the // `unique_writer_identity` field. 
func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -259,8 +251,7 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink // DeleteSink deletes a sink. If the sink has a unique `writer_identity`, then that // service account is also deleted. func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.configClient.DeleteSink(ctx, req) diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go index 8e2badec6..620aa6505 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go index 3a9227867..c0016d46d 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/doc.go +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,4 +23,14 @@ // Use the client at cloud.google.com/go/logging in preference to this. package logging // import "cloud.google.com/go/logging/apiv2" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go index 81adc2423..a1ace7136 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,10 +19,9 @@ package logging import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -32,7 +31,6 @@ import ( loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -110,7 +108,7 @@ type Client struct { CallOptions *CallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewClient creates a new logging service v2 client. @@ -127,7 +125,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error client: loggingpb.NewLoggingServiceV2Client(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -145,10 +143,8 @@ func (c *Client) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *Client) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *Client) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // LoggingProjectPath returns the path for the project resource. @@ -177,8 +173,7 @@ func LoggingLogPath(project, log string) string { // DeleteLog deletes all the log entries in a log. // The log reappears if it receives new entries. func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.client.DeleteLog(ctx, req) @@ -190,8 +185,7 @@ func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) // WriteLogEntries writes log entries to Stackdriver Logging. All log entries are // written by this method. func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *loggingpb.WriteLogEntriesResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -208,8 +202,7 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt // Stackdriver Logging. For ways to export log entries, see // [Exporting Logs](/logging/docs/export). 
func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &LogEntryIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) { var resp *loggingpb.ListLogEntriesResponse @@ -244,8 +237,7 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri // ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver // Logging. func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &MonitoredResourceDescriptorIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { var resp *loggingpb.ListMonitoredResourceDescriptorsResponse @@ -280,8 +272,7 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg // ListLogs lists the logs in projects or organizations. // Only logs that have entries are listed. func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest) *StringIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &StringIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { var resp *loggingpb.ListLogsResponse diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go index 8e7994498..6dc537f5a 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go index ac838e8d6..144ea5a18 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ package logging import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -31,7 +30,6 @@ import ( loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -97,7 +95,7 @@ type MetricsClient struct { CallOptions *MetricsCallOptions // The metadata to be sent with each request. 
- metadata metadata.MD + xGoogHeader string } // NewMetricsClient creates a new metrics service v2 client. @@ -114,7 +112,7 @@ func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*Metric metricsClient: loggingpb.NewMetricsServiceV2Client(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -132,10 +130,8 @@ func (c *MetricsClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *MetricsClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *MetricsClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // MetricsProjectPath returns the path for the project resource. @@ -163,8 +159,7 @@ func MetricsMetricPath(project, metric string) string { // ListLogMetrics lists logs-based metrics. func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &LogMetricIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { var resp *loggingpb.ListLogMetricsResponse @@ -198,8 +193,7 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL // GetLogMetric gets a logs-based metric. func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -214,8 +208,7 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM // CreateLogMetric creates a logs-based metric. func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -230,8 +223,7 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea // UpdateLogMetric creates or updates a logs-based metric. func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -246,8 +238,7 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda // DeleteLogMetric deletes a logs-based metric. 
func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.metricsClient.DeleteLogMetric(ctx, req) diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go index c2e5c45b9..c92412047 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/logging/apiv2/mock_test.go b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go index af513a493..032982c3d 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/mock_test.go +++ b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/logging/logadmin/logadmin.go b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go index 0a9924d68..aba055efc 100644 --- a/vendor/cloud.google.com/go/logging/logadmin/logadmin.go +++ b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go @@ -34,6 +34,7 @@ import ( "strings" "time" + "cloud.google.com/go/internal/version" "cloud.google.com/go/logging" vkit "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/internal" @@ -83,9 +84,9 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio if err != nil { return nil, err } - lc.SetGoogleClientInfo("logging", internal.Version) - sc.SetGoogleClientInfo("logging", internal.Version) - mc.SetGoogleClientInfo("logging", internal.Version) + lc.SetGoogleClientInfo("gccl", version.Repo) + sc.SetGoogleClientInfo("gccl", version.Repo) + mc.SetGoogleClientInfo("gccl", version.Repo) client := &Client{ lClient: lc, sClient: sc, diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go index 51d92eba0..b56938ede 100644 --- a/vendor/cloud.google.com/go/logging/logging.go +++ b/vendor/cloud.google.com/go/logging/logging.go @@ -36,6 +36,7 @@ import ( "sync" "time" + "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/internal" "github.com/golang/protobuf/proto" @@ -125,7 +126,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio if err != nil { return nil, err } - c.SetGoogleClientInfo("logging", internal.Version) + c.SetGoogleClientInfo("gccl", version.Repo) client := &Client{ client: c, projectID: projectID, diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go index 92c1536d8..3fa72a351 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. 
+// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,4 +22,14 @@ // noted on the individual method pages. package monitoring // import "cloud.google.com/go/monitoring/apiv3" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go index 0cce06f83..e16fe0e7b 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ package monitoring import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -32,7 +31,6 @@ import ( monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -99,7 +97,7 @@ type GroupClient struct { CallOptions *GroupCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewGroupClient creates a new group service client. @@ -127,7 +125,7 @@ func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupCli groupClient: monitoringpb.NewGroupServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -145,10 +143,8 @@ func (c *GroupClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *GroupClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *GroupClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // GroupProjectPath returns the path for the project resource. @@ -176,8 +172,7 @@ func GroupGroupPath(project, group string) string { // ListGroups lists the existing groups. func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest) *GroupIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &GroupIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { var resp *monitoringpb.ListGroupsResponse @@ -211,8 +206,7 @@ func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGrou // GetGroup gets a single group. 
func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest) (*monitoringpb.Group, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *monitoringpb.Group err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -227,8 +221,7 @@ func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRe // CreateGroup creates a new group. func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest) (*monitoringpb.Group, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *monitoringpb.Group err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -244,8 +237,7 @@ func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateG // UpdateGroup updates an existing group. // You can change any group attributes except `name`. func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest) (*monitoringpb.Group, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *monitoringpb.Group err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -260,8 +252,7 @@ func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateG // DeleteGroup deletes an existing group. func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.groupClient.DeleteGroup(ctx, req) @@ -272,8 +263,7 @@ func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteG // ListGroupMembers lists the monitored resources that are members of a group. func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest) *MonitoredResourceIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &MonitoredResourceIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { var resp *monitoringpb.ListGroupMembersResponse diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go index cc35ecc31..095661a77 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go index e5e950b35..e591ce343 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. 
All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ package monitoring import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -33,7 +32,6 @@ import ( monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -105,7 +103,7 @@ type MetricClient struct { CallOptions *MetricCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewMetricClient creates a new metric service client. @@ -123,7 +121,7 @@ func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricC metricClient: monitoringpb.NewMetricServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -141,10 +139,8 @@ func (c *MetricClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *MetricClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *MetricClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // MetricProjectPath returns the path for the project resource. @@ -184,8 +180,7 @@ func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor // ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &MonitoredResourceDescriptorIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse @@ -219,8 +214,7 @@ func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req // GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account. 
func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest) (*monitoredrespb.MonitoredResourceDescriptor, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *monitoredrespb.MonitoredResourceDescriptor err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -235,8 +229,7 @@ func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req * // ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account. func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) *MetricDescriptorIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &MetricDescriptorIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { var resp *monitoringpb.ListMetricDescriptorsResponse @@ -270,8 +263,7 @@ func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitorin // GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *metricpb.MetricDescriptor err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -288,8 +280,7 @@ func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringp // User-created metric descriptors define // [custom metrics](/monitoring/custom-metrics). func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *metricpb.MetricDescriptor err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -305,8 +296,7 @@ func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitori // DeleteMetricDescriptor deletes a metric descriptor. Only user-created // [custom metrics](/monitoring/custom-metrics) can be deleted. func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.metricClient.DeleteMetricDescriptor(ctx, req) @@ -317,8 +307,7 @@ func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitori // ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. 
func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) *TimeSeriesIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &TimeSeriesIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { var resp *monitoringpb.ListTimeSeriesResponse @@ -355,8 +344,7 @@ func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.Lis // If any time series could not be written, a corresponding failure message is // included in the error response. func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.metricClient.CreateTimeSeries(ctx, req) diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go index eaaa845c2..5dbb5efdd 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go index d2c1d8159..82d738e7c 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -1012,7 +1012,7 @@ func TestMetricServiceListTimeSeries(t *testing.T) { var formattedName string = MetricProjectPath("[PROJECT]") var filter string = "filter-1274492040" var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} - var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = 0 + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL var request = &monitoringpb.ListTimeSeriesRequest{ Name: formattedName, Filter: filter, @@ -1057,7 +1057,7 @@ func TestMetricServiceListTimeSeriesError(t *testing.T) { var formattedName string = MetricProjectPath("[PROJECT]") var filter string = "filter-1274492040" var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} - var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = 0 + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL var request = &monitoringpb.ListTimeSeriesRequest{ Name: formattedName, Filter: filter, diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go index 27df2003c..cf9769a31 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. 
All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,4 +23,14 @@ // Use the client at cloud.google.com/go/pubsub in preference to this. package pubsub // import "cloud.google.com/go/pubsub/apiv1" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go index 08610ccdb..f0005ad4d 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go index 4e53ae092..10189125c 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,11 +19,10 @@ package pubsub import ( "fmt" "math" - "runtime" - "strings" "time" "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -32,7 +31,6 @@ import ( pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -109,7 +107,7 @@ type PublisherClient struct { CallOptions *PublisherCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewPublisherClient creates a new publisher client. @@ -127,7 +125,7 @@ func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*Publ publisherClient: pubsubpb.NewPublisherClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -145,10 +143,8 @@ func (c *PublisherClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *PublisherClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *PublisherClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // PublisherProjectPath returns the path for the project resource. 
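// Sketch, not part of the vendored code: the hunks above replace the
// per-client metadata.MD field with a single x-goog-api-client string that
// every call stamps onto the outgoing context via the insertXGoog helper
// added to the package doc.go files. The snippet below only exercises that
// helper; the header value is a made-up example of the
// "gl-go/<go> <name>/<version> gax/<gax> grpc/" format SetGoogleClientInfo builds.
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// insertXGoog mirrors the helper from doc.go: copy any metadata already on
// the context and overwrite the x-goog-api-client entry.
func insertXGoog(ctx context.Context, val string) context.Context {
	md, _ := metadata.FromContext(ctx) // a missing MD is fine; Copy of a nil MD yields an empty map
	md = md.Copy()
	md["x-goog-api-client"] = []string{val}
	return metadata.NewContext(ctx, md)
}

func main() {
	ctx := insertXGoog(context.Background(), "gl-go/1.8 gccl/dev gax/1.0 grpc/")
	md, _ := metadata.FromContext(ctx)
	fmt.Println(md["x-goog-api-client"]) // [gl-go/1.8 gccl/dev gax/1.0 grpc/]
}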
@@ -184,8 +180,7 @@ func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { // CreateTopic creates the given topic with the given name. func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic) (*pubsubpb.Topic, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -202,8 +197,7 @@ func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic) // does not exist. The message payload must not be empty; it must contain // either a non-empty data field, or at least one attribute. func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *pubsubpb.PublishResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -218,8 +212,7 @@ func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequ // GetTopic gets the configuration of a topic. func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest) (*pubsubpb.Topic, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -234,8 +227,7 @@ func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRe // ListTopics lists matching topics. func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest) *TopicIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &TopicIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { var resp *pubsubpb.ListTopicsResponse @@ -269,8 +261,7 @@ func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopi // ListTopicSubscriptions lists the name of the subscriptions for this topic. func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) *StringIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &StringIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { var resp *pubsubpb.ListTopicSubscriptionsResponse @@ -308,8 +299,7 @@ func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsu // configuration or subscriptions. Existing subscriptions to this topic are // not deleted, but their `topic` field is set to `_deleted-topic_`. 
func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.publisherClient.DeleteTopic(ctx, req) diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go index 468c3bc59..6e63f80e3 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go index 90b42498c..d3875a825 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,11 +19,10 @@ package pubsub import ( "fmt" "math" - "runtime" - "strings" "time" "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -32,7 +31,6 @@ import ( pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) var ( @@ -104,7 +102,7 @@ type SubscriberClient struct { CallOptions *SubscriberCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewSubscriberClient creates a new subscriber client. @@ -122,7 +120,7 @@ func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*Sub subscriberClient: pubsubpb.NewSubscriberClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -140,10 +138,8 @@ func (c *SubscriberClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *SubscriberClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *SubscriberClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // SubscriberProjectPath returns the path for the project resource. @@ -200,8 +196,7 @@ func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { // The generated name is populated in the returned Subscription object. // Note that for REST API requests, you must specify a name in the request. 
func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription) (*pubsubpb.Subscription, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -216,8 +211,7 @@ func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb // GetSubscription gets the configuration details of a subscription. func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest) (*pubsubpb.Subscription, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -232,8 +226,7 @@ func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.Ge // ListSubscriptions lists matching subscriptions. func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest) *SubscriptionIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &SubscriptionIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { var resp *pubsubpb.ListSubscriptionsResponse @@ -271,8 +264,7 @@ func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb. // the same name, but the new one has no association with the old // subscription or its topic unless the same topic is specified. func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.subscriberClient.DeleteSubscription(ctx, req) @@ -287,8 +279,7 @@ func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb // processing was interrupted. Note that this does not modify the // subscription-level `ackDeadlineSeconds` used for subsequent messages. func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.subscriberClient.ModifyAckDeadline(ctx, req) @@ -305,8 +296,7 @@ func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb. // but such a message may be redelivered later. Acknowledging a message more // than once will not result in an error. func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.subscriberClient.Acknowledge(ctx, req) @@ -320,8 +310,7 @@ func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.Acknow // there are too many concurrent pull requests pending for the given // subscription. 
func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest) (*pubsubpb.PullResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *pubsubpb.PullResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -347,8 +336,7 @@ func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest) // (e.g., a server restart). These should also be retried by the client. Flow // control can be achieved by configuring the underlying RPC channel. func (c *SubscriberClient) StreamingPull(ctx context.Context) (pubsubpb.Subscriber_StreamingPullClient, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp pubsubpb.Subscriber_StreamingPullClient err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -368,8 +356,7 @@ func (c *SubscriberClient) StreamingPull(ctx context.Context) (pubsubpb.Subscrib // attributes of a push subscription. Messages will accumulate for delivery // continuously through the call regardless of changes to the `PushConfig`. func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.subscriberClient.ModifyPushConfig(ctx, req) diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go index 094cd8e44..28492e3a4 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go index 26a3d7083..159469a51 100644 --- a/vendor/cloud.google.com/go/pubsub/doc.go +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -111,5 +111,10 @@ process messages, and the redelivery delay if messages fail to be acknowledged increases the available time for client code to process messages. However, if the client code neglects to call Message.Done, a large MaxExtension will increase the delay before the message is redelivered. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. */ package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/fake_test.go b/vendor/cloud.google.com/go/pubsub/fake_test.go new file mode 100644 index 000000000..552dd1e61 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/fake_test.go @@ -0,0 +1,139 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// This file provides a fake/mock in-memory pubsub server. +// (Really just a mock at the moment, but we hope to turn it into +// more of a fake.) + +import ( + "io" + "sync" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/testutil" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +type fakeServer struct { + pb.PublisherServer + pb.SubscriberServer + + Addr string + Acked map[string]bool // acked message IDs + Deadlines map[string]int32 // deadlines by message ID + pullResponses []*pullResponse + wg sync.WaitGroup +} + +type pullResponse struct { + msgs []*pb.ReceivedMessage + err error +} + +func newFakeServer() (*fakeServer, error) { + srv, err := testutil.NewServer() + if err != nil { + return nil, err + } + fake := &fakeServer{ + Addr: srv.Addr, + Acked: map[string]bool{}, + Deadlines: map[string]int32{}, + } + pb.RegisterPublisherServer(srv.Gsrv, fake) + pb.RegisterSubscriberServer(srv.Gsrv, fake) + srv.Start() + return fake, nil +} + +// Each call to addStreamingPullMessages results in one StreamingPullResponse. +func (s *fakeServer) addStreamingPullMessages(msgs []*pb.ReceivedMessage) { + s.pullResponses = append(s.pullResponses, &pullResponse{msgs, nil}) +} + +func (s *fakeServer) addStreamingPullError(err error) { + s.pullResponses = append(s.pullResponses, &pullResponse{nil, err}) +} + +func (s *fakeServer) wait() { + s.wg.Wait() +} + +func (s *fakeServer) StreamingPull(stream pb.Subscriber_StreamingPullServer) error { + // Receive initial request. + _, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + // Consume and ignore subsequent requests. + errc := make(chan error, 1) + s.wg.Add(1) + go func() { + defer s.wg.Done() + for { + req, err := stream.Recv() + if err != nil { + errc <- err + return + } + for _, id := range req.AckIds { + s.Acked[id] = true + } + for i, id := range req.ModifyDeadlineAckIds { + s.Deadlines[id] = req.ModifyDeadlineSeconds[i] + } + } + }() + // Send responses. + for { + if len(s.pullResponses) == 0 { + return nil + } + pr := s.pullResponses[0] + // Repeat last response. + if len(s.pullResponses) > 1 { + s.pullResponses = s.pullResponses[1:] + } + if pr.err == io.EOF { + return nil + } + if pr.err != nil { + return pr.err + } + // Return any error from Recv. 
+ select { + case err := <-errc: + return err + default: + } + res := &pb.StreamingPullResponse{ReceivedMessages: pr.msgs} + if err := stream.Send(res); err != nil { + return err + } + } +} + +func (s *fakeServer) GetSubscription(ctx context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { + return &pb.Subscription{ + Name: req.Subscription, + AckDeadlineSeconds: 10, + PushConfig: &pb.PushConfig{}, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go index d49e0fe1a..7c644ccf2 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -15,15 +15,26 @@ package pubsub import ( + "log" "sync" "time" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/support/bundler" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" ) type MessageIterator struct { + impl interface { + next() (*Message, error) + stop() + } +} + +type pollingMessageIterator struct { // kaTicker controls how often we send an ack deadline extension request. kaTicker *time.Ticker // ackTicker controls how often we acknowledge a batch of messages. @@ -42,11 +53,34 @@ type MessageIterator struct { closed chan struct{} } +var useStreamingPull = false + // newMessageIterator starts a new MessageIterator. Stop must be called on the MessageIterator // when it is no longer needed. // subName is the full name of the subscription to pull messages from. // ctx is the context to use for acking messages and extending message deadlines. func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *MessageIterator { + if !useStreamingPull { + return &MessageIterator{ + impl: newPollingMessageIterator(ctx, s, subName, po), + } + } + sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds())) + err := sp.open() + if grpc.Code(err) == codes.Unimplemented { + log.Println("pubsub: streaming pull unimplemented; falling back to legacy pull") + return &MessageIterator{ + impl: newPollingMessageIterator(ctx, s, subName, po), + } + } + // TODO(jba): handle other non-nil error? + log.Println("using streaming pull") + return &MessageIterator{ + impl: newStreamingMessageIterator(ctx, sp, po), + } +} + +func newPollingMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *pollingMessageIterator { // TODO: make kaTicker frequency more configurable. // (ackDeadline - 5s) is a reasonable default for now, because the minimum ack period is 10s. This gives us 5s grace. keepAlivePeriod := po.ackDeadline - 5*time.Second @@ -90,7 +124,7 @@ func newMessageIterator(ctx context.Context, s service, subName string, po *pull ka.Start() ack.Start() - return &MessageIterator{ + return &pollingMessageIterator{ kaTicker: kaTicker, ackTicker: ackTicker, ka: ka, @@ -105,10 +139,13 @@ func newMessageIterator(ctx context.Context, s service, subName string, po *pull // Message.Done when finished with it. // Once Stop has been called, calls to Next will return iterator.Done. 
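// Sketch, not part of the vendored code: newMessageIterator above gates the
// new path behind the useStreamingPull flag and falls back to the legacy
// polling iterator when the first StreamingPull open comes back
// Unimplemented. pickPullMode is a hypothetical stand-in for that probe;
// openStream plays the role of streamingPuller.open.
package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func pickPullMode(useStreaming bool, openStream func() error) string {
	if !useStreaming {
		return "polling"
	}
	if err := openStream(); grpc.Code(err) == codes.Unimplemented {
		return "polling" // server has no StreamingPull; use the legacy Pull path
	}
	return "streaming"
}

func main() {
	noStreaming := func() error { return grpc.Errorf(codes.Unimplemented, "StreamingPull not supported") }
	fmt.Println(pickPullMode(true, noStreaming))                 // polling
	fmt.Println(pickPullMode(true, func() error { return nil })) // streaming
}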
func (it *MessageIterator) Next() (*Message, error) { - m, err := it.puller.Next() + return it.impl.next() +} +func (it *pollingMessageIterator) next() (*Message, error) { + m, err := it.puller.Next() if err == nil { - m.it = it + m.done = it.done return m, nil } @@ -128,6 +165,10 @@ func (it *MessageIterator) Next() (*Message, error) { // Stop need only be called once, but may be called multiple times from // multiple goroutines. func (it *MessageIterator) Stop() { + it.impl.stop() +} + +func (it *pollingMessageIterator) stop() { it.mu.Lock() defer it.mu.Unlock() @@ -163,7 +204,7 @@ func (it *MessageIterator) Stop() { it.ackTicker.Stop() } -func (it *MessageIterator) done(ackID string, ack bool) { +func (it *pollingMessageIterator) done(ackID string, ack bool) { if ack { it.acker.Ack(ackID) // There's no need to call it.ka.Remove here, as acker will @@ -173,3 +214,312 @@ func (it *MessageIterator) done(ackID string, ack bool) { _ = it.nacker.Add(ackID, len(ackID)) // ignore error; this is just an optimization } } + +type streamingMessageIterator struct { + ctx context.Context + po *pullOptions + sp *streamingPuller + kaTicker *time.Ticker // keep-alive (deadline extensions) + ackTicker *time.Ticker // message acks + nackTicker *time.Ticker // message nacks (more frequent than acks) + failed chan struct{} // closed on stream error + stopped chan struct{} // closed when Stop is called + drained chan struct{} // closed when stopped && no more pending messages + msgc chan *Message + wg sync.WaitGroup + + mu sync.Mutex + keepAliveDeadlines map[string]time.Time + pendingReq *pb.StreamingPullRequest + err error // error from stream failure +} + +const messageBufferSize = 1000 + +func newStreamingMessageIterator(ctx context.Context, sp *streamingPuller, po *pullOptions) *streamingMessageIterator { + // TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a + // reasonable default for now, because the minimum ack period is 10s. This + // gives us 5s grace. + keepAlivePeriod := po.ackDeadline - 5*time.Second + kaTicker := time.NewTicker(keepAlivePeriod) + + // TODO: make ackTicker more configurable. Something less than + // kaTicker is a reasonable default (there's no point extending + // messages when they could be acked instead). + ackTicker := time.NewTicker(keepAlivePeriod / 2) + nackTicker := time.NewTicker(keepAlivePeriod / 10) + it := &streamingMessageIterator{ + ctx: ctx, + sp: sp, + po: po, + kaTicker: kaTicker, + ackTicker: ackTicker, + nackTicker: nackTicker, + failed: make(chan struct{}), + stopped: make(chan struct{}), + drained: make(chan struct{}), + msgc: make(chan *Message, messageBufferSize), + keepAliveDeadlines: map[string]time.Time{}, + pendingReq: &pb.StreamingPullRequest{}, + } + it.wg.Add(2) + go it.receiver() + go it.sender() + return it +} + +func (it *streamingMessageIterator) next() (*Message, error) { + // If ctx has been cancelled or the iterator is done, return straight + // away (even if there are buffered messages available). + select { + case <-it.ctx.Done(): + return nil, it.ctx.Err() + + case <-it.failed: + break + + case <-it.stopped: + break + + default: + // Wait for a message, but also for one of the above conditions. + select { + case msg := <-it.msgc: + // Since active select cases are chosen at random, this can return + // nil (from the channel close) even if it.failed or it.stopped is + // closed. 
+ if msg == nil { + break + } + msg.done = it.done + return msg, nil + + case <-it.ctx.Done(): + return nil, it.ctx.Err() + + case <-it.failed: + break + + case <-it.stopped: + break + } + } + // Here if the iterator is done. + it.mu.Lock() + defer it.mu.Unlock() + return nil, it.err +} + +func (it *streamingMessageIterator) stop() { + it.mu.Lock() + select { + case <-it.stopped: + it.mu.Unlock() + it.wg.Wait() + return + default: + close(it.stopped) + } + if it.err == nil { + it.err = iterator.Done + } + it.mu.Unlock() + // Nack all the pending messages. + // Grab the lock separately for each message to allow the receiver + // and sender goroutines to make progress. + // Why this will eventually terminate: + // - If the receiver is not blocked on a stream Recv, then + // it will write all the messages it has received to the channel, + // then exit, closing the channel. + // - If the receiver is blocked, then this loop will eventually + // nack all the messages in the channel. Once done is called + // on the remaining messages, the iterator will be marked as drained, + // which will trigger the sender to terminate. When it does, it + // performs a CloseSend on the stream, which will result in the blocked + // stream Recv returning. + for m := range it.msgc { + it.mu.Lock() + delete(it.keepAliveDeadlines, m.ackID) + it.addDeadlineMod(m.ackID, 0) + it.checkDrained() + it.mu.Unlock() + } + it.wg.Wait() +} + +// checkDrained closes the drained channel if the iterator has been stopped and all +// pending messages have either been n/acked or expired. +// +// Called with the lock held. +func (it *streamingMessageIterator) checkDrained() { + select { + case <-it.drained: + return + default: + } + select { + case <-it.stopped: + if len(it.keepAliveDeadlines) == 0 { + close(it.drained) + } + default: + } +} + +// Called when a message is acked/nacked. +func (it *streamingMessageIterator) done(ackID string, ack bool) { + it.mu.Lock() + defer it.mu.Unlock() + delete(it.keepAliveDeadlines, ackID) + if ack { + it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID) + } else { + it.addDeadlineMod(ackID, 0) // Nack indicated by modifying the deadline to zero. + } + it.checkDrained() +} + +// addDeadlineMod adds the ack ID to the pending request with the given deadline. +// +// Called with the lock held. +func (it *streamingMessageIterator) addDeadlineMod(ackID string, deadlineSecs int32) { + pr := it.pendingReq + pr.ModifyDeadlineAckIds = append(pr.ModifyDeadlineAckIds, ackID) + pr.ModifyDeadlineSeconds = append(pr.ModifyDeadlineSeconds, deadlineSecs) +} + +// fail is called when a stream method returns a permanent error. +func (it *streamingMessageIterator) fail(err error) { + it.mu.Lock() + if it.err == nil { + it.err = err + close(it.failed) + } + it.mu.Unlock() +} + +// receiver runs in a goroutine and handles all receives from the stream. +func (it *streamingMessageIterator) receiver() { + defer it.wg.Done() + defer close(it.msgc) + + for { + // Stop retrieving messages if the context is done, the stream + // failed, or the iterator's Stop method was called. + select { + case <-it.ctx.Done(): + return + case <-it.failed: + return + case <-it.stopped: + return + default: + } + // Receive messages from stream. This may block indefinitely. + msgs, err := it.sp.fetchMessages() + + // The streamingPuller handles retries, so any error here + // is fatal to the iterator. + if err != nil { + it.fail(err) + return + } + // We received some messages. Remember them so we can + // keep them alive. 
+ deadline := time.Now().Add(it.po.maxExtension) + it.mu.Lock() + for _, m := range msgs { + it.keepAliveDeadlines[m.ackID] = deadline + } + it.mu.Unlock() + // Deliver the messages to the channel. + for _, m := range msgs { + select { + case <-it.ctx.Done(): + return + case <-it.failed: + return + // Don't return if stopped. We want to send the remaining + // messages on the channel, where they will be nacked. + case it.msgc <- m: + } + } + } +} + +// sender runs in a goroutine and handles all sends to the stream. +func (it *streamingMessageIterator) sender() { + defer it.wg.Done() + defer it.kaTicker.Stop() + defer it.ackTicker.Stop() + defer it.nackTicker.Stop() + defer it.sp.closeSend() + + done := false + for !done { + send := false + select { + case <-it.ctx.Done(): + // Context canceled or timed out: stop immediately, without + // another RPC. + return + + case <-it.failed: + // Stream failed: nothing to do, so stop immediately. + return + + case <-it.drained: + // All outstanding messages have been marked done: + // nothing left to do except send the final request. + it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingReq.ModifyDeadlineAckIds) > 0) + done = true + + case <-it.kaTicker.C: + it.mu.Lock() + send = it.handleKeepAlives() + + case <-it.nackTicker.C: + it.mu.Lock() + send = (len(it.pendingReq.ModifyDeadlineAckIds) > 0) + + case <-it.ackTicker.C: + it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0) + + } + // Lock is held here. + if send { + req := it.pendingReq + it.pendingReq = &pb.StreamingPullRequest{} + it.mu.Unlock() + err := it.sp.send(req) + if err != nil { + // The streamingPuller handles retries, so any error here + // is fatal to the iterator. + it.fail(err) + return + } + } else { + it.mu.Unlock() + } + } +} + +// handleKeepAlives modifies the pending request to include deadline extensions +// for live messages. It also purges expired messages. It reports whether +// there were any live messages. +// +// Called with the lock held. +func (it *streamingMessageIterator) handleKeepAlives() bool { + live, expired := getKeepAliveAckIDs(it.keepAliveDeadlines) + for _, e := range expired { + delete(it.keepAliveDeadlines, e) + } + dl := trunc32(int64(it.po.ackDeadline.Seconds())) + for _, m := range live { + it.addDeadlineMod(m, dl) + } + it.checkDrained() + return len(live) > 0 +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator_test.go b/vendor/cloud.google.com/go/pubsub/iterator_test.go index 0e163d267..b631b4fb5 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator_test.go +++ b/vendor/cloud.google.com/go/pubsub/iterator_test.go @@ -82,6 +82,10 @@ func (s *blockingFetch) fetchMessages(ctx context.Context, subName string, maxMe return nil, ctx.Err() } +func (s *blockingFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { + return nil +} + // justInTimeFetch simulates the situation where the iterator is aborted just after the fetch RPC // succeeds, so the rest of puller.Next will continue to execute and return sucessfully. type justInTimeFetch struct { @@ -108,6 +112,10 @@ func (s *justInTimeFetch) modifyAckDeadline(ctx context.Context, subName string, return nil } +func (s *justInTimeFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { + return nil +} + func TestAfterAbortReturnsNoMoreThanOneMessage(t *testing.T) { // Each test case is excercised by making two concurrent blocking calls on a // MessageIterator, and then aborting the iterator. 
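// Sketch, not part of the vendored code: a quick check of the ticker cadence
// newStreamingMessageIterator chooses above, assuming the 10-second ack
// deadline that fakeServer.GetSubscription advertises. Keep-alive extensions
// fire every ackDeadline-5s, acks at half that period, nacks at a tenth.
package main

import (
	"fmt"
	"time"
)

func main() {
	ackDeadline := 10 * time.Second // assumed value
	keepAlivePeriod := ackDeadline - 5*time.Second
	fmt.Println(keepAlivePeriod)      // 5s    -> kaTicker (deadline extensions)
	fmt.Println(keepAlivePeriod / 2)  // 2.5s  -> ackTicker
	fmt.Println(keepAlivePeriod / 10) // 500ms -> nackTicker
}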
@@ -224,6 +232,10 @@ func (f *fetcherServiceWithModifyAckDeadline) splitAckIDs(ackIDs []string) ([]st return ackIDs, nil } +func (f *fetcherServiceWithModifyAckDeadline) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { + return nil +} + func TestMultipleStopCallsBlockUntilMessageDone(t *testing.T) { events := make(chan string, 3) s := &fetcherServiceWithModifyAckDeadline{ diff --git a/vendor/cloud.google.com/go/pubsub/keepalive.go b/vendor/cloud.google.com/go/pubsub/keepalive.go index bf2b95c05..f57c3831c 100644 --- a/vendor/cloud.google.com/go/pubsub/keepalive.go +++ b/vendor/cloud.google.com/go/pubsub/keepalive.go @@ -107,9 +107,12 @@ func (ka *keepAlive) Stop() { func (ka *keepAlive) getAckIDs() (live, expired []string) { ka.mu.Lock() defer ka.mu.Unlock() + return getKeepAliveAckIDs(ka.items) +} +func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) { now := time.Now() - for id, expiry := range ka.items { + for id, expiry := range items { if expiry.Before(now) { expired = append(expired, id) } else { diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go index dd08ea1b8..2ecc86c58 100644 --- a/vendor/cloud.google.com/go/pubsub/message.go +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -45,8 +45,8 @@ type Message struct { calledDone bool - // The iterator that created this Message. - it *MessageIterator + // The done method of the iterator that created this Message. + done func(string, bool) } func toMessage(resp *pb.ReceivedMessage) (*Message, error) { @@ -80,5 +80,5 @@ func (m *Message) Done(ack bool) { return } m.calledDone = true - m.it.done(m.ackID, ack) + m.done(m.ackID, ack) } diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go index 5a1169d6b..328fe48d2 100644 --- a/vendor/cloud.google.com/go/pubsub/service.go +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -16,10 +16,13 @@ package pubsub import ( "fmt" + "io" "math" + "sync" "time" "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/pubsub/apiv1" "golang.org/x/net/context" "google.golang.org/api/option" @@ -28,8 +31,6 @@ import ( "google.golang.org/grpc/codes" ) -const version = "0.2.0" - type nextStringFunc func() (string, error) // service provides an internal abstraction to isolate the generated @@ -65,6 +66,8 @@ type service interface { iamHandle(resourceName string) *iam.Handle + newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller + close() error } @@ -83,8 +86,8 @@ func newPubSubService(ctx context.Context, opts []option.ClientOption) (*apiServ _ = pubc.Close() // ignore error return nil, err } - pubc.SetGoogleClientInfo("pubsub", version) - subc.SetGoogleClientInfo("pubsub", version) + pubc.SetGoogleClientInfo("gccl", version.Repo) + subc.SetGoogleClientInfo("gccl", version.Repo) return &apiService{pubc: pubc, subc: subc}, nil } @@ -222,13 +225,13 @@ func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, dead // it 512K. const ( maxPayload = 512 * 1024 - ackFixedOverhead = 100 + reqFixedOverhead = 100 overheadPerID = 3 ) // splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data. 
func (s *apiService) splitAckIDs(ids []string) ([]string, []string) { - total := ackFixedOverhead + total := reqFixedOverhead for i, id := range ids { total += len(id) + overheadPerID if total > maxPayload { @@ -253,8 +256,12 @@ func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessa if err != nil { return nil, err } - msgs := make([]*Message, 0, len(resp.ReceivedMessages)) - for i, m := range resp.ReceivedMessages { + return convertMessages(resp.ReceivedMessages) +} + +func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { + msgs := make([]*Message, 0, len(rms)) + for i, m := range rms { msg, err := toMessage(m) if err != nil { return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) @@ -302,3 +309,177 @@ func trunc32(i int64) int32 { } return int32(i) } + +func (s *apiService) newStreamingPuller(ctx context.Context, subName string, ackDeadlineSecs int32) *streamingPuller { + p := &streamingPuller{ + ctx: ctx, + subName: subName, + ackDeadlineSecs: ackDeadlineSecs, + subc: s.subc, + } + p.c = sync.NewCond(&p.mu) + return p +} + +type streamingPuller struct { + ctx context.Context + subName string + ackDeadlineSecs int32 + subc *vkit.SubscriberClient + + mu sync.Mutex + c *sync.Cond + inFlight bool + closed bool // set after CloseSend called + spc pb.Subscriber_StreamingPullClient + err error +} + +// open establishes (or re-establishes) a stream for pulling messages. +// It takes care that only one RPC is in flight at a time. +func (p *streamingPuller) open() error { + p.c.L.Lock() + defer p.c.L.Unlock() + p.openLocked() + return p.err +} + +func (p *streamingPuller) openLocked() { + if p.inFlight { + // Another goroutine is opening; wait for it. + for p.inFlight { + p.c.Wait() + } + return + } + // No opens in flight; start one. + p.inFlight = true + p.c.L.Unlock() + spc, err := p.subc.StreamingPull(p.ctx) + if err == nil { + err = spc.Send(&pb.StreamingPullRequest{ + Subscription: p.subName, + StreamAckDeadlineSeconds: p.ackDeadlineSecs, + }) + } + p.c.L.Lock() + p.spc = spc + p.err = err + p.inFlight = false + p.c.Broadcast() +} + +func (p *streamingPuller) call(f func(pb.Subscriber_StreamingPullClient) error) error { + p.c.L.Lock() + defer p.c.L.Unlock() + // Wait for an open in flight. + for p.inFlight { + p.c.Wait() + } + // TODO(jba): better retry strategy. + var err error + for i := 0; i < 3; i++ { + if p.err != nil { + return p.err + } + spc := p.spc + // Do not call f with the lock held. Only one goroutine calls Send + // (streamingMessageIterator.sender) and only one calls Recv + // (streamingMessageIterator.receiver). If we locked, then a + // blocked Recv would prevent a Send from happening. + p.c.L.Unlock() + err = f(spc) + p.c.L.Lock() + if !p.closed && (err == io.EOF || grpc.Code(err) == codes.Unavailable) { + time.Sleep(500 * time.Millisecond) + p.openLocked() + continue + } + // Not a retry-able error; fail permanently. + // TODO(jba): for some errors, should we retry f (the Send or Recv) + // but not re-open the stream? 
+ p.err = err + return err + } + p.err = fmt.Errorf("retry exceeded; last error was %v", err) + return p.err +} + +func (p *streamingPuller) fetchMessages() ([]*Message, error) { + var res *pb.StreamingPullResponse + err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { + var err error + res, err = spc.Recv() + return err + }) + if err != nil { + return nil, err + } + return convertMessages(res.ReceivedMessages) +} + +func (p *streamingPuller) send(req *pb.StreamingPullRequest) error { + // Note: len(modAckIDs) == len(modSecs) + var rest *pb.StreamingPullRequest + for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 { + req, rest = splitRequest(req, maxPayload) + err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { + x := spc.Send(req) + return x + }) + if err != nil { + return err + } + req = rest + } + return nil +} + +func (p *streamingPuller) closeSend() { + p.mu.Lock() + p.closed = true + p.mu.Unlock() + p.spc.CloseSend() +} + +// Split req into a prefix that is smaller than maxSize, and a remainder. +func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) { + const int32Bytes = 4 + + // Copy all fields before splitting the variable-sized ones. + remainder = &pb.StreamingPullRequest{} + *remainder = *req + // Split message so it isn't too big. + size := reqFixedOverhead + i := 0 + for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) { + if i < len(req.AckIds) { + size += overheadPerID + len(req.AckIds[i]) + } + if i < len(req.ModifyDeadlineAckIds) { + size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes + } + i++ + } + + min := func(a, b int) int { + if a < b { + return a + } + return b + } + + j := i + if size > maxSize { + j-- + } + k := min(j, len(req.AckIds)) + remainder.AckIds = req.AckIds[k:] + req.AckIds = req.AckIds[:k] + k = min(j, len(req.ModifyDeadlineAckIds)) + remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:] + remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:] + req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k] + req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k] + return req, remainder +} diff --git a/vendor/cloud.google.com/go/pubsub/service_test.go b/vendor/cloud.google.com/go/pubsub/service_test.go new file mode 100644 index 000000000..e8a9b0a11 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/service_test.go @@ -0,0 +1,68 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub + +import ( + "reflect" + "testing" + + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func TestSplitRequest(t *testing.T) { + split := func(a []string, i int) ([]string, []string) { + if len(a) < i { + return a, nil + } + return a[:i], a[i:] + } + ackIDs := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"} + modDeadlines := []int32{1, 2, 3, 4, 5} + for i, test := range []struct { + ackIDs []string + modAckIDs []string + splitIndex int + }{ + {ackIDs, ackIDs, 2}, + {nil, ackIDs, 3}, + {ackIDs, nil, 5}, + {nil, ackIDs[:1], 1}, + } { + req := &pb.StreamingPullRequest{ + AckIds: test.ackIDs, + ModifyDeadlineAckIds: test.modAckIDs, + ModifyDeadlineSeconds: modDeadlines[:len(test.modAckIDs)], + } + a1, a2 := split(test.ackIDs, test.splitIndex) + m1, m2 := split(test.modAckIDs, test.splitIndex) + want1 := &pb.StreamingPullRequest{ + AckIds: a1, + ModifyDeadlineAckIds: m1, + ModifyDeadlineSeconds: modDeadlines[:len(m1)], + } + want2 := &pb.StreamingPullRequest{ + AckIds: a2, + ModifyDeadlineAckIds: m2, + ModifyDeadlineSeconds: modDeadlines[len(m1) : len(m1)+len(m2)], + } + got1, got2 := splitRequest(req, reqFixedOverhead+40) + if !reflect.DeepEqual(got1, want1) { + t.Errorf("#%d: first:\ngot %+v\nwant %+v", i, got1, want1) + } + if !reflect.DeepEqual(got2, want2) { + t.Errorf("#%d: second:\ngot %+v\nwant %+v", i, got2, want2) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go new file mode 100644 index 000000000..bff1c5366 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go @@ -0,0 +1,277 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// TODO(jba): test keepalive +// TODO(jba): test that expired messages are not kept alive +// TODO(jba): test that when all messages expire, Stop returns. 
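// Sketch, not part of the vendored code: the size budget behind splitRequest
// and splitAckIDs in service.go above. A request is charged reqFixedOverhead
// (100 bytes), plus overheadPerID (3) + len(id) per ack ID, plus an extra
// int32Bytes (4) for each modify-deadline entry's seconds value. splitIndex
// is a hypothetical stand-alone version of just the index computation; the
// calls in main reproduce the first three TestSplitRequest expectations.
package main

import "fmt"

const (
	reqFixedOverhead = 100
	overheadPerID    = 3
	int32Bytes       = 4
)

func splitIndex(ackIDs, modIDs []string, maxSize int) int {
	size := reqFixedOverhead
	i := 0
	for size < maxSize && (i < len(ackIDs) || i < len(modIDs)) {
		if i < len(ackIDs) {
			size += overheadPerID + len(ackIDs[i])
		}
		if i < len(modIDs) {
			size += overheadPerID + len(modIDs[i]) + int32Bytes
		}
		i++
	}
	if size > maxSize {
		i--
	}
	return i
}

func main() {
	ids := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"} // 4-byte IDs, as in the test
	fmt.Println(splitIndex(ids, ids, reqFixedOverhead+40)) // 2
	fmt.Println(splitIndex(nil, ids, reqFixedOverhead+40)) // 3
	fmt.Println(splitIndex(ids, nil, reqFixedOverhead+40)) // 5
}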
+ +import ( + "io" + "reflect" + "strconv" + "testing" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + timestamp = &tspb.Timestamp{} + testMessages = []*pb.ReceivedMessage{ + {AckId: "1", Message: &pb.PubsubMessage{Data: []byte{1}, PublishTime: timestamp}}, + {AckId: "2", Message: &pb.PubsubMessage{Data: []byte{2}, PublishTime: timestamp}}, + {AckId: "3", Message: &pb.PubsubMessage{Data: []byte{3}, PublishTime: timestamp}}, + } +) + +func TestStreamingPullBasic(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullMultipleFetches(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullMessages(testMessages[1:]) + testStreamingPullIteration(t, client, server, testMessages) +} + +func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer, msgs []*pb.ReceivedMessage) { + if !useStreamingPull { + t.Skip() + } + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + } + for i := 0; i < len(msgs); i++ { + got, err := iter.Next() + if err != nil { + t.Fatal(err) + } + got.Done(i%2 == 0) // ack evens, nack odds + want, err := toMessage(msgs[i]) + if err != nil { + t.Fatal(err) + } + want.calledDone = true + // Don't compare done; it's a function. + got.done = nil + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: got\n%#v\nwant\n%#v", i, got, want) + } + + } + iter.Stop() + server.wait() + for i := 0; i < len(msgs); i++ { + id := msgs[i].AckId + if i%2 == 0 { + if !server.Acked[id] { + t.Errorf("msg %q should have been acked but wasn't", id) + } + } else { + if dl, ok := server.Deadlines[id]; !ok || dl != 0 { + t.Errorf("msg %q should have been nacked but wasn't", id) + } + } + } +} + +func TestStreamingPullStop(t *testing.T) { + if !useStreamingPull { + t.Skip() + } + // After Stop is called, Next returns iterator.Done. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + } + msg, err := iter.Next() + if err != nil { + t.Fatal(err) + } + msg.Done(true) + iter.Stop() + // Next should always return the same error. + for i := 0; i < 3; i++ { + _, err = iter.Next() + if want := iterator.Done; err != want { + t.Fatalf("got <%v> %p, want <%v> %p", err, err, want, want) + } + } +} + +func TestStreamingPullError(t *testing.T) { + if !useStreamingPull { + t.Skip() + } + client, server := newFake(t) + server.addStreamingPullError(grpc.Errorf(codes.Internal, "")) + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + } + // Next should always return the same error. + for i := 0; i < 3; i++ { + _, err = iter.Next() + if want := codes.Internal; grpc.Code(err) != want { + t.Fatalf("got <%v>, want code %v", err, want) + } + } +} + +func TestStreamingPullCancel(t *testing.T) { + if !useStreamingPull { + t.Skip() + } + // Test that canceling the iterator's context behaves correctly. 
+ client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := client.Subscription("s") + ctx, cancel := context.WithCancel(context.Background()) + iter, err := sub.Pull(ctx) + if err != nil { + t.Fatal(err) + } + _, err = iter.Next() + if err != nil { + t.Fatal(err) + } + // Here we have one message read (but not acked), and two + // in the iterator's buffer. + cancel() + // Further calls to Next will return Canceled. + _, err = iter.Next() + if got, want := err, context.Canceled; got != want { + t.Errorf("got %v, want %v", got, want) + } + // Despite the unacked message, Stop will still return promptly. + done := make(chan struct{}) + go func() { + iter.Stop() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("iter.Stop timed out") + } +} + +func TestStreamingPullRetry(t *testing.T) { + if !useStreamingPull { + t.Skip() + } + // Check that we retry on io.EOF or Unavailable. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullError(io.EOF) + server.addStreamingPullError(io.EOF) + server.addStreamingPullMessages(testMessages[1:2]) + server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) + server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) + server.addStreamingPullMessages(testMessages[2:]) + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullConcurrent(t *testing.T) { + if !useStreamingPull { + t.Skip() + } + newMsg := func(i int) *pb.ReceivedMessage { + return &pb.ReceivedMessage{ + AckId: strconv.Itoa(i), + Message: &pb.PubsubMessage{Data: []byte{byte(i)}, PublishTime: timestamp}, + } + } + + // Multiple goroutines should be able to read from the same iterator. + client, server := newFake(t) + // Add a lot of messages, a few at a time, to make sure both threads get a chance. + nMessages := 100 + for i := 0; i < nMessages; i += 2 { + server.addStreamingPullMessages([]*pb.ReceivedMessage{newMsg(i), newMsg(i + 1)}) + } + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + } + seenc := make(chan string) + errc := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + for { + msg, err := iter.Next() + if err == iterator.Done { + return + } + if err != nil { + errc <- err + return + } + // Must ack before sending to channel, or Stop may hang. + msg.Done(true) + seenc <- msg.ackID + } + }() + } + seen := map[string]bool{} + for i := 0; i < nMessages; i++ { + select { + case err := <-errc: + t.Fatal(err) + case id := <-seenc: + if seen[id] { + t.Fatalf("duplicate ID %q", id) + } + seen[id] = true + } + } + iter.Stop() + if len(seen) != nMessages { + t.Fatalf("got %d messages, want %d", len(seen), nMessages) + } +} + +func newFake(t *testing.T) (*Client, *fakeServer) { + srv, err := newFakeServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + return client, srv +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go new file mode 100644 index 000000000..ae5768689 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go @@ -0,0 +1,521 @@ +// Copyright 2017, Google Inc. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package database + +import ( + "fmt" + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + databaseAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}") + databaseAdminDatabasePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}/databases/{database}") +) + +// DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient. +type DatabaseAdminCallOptions struct { + ListDatabases []gax.CallOption + CreateDatabase []gax.CallOption + GetDatabase []gax.CallOption + UpdateDatabaseDdl []gax.CallOption + DropDatabase []gax.CallOption + GetDatabaseDdl []gax.CallOption + SetIamPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultDatabaseAdminClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + ), + } +} + +func defaultDatabaseAdminCallOptions() *DatabaseAdminCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &DatabaseAdminCallOptions{ + ListDatabases: retry[[2]string{"default", "idempotent"}], + CreateDatabase: retry[[2]string{"default", "non_idempotent"}], + GetDatabase: retry[[2]string{"default", "idempotent"}], + UpdateDatabaseDdl: retry[[2]string{"default", "idempotent"}], + DropDatabase: retry[[2]string{"default", "idempotent"}], + GetDatabaseDdl: retry[[2]string{"default", "idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + } +} + +// DatabaseAdminClient is a client for interacting with Cloud Spanner Database Admin API. +type DatabaseAdminClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + databaseAdminClient databasepb.DatabaseAdminClient + + // The call options for this service. 
+ CallOptions *DatabaseAdminCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewDatabaseAdminClient creates a new database admin client. +// +// Cloud Spanner Database Admin API +// +// The Cloud Spanner Database Admin API can be used to create, drop, and +// list databases. It also enables updating the schema of pre-existing +// databases. +func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultDatabaseAdminClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &DatabaseAdminClient{ + conn: conn, + CallOptions: defaultDatabaseAdminCallOptions(), + + databaseAdminClient: databasepb.NewDatabaseAdminClient(conn), + } + c.SetGoogleClientInfo("gapic", version.Repo) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *DatabaseAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *DatabaseAdminClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *DatabaseAdminClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) +} + +// DatabaseAdminInstancePath returns the path for the instance resource. +func DatabaseAdminInstancePath(project, instance string) string { + path, err := databaseAdminInstancePathTemplate.Render(map[string]string{ + "project": project, + "instance": instance, + }) + if err != nil { + panic(err) + } + return path +} + +// DatabaseAdminDatabasePath returns the path for the database resource. +func DatabaseAdminDatabasePath(project, instance, database string) string { + path, err := databaseAdminDatabasePathTemplate.Render(map[string]string{ + "project": project, + "instance": instance, + "database": database, + }) + if err != nil { + panic(err) + } + return path +} + +// ListDatabases lists Cloud Spanner databases. +func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest) *DatabaseIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &DatabaseIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) { + var resp *databasepb.ListDatabasesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.ListDatabases(ctx, req) + return err + }, c.CallOptions.ListDatabases...) + if err != nil { + return nil, "", err + } + return resp.Databases, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving. 
+// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format `/operations/` and +// can be used to track preparation of the database. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The +// [response][google.longrunning.Operation.response] field type is +// [Database][google.spanner.admin.database.v1.Database], if successful. +func (c *DatabaseAdminClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest) (*DatabaseOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.CreateDatabase(ctx, req) + return err + }, c.CallOptions.CreateDatabase...) + if err != nil { + return nil, err + } + return &DatabaseOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// GetDatabase gets the state of a Cloud Spanner database. +func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest) (*databasepb.Database, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *databasepb.Database + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.GetDatabase(ctx, req) + return err + }, c.CallOptions.GetDatabase...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by +// creating/altering/dropping tables, columns, indexes, etc. The returned +// [long-running operation][google.longrunning.Operation] will have a name of +// the format `/operations/` and can be used to +// track execution of the schema change(s). The +// [metadata][google.longrunning.Operation.metadata] field type is +// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. +func (c *DatabaseAdminClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest) (*EmptyOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.UpdateDatabaseDdl(ctx, req) + return err + }, c.CallOptions.UpdateDatabaseDdl...) + if err != nil { + return nil, err + } + return &EmptyOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// DropDatabase drops (aka deletes) a Cloud Spanner database. +func (c *DatabaseAdminClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.databaseAdminClient.DropDatabase(ctx, req) + return err + }, c.CallOptions.DropDatabase...) + return err +} + +// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted +// DDL statements. This method does not show pending schema updates, those may +// be queried using the [Operations][google.longrunning.Operations] API. 
+func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest) (*databasepb.GetDatabaseDdlResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *databasepb.GetDatabaseDdlResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.GetDatabaseDdl(ctx, req) + return err + }, c.CallOptions.GetDatabaseDdl...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetIamPolicy sets the access control policy on a database resource. Replaces any +// existing policy. +// +// Authorization requires `spanner.databases.setIamPolicy` permission on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.SetIamPolicy(ctx, req) + return err + }, c.CallOptions.SetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for a database resource. Returns an empty +// policy if a database exists but does not have a policy set. +// +// Authorization requires `spanner.databases.getIamPolicy` permission on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.GetIamPolicy(ctx, req) + return err + }, c.CallOptions.GetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified database resource. +// +// Attempting this RPC on a non-existent Cloud Spanner database will result in +// a NOT_FOUND error if the user has `spanner.databases.list` permission on +// the containing Cloud Spanner instance. Otherwise returns an empty set of +// permissions. +func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.TestIamPermissions(ctx, req) + return err + }, c.CallOptions.TestIamPermissions...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DatabaseIterator manages a stream of *databasepb.Database. +type DatabaseIterator struct { + items []*databasepb.Database + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Database, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *DatabaseIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DatabaseIterator) Next() (*databasepb.Database, error) { + var item *databasepb.Database + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DatabaseIterator) bufLen() int { + return len(it.items) +} + +func (it *DatabaseIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// DatabaseOperation manages a long-running operation yielding databasepb.Database. +type DatabaseOperation struct { + lro *longrunning.Operation +} + +// DatabaseOperation returns a new DatabaseOperation from a given name. +// The name must be that of a previously created DatabaseOperation, possibly from a different process. +func (c *DatabaseAdminClient) DatabaseOperation(name string) *DatabaseOperation { + return &DatabaseOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *DatabaseOperation) Wait(ctx context.Context) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.Wait(ctx, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *DatabaseOperation) Poll(ctx context.Context) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.Poll(ctx, &resp); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *DatabaseOperation) Metadata() (*databasepb.CreateDatabaseMetadata, error) { + var meta databasepb.CreateDatabaseMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *DatabaseOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *DatabaseOperation) Name() string { + return op.lro.Name() +} + +// EmptyOperation manages a long-running operation with no result. 
+type EmptyOperation struct { + lro *longrunning.Operation +} + +// EmptyOperation returns a new EmptyOperation from a given name. +// The name must be that of a previously created EmptyOperation, possibly from a different process. +func (c *DatabaseAdminClient) EmptyOperation(name string) *EmptyOperation { + return &EmptyOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning any error encountered. +// +// See documentation of Poll for error-handling information. +func (op *EmptyOperation) Wait(ctx context.Context) error { + return op.lro.Wait(ctx, nil) +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, op.Done will return true. +func (op *EmptyOperation) Poll(ctx context.Context) error { + return op.lro.Poll(ctx, nil) +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *EmptyOperation) Metadata() (*databasepb.UpdateDatabaseDdlMetadata, error) { + var meta databasepb.UpdateDatabaseDdlMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *EmptyOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *EmptyOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go new file mode 100644 index 000000000..0769d1193 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go @@ -0,0 +1,204 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
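The DatabaseOperation and EmptyOperation wrappers above expose Wait, Poll, Done, Name and Metadata. A hedged sketch of a polling loop built only on those methods, for callers that want progress logging instead of blocking in Wait; waitForDatabase and the 5-second poll interval are illustrative assumptions, not part of the generated client:

```go
package example

import (
	"log"
	"time"

	"cloud.google.com/go/spanner/admin/database/apiv1"
	"golang.org/x/net/context"
	databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
)

// waitForDatabase polls op until it completes. Per the Poll doc comment
// above, Poll returns (nil, nil) while the operation is still running.
func waitForDatabase(ctx context.Context, op *database.DatabaseOperation) (*databasepb.Database, error) {
	for {
		db, err := op.Poll(ctx)
		if err != nil {
			return nil, err // Poll failed, or the operation finished with an error
		}
		if op.Done() {
			return db, nil // operation finished successfully
		}
		log.Printf("operation %s still running", op.Name())
		time.Sleep(5 * time.Second)
	}
}
```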
+ +package database_test + +import ( + "cloud.google.com/go/spanner/admin/database/apiv1" + "golang.org/x/net/context" + iampb "google.golang.org/genproto/googleapis/iam/v1" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +func ExampleNewDatabaseAdminClient() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleDatabaseAdminClient_ListDatabases() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.ListDatabasesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDatabases(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleDatabaseAdminClient_CreateDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.CreateDatabaseRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_GetDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_UpdateDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.UpdateDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + + err = op.Wait(ctx) + // TODO: Handle error. +} + +func ExampleDatabaseAdminClient_DropDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.DropDatabaseRequest{ + // TODO: Fill request struct fields. + } + err = c.DropDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDatabaseAdminClient_GetDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go new file mode 100644 index 000000000..b0ef0f3de --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go @@ -0,0 +1,32 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package database is an experimental, auto-generated package for the +// database API. +// +package database // import "cloud.google.com/go/spanner/admin/database/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go new file mode 100644 index 000000000..37bc0a335 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go @@ -0,0 +1,740 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
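The generated examples above leave every request as a TODO. A hedged sketch of filling in CreateDatabaseRequest with the DatabaseAdminInstancePath helper defined earlier in this diff; the project and instance IDs and the DDL statement are placeholders:

```go
package example

import (
	"cloud.google.com/go/spanner/admin/database/apiv1"
	"golang.org/x/net/context"
	databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
)

// createExampleDatabase fills in the fields the generated examples leave as
// TODOs. The project/instance IDs and the DDL statement are placeholders.
func createExampleDatabase(ctx context.Context, c *database.DatabaseAdminClient) (*databasepb.Database, error) {
	req := &databasepb.CreateDatabaseRequest{
		// Renders to "projects/my-project/instances/my-instance".
		Parent:          database.DatabaseAdminInstancePath("my-project", "my-instance"),
		CreateStatement: "CREATE DATABASE `example-db`",
	}
	op, err := c.CreateDatabase(ctx, req)
	if err != nil {
		return nil, err
	}
	// Wait blocks until the database is ready (or the operation fails).
	return op.Wait(ctx)
}
```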
+ +package database + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDatabaseAdminServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + databasepb.DatabaseAdminServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDatabaseAdminServer) ListDatabases(_ context.Context, req *databasepb.ListDatabasesRequest) (*databasepb.ListDatabasesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.ListDatabasesResponse), nil +} + +func (s *mockDatabaseAdminServer) CreateDatabase(_ context.Context, req *databasepb.CreateDatabaseRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) GetDatabase(_ context.Context, req *databasepb.GetDatabaseRequest) (*databasepb.Database, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.Database), nil +} + +func (s *mockDatabaseAdminServer) UpdateDatabaseDdl(_ context.Context, req *databasepb.UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) DropDatabase(_ context.Context, req *databasepb.DropDatabaseRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockDatabaseAdminServer) GetDatabaseDdl(_ context.Context, req *databasepb.GetDatabaseDdlRequest) (*databasepb.GetDatabaseDdlResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.GetDatabaseDdlResponse), nil +} + +func (s *mockDatabaseAdminServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) TestIamPermissions(_ context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the 
test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockDatabaseAdmin mockDatabaseAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + databasepb.RegisterDatabaseAdminServer(serv, &mockDatabaseAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDatabaseAdminListDatabases(t *testing.T) { + var nextPageToken string = "" + var databasesElement *databasepb.Database = &databasepb.Database{} + var databases = []*databasepb.Database{databasesElement} + var expectedResponse = &databasepb.ListDatabasesResponse{ + NextPageToken: nextPageToken, + Databases: databases, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Databases[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminListDatabasesError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminCreateDatabase(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &databasepb.Database{ + Name: name, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if 
want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminCreateDatabaseError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetDatabase(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &databasepb.Database{ + Name: name2, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminUpdateDatabaseDdl(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: 
statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminUpdateDatabaseDdlError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDatabaseAdminDropDatabase(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminDropDatabaseError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDatabaseAdminGetDatabaseDdl(t *testing.T) { + var expectedResponse *databasepb.GetDatabaseDdlResponse = &databasepb.GetDatabaseDdlResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + 
if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseDdlError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminSetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminTestIamPermissionsError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go new file mode 100644 index 000000000..db4986e6f --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go @@ -0,0 +1,32 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package instance is an experimental, auto-generated package for the +// instance API. 
+// +package instance // import "cloud.google.com/go/spanner/admin/instance/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go new file mode 100644 index 000000000..c2242fb84 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go @@ -0,0 +1,637 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance + +import ( + "fmt" + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + instanceAdminProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + instanceAdminInstanceConfigPathTemplate = gax.MustCompilePathTemplate("projects/{project}/instanceConfigs/{instance_config}") + instanceAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}") +) + +// InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient. 
+type InstanceAdminCallOptions struct { + ListInstanceConfigs []gax.CallOption + GetInstanceConfig []gax.CallOption + ListInstances []gax.CallOption + GetInstance []gax.CallOption + CreateInstance []gax.CallOption + UpdateInstance []gax.CallOption + DeleteInstance []gax.CallOption + SetIamPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultInstanceAdminClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + ), + } +} + +func defaultInstanceAdminCallOptions() *InstanceAdminCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &InstanceAdminCallOptions{ + ListInstanceConfigs: retry[[2]string{"default", "idempotent"}], + GetInstanceConfig: retry[[2]string{"default", "idempotent"}], + ListInstances: retry[[2]string{"default", "idempotent"}], + GetInstance: retry[[2]string{"default", "idempotent"}], + CreateInstance: retry[[2]string{"default", "non_idempotent"}], + UpdateInstance: retry[[2]string{"default", "non_idempotent"}], + DeleteInstance: retry[[2]string{"default", "idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + } +} + +// InstanceAdminClient is a client for interacting with Cloud Spanner Instance Admin API. +type InstanceAdminClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + instanceAdminClient instancepb.InstanceAdminClient + + // The call options for this service. + CallOptions *InstanceAdminCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewInstanceAdminClient creates a new instance admin client. +// +// Cloud Spanner Instance Admin API +// +// The Cloud Spanner Instance Admin API can be used to create, delete, +// modify and list instances. Instances are dedicated Cloud Spanner serving +// and storage resources to be used by Cloud Spanner databases. +// +// Each instance has a "configuration", which dictates where the +// serving resources for the Cloud Spanner instance are located (e.g., +// US-central, Europe). Configurations are created by Google based on +// resource availability. +// +// Cloud Spanner billing is based on the instances that exist and their +// sizes. After an instance exists, there are no additional +// per-database or per-operation charges for use of the instance +// (though there may be additional network bandwidth charges). +// Instances offer isolation: problems with databases in one instance +// will not affect other instances. However, within an instance +// databases can affect each other. For example, if one database in an +// instance receives a lot of requests and consumes most of the +// instance resources, fewer resources are available for other +// databases in that instance, and their performance may suffer. 
+func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultInstanceAdminClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &InstanceAdminClient{ + conn: conn, + CallOptions: defaultInstanceAdminCallOptions(), + + instanceAdminClient: instancepb.NewInstanceAdminClient(conn), + } + c.SetGoogleClientInfo("gapic", version.Repo) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *InstanceAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *InstanceAdminClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *InstanceAdminClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) +} + +// InstanceAdminProjectPath returns the path for the project resource. +func InstanceAdminProjectPath(project string) string { + path, err := instanceAdminProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// InstanceAdminInstanceConfigPath returns the path for the instance config resource. +func InstanceAdminInstanceConfigPath(project, instanceConfig string) string { + path, err := instanceAdminInstanceConfigPathTemplate.Render(map[string]string{ + "project": project, + "instance_config": instanceConfig, + }) + if err != nil { + panic(err) + } + return path +} + +// InstanceAdminInstancePath returns the path for the instance resource. +func InstanceAdminInstancePath(project, instance string) string { + path, err := instanceAdminInstancePathTemplate.Render(map[string]string{ + "project": project, + "instance": instance, + }) + if err != nil { + panic(err) + } + return path +} + +// ListInstanceConfigs lists the supported instance configurations for a given project. +func (c *InstanceAdminClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest) *InstanceConfigIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &InstanceConfigIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) { + var resp *instancepb.ListInstanceConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.ListInstanceConfigs(ctx, req) + return err + }, c.CallOptions.ListInstanceConfigs...) + if err != nil { + return nil, "", err + } + return resp.InstanceConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstanceConfig gets information about a particular instance configuration. 
+func (c *InstanceAdminClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest) (*instancepb.InstanceConfig, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *instancepb.InstanceConfig + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.GetInstanceConfig(ctx, req) + return err + }, c.CallOptions.GetInstanceConfig...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInstances lists all instances in the given project. +func (c *InstanceAdminClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest) *InstanceIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &InstanceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) { + var resp *instancepb.ListInstancesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.ListInstances(ctx, req) + return err + }, c.CallOptions.ListInstances...) + if err != nil { + return nil, "", err + } + return resp.Instances, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstance gets information about a particular instance. +func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest) (*instancepb.Instance, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *instancepb.Instance + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.GetInstance(ctx, req) + return err + }, c.CallOptions.GetInstance...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInstance creates an instance and begins preparing it to begin serving. The +// returned [long-running operation][google.longrunning.Operation] +// can be used to track the progress of preparing the new +// instance. The instance name is assigned by the caller. If the +// named instance already exists, `CreateInstance` returns +// `ALREADY_EXISTS`. +// +// Immediately upon completion of this request: +// +// * The instance is readable via the API, with all requested attributes +// but no allocated resources. Its state is `CREATING`. +// +// Until completion of the returned operation: +// +// * Cancelling the operation renders the instance immediately unreadable +// via the API. +// * The instance can be deleted. +// * All other attempts to modify the instance are rejected. +// +// Upon completion of the returned operation: +// +// * Billing for all successfully-allocated resources begins (some types +// may have lower than the requested levels). +// * Databases can be created in the instance. +// * The instance's allocated resource levels are readable via the API. +// * The instance's state becomes `READY`. +// +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format `/operations/` and +// can be used to track creation of the instance. 
The +// [metadata][google.longrunning.Operation.metadata] field type is +// [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. +// The [response][google.longrunning.Operation.response] field type is +// [Instance][google.spanner.admin.instance.v1.Instance], if successful. +func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest) (*InstanceOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.CreateInstance(ctx, req) + return err + }, c.CallOptions.CreateInstance...) + if err != nil { + return nil, err + } + return &InstanceOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// UpdateInstance updates an instance, and begins allocating or releasing resources +// as requested. The returned [long-running +// operation][google.longrunning.Operation] can be used to track the +// progress of updating the instance. If the named instance does not +// exist, returns `NOT_FOUND`. +// +// Immediately upon completion of this request: +// +// * For resource types for which a decrease in the instance's allocation +// has been requested, billing is based on the newly-requested level. +// +// Until completion of the returned operation: +// +// * Cancelling the operation sets its metadata's +// [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins +// restoring resources to their pre-request values. The operation +// is guaranteed to succeed at undoing all resource changes, +// after which point it terminates with a `CANCELLED` status. +// * All other attempts to modify the instance are rejected. +// * Reading the instance via the API continues to give the pre-request +// resource levels. +// +// Upon completion of the returned operation: +// +// * Billing begins for all successfully-allocated resources (some types +// may have lower than the requested levels). +// * All newly-reserved resources are available for serving the instance's +// tables. +// * The instance's new resource levels are readable via the API. +// +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format `/operations/` and +// can be used to track the instance modification. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. +// The [response][google.longrunning.Operation.response] field type is +// [Instance][google.spanner.admin.instance.v1.Instance], if successful. +// +// Authorization requires `spanner.instances.update` permission on +// resource [name][google.spanner.admin.instance.v1.Instance.name]. +func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest) (*InstanceOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.UpdateInstance(ctx, req) + return err + }, c.CallOptions.UpdateInstance...) + if err != nil { + return nil, err + } + return &InstanceOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// DeleteInstance deletes an instance. 
+// +// Immediately upon completion of the request: +// +// * Billing ceases for all of the instance's reserved resources. +// +// Soon afterward: +// +// * The instance and *all of its databases* immediately and +// irrevocably disappear from the API. All data in the databases +// is permanently deleted. +func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.instanceAdminClient.DeleteInstance(ctx, req) + return err + }, c.CallOptions.DeleteInstance...) + return err +} + +// SetIamPolicy sets the access control policy on an instance resource. Replaces any +// existing policy. +// +// Authorization requires `spanner.instances.setIamPolicy` on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. +func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.SetIamPolicy(ctx, req) + return err + }, c.CallOptions.SetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for an instance resource. Returns an empty +// policy if an instance exists but does not have a policy set. +// +// Authorization requires `spanner.instances.getIamPolicy` on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.GetIamPolicy(ctx, req) + return err + }, c.CallOptions.GetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified instance resource. +// +// Attempting this RPC on a non-existent Cloud Spanner instance resource will +// result in a NOT_FOUND error if the user has `spanner.instances.list` +// permission on the containing Google Cloud Project. Otherwise returns an +// empty set of permissions. +func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.TestIamPermissions(ctx, req) + return err + }, c.CallOptions.TestIamPermissions...) + if err != nil { + return nil, err + } + return resp, nil +} + +// InstanceConfigIterator manages a stream of *instancepb.InstanceConfig. +type InstanceConfigIterator struct { + items []*instancepb.InstanceConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstanceConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InstanceConfigIterator) Next() (*instancepb.InstanceConfig, error) { + var item *instancepb.InstanceConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InstanceIterator manages a stream of *instancepb.Instance. +type InstanceIterator struct { + items []*instancepb.Instance + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*instancepb.Instance, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InstanceIterator) Next() (*instancepb.Instance, error) { + var item *instancepb.Instance + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InstanceOperation manages a long-running operation yielding instancepb.Instance. +type InstanceOperation struct { + lro *longrunning.Operation +} + +// InstanceOperation returns a new InstanceOperation from a given name. +// The name must be that of a previously created InstanceOperation, possibly from a different process. +func (c *InstanceAdminClient) InstanceOperation(name string) *InstanceOperation { + return &InstanceOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *InstanceOperation) Wait(ctx context.Context) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Wait(ctx, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. 
If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *InstanceOperation) Poll(ctx context.Context) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Poll(ctx, &resp); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *InstanceOperation) Metadata() (*instancepb.UpdateInstanceMetadata, error) { + var meta instancepb.UpdateInstanceMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *InstanceOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *InstanceOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go new file mode 100644 index 000000000..ee807fdbc --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go @@ -0,0 +1,230 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance_test + +import ( + "cloud.google.com/go/spanner/admin/instance/apiv1" + "golang.org/x/net/context" + iampb "google.golang.org/genproto/googleapis/iam/v1" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" +) + +func ExampleNewInstanceAdminClient() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleInstanceAdminClient_ListInstanceConfigs() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstanceConfigsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstanceConfigs(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. 
+ _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstanceConfig() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInstanceConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_ListInstances() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstancesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstances(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_CreateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.CreateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_UpdateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.UpdateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_DeleteInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.DeleteInstanceRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleInstanceAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go new file mode 100644 index 000000000..8c8cc8c33 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go @@ -0,0 +1,853 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + protobufpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockInstanceAdminServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + instancepb.InstanceAdminServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockInstanceAdminServer) ListInstanceConfigs(_ context.Context, req *instancepb.ListInstanceConfigsRequest) (*instancepb.ListInstanceConfigsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstanceConfigsResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstanceConfig(_ context.Context, req *instancepb.GetInstanceConfigRequest) (*instancepb.InstanceConfig, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.InstanceConfig), nil +} + +func (s *mockInstanceAdminServer) ListInstances(_ context.Context, req *instancepb.ListInstancesRequest) (*instancepb.ListInstancesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstancesResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstance(_ context.Context, req *instancepb.GetInstanceRequest) (*instancepb.Instance, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.Instance), nil +} + +func (s *mockInstanceAdminServer) CreateInstance(_ context.Context, req *instancepb.CreateInstanceRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) UpdateInstance(_ context.Context, req *instancepb.UpdateInstanceRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) DeleteInstance(_ context.Context, req *instancepb.DeleteInstanceRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockInstanceAdminServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) TestIamPermissions(_ context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockInstanceAdmin mockInstanceAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + instancepb.RegisterInstanceAdminServer(serv, &mockInstanceAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestInstanceAdminListInstanceConfigs(t *testing.T) { + var nextPageToken string = "" + var instanceConfigsElement *instancepb.InstanceConfig = &instancepb.InstanceConfig{} + var instanceConfigs = []*instancepb.InstanceConfig{instanceConfigsElement} + var expectedResponse = &instancepb.ListInstanceConfigsResponse{ + NextPageToken: nextPageToken, + InstanceConfigs: instanceConfigs, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InstanceConfigs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstanceConfigsError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstanceConfig(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &instancepb.InstanceConfig{ + Name: name2, + DisplayName: displayName, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") + var request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} 
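The generated tests above drive the client against an in-process gRPC server backed by a mock `InstanceAdminServer`, wired up once in `TestMain` and handed to `NewInstanceAdminClient` via `option.WithGRPCConn`. The same pattern can be reused outside the vendored package to exercise broker code that constructs an `InstanceAdminClient` without touching the network. A minimal sketch, assuming a hypothetical `startMockInstanceAdmin` helper in the broker's own test package (the helper name and package layout are illustrative, not part of this change):

```go
package broker_test

import (
	"net"
	"testing"

	"google.golang.org/api/option"
	instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1"
	"google.golang.org/grpc"
)

// startMockInstanceAdmin is a hypothetical helper mirroring what TestMain does
// in mock_test.go: it serves the given mock on a loopback listener and returns
// a ClientOption that routes an InstanceAdminClient to it.
func startMockInstanceAdmin(t *testing.T, mock instancepb.InstanceAdminServer) option.ClientOption {
	serv := grpc.NewServer()
	instancepb.RegisterInstanceAdminServer(serv, mock)

	// Listen on an ephemeral local port and serve the mock in the background.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	go serv.Serve(lis)

	// Dial the in-process server; WithGRPCConn makes the generated client use
	// this connection instead of dialing spanner.googleapis.com.
	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		t.Fatal(err)
	}
	return option.WithGRPCConn(conn)
}
```

With the returned option, `instance.NewInstanceAdminClient(ctx, opt)` talks to the mock, so recorded requests and stubbed responses can be asserted on exactly as the generated tests below do.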
+ +func TestInstanceAdminGetInstanceConfigError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") + var request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminListInstances(t *testing.T) { + var nextPageToken string = "" + var instancesElement *instancepb.Instance = &instancepb.Instance{} + var instances = []*instancepb.Instance{instancesElement} + var expectedResponse = &instancepb.ListInstancesResponse{ + NextPageToken: nextPageToken, + Instances: instances, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Instances[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstancesError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstance(t *testing.T) { + var name2 string = "name2-1052831874" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name2, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, 
resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminCreateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminCreateInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminUpdateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var 
expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *protobufpb.FieldMask = &protobufpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminUpdateInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *protobufpb.FieldMask = &protobufpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminDeleteInstance(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestInstanceAdminDeleteInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if c := grpc.Code(err); c != 
errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestInstanceAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminSetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockInstanceAdmin.err = nil + 
mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminTestIamPermissionsError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/backoff.go b/vendor/cloud.google.com/go/spanner/backoff.go new file mode 100644 index 000000000..d38723843 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math/rand" + "time" +) + +const ( + // minBackoff is the minimum backoff used by default. + minBackoff = 1 * time.Second + // maxBackoff is the maximum backoff used by default. + maxBackoff = 32 * time.Second + // jitter is the jitter factor. + jitter = 0.4 + // rate is the rate of exponential increase in the backoff. + rate = 1.3 +) + +var defaultBackoff = exponentialBackoff{minBackoff, maxBackoff} + +type exponentialBackoff struct { + min, max time.Duration +} + +// delay calculates the delay that should happen at n-th +// exponential backoff in a series. 
+func (b exponentialBackoff) delay(retries int) time.Duration { + min, max := float64(b.min), float64(b.max) + delay := min + for delay < max && retries > 0 { + delay *= rate + retries-- + } + if delay > max { + delay = max + } + delay -= delay * jitter * rand.Float64() + if delay < min { + delay = min + } + return time.Duration(delay) +} diff --git a/vendor/cloud.google.com/go/spanner/backoff_test.go b/vendor/cloud.google.com/go/spanner/backoff_test.go new file mode 100644 index 000000000..7a0314e81 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "time" + + "testing" +) + +// Test if exponential backoff helper can produce correct series of +// retry delays. +func TestBackoff(t *testing.T) { + b := exponentialBackoff{minBackoff, maxBackoff} + tests := []struct { + retries int + min time.Duration + max time.Duration + }{ + { + retries: 0, + min: minBackoff, + max: minBackoff, + }, + { + retries: 1, + min: minBackoff, + max: time.Duration(rate * float64(minBackoff)), + }, + { + retries: 3, + min: time.Duration(math.Pow(rate, 3) * (1 - jitter) * float64(minBackoff)), + max: time.Duration(math.Pow(rate, 3) * float64(minBackoff)), + }, + { + retries: 1000, + min: time.Duration((1 - jitter) * float64(maxBackoff)), + max: maxBackoff, + }, + } + for _, test := range tests { + got := b.delay(test.retries) + if float64(got) < float64(test.min) || float64(got) > float64(test.max) { + t.Errorf("delay(%v) = %v, want in range [%v, %v]", test.retries, got, test.min, test.max) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/client.go b/vendor/cloud.google.com/go/spanner/client.go new file mode 100644 index 000000000..3a5ab33ea --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/client.go @@ -0,0 +1,302 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "regexp" + "runtime" + "sync/atomic" + "time" + + "cloud.google.com/go/internal/version" + + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +const ( + prodAddr = "spanner.googleapis.com:443" + + // resourcePrefixHeader is the name of the metadata header used to indicate + // the resource being operated on. 
+ resourcePrefixHeader = "google-cloud-resource-prefix" + // apiClientHeader is the name of the metadata header used to indicate client + // information. + apiClientHeader = "x-goog-api-client" +) + +const ( + // Scope is the scope for Cloud Spanner Data API. + Scope = "https://www.googleapis.com/auth/spanner.data" + + // AdminScope is the scope for Cloud Spanner Admin APIs. + AdminScope = "https://www.googleapis.com/auth/spanner.admin" +) + +var ( + validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$") + clientUserAgent = fmt.Sprintf("cloudspanner go/%s", runtime.Version()) +) + +func validDatabaseName(db string) error { + if matched := validDBPattern.MatchString(db); !matched { + return fmt.Errorf("database name %q should conform to pattern %q", + db, validDBPattern.String()) + } + return nil +} + +// Client is a client for reading and writing data to a Cloud Spanner database. A +// client is safe to use concurrently, except for its Close method. +type Client struct { + // rr must be accessed through atomic operations. + rr uint32 + conns []*grpc.ClientConn + clients []sppb.SpannerClient + database string + // Metadata to be sent with each request. + md metadata.MD + idleSessions *sessionPool +} + +// ClientConfig has configurations for the client. +type ClientConfig struct { + // NumChannels is the number of GRPC channels. + NumChannels int + co []option.ClientOption + // SessionPoolConfig is the configuration for session pool. + SessionPoolConfig +} + +// errDial returns error for dialing to Cloud Spanner. +func errDial(ci int, err error) error { + e := toSpannerError(err).(*Error) + e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci)) + return e +} + +func contextWithMetadata(ctx context.Context, md metadata.MD) context.Context { + existing, ok := metadata.FromContext(ctx) + if ok { + md = metadata.Join(existing, md) + } + return metadata.NewContext(ctx, md) +} + +// NewClient creates a client to a database. A valid database name has the +// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses a default +// configuration. +func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) { + return NewClientWithConfig(ctx, database, ClientConfig{}, opts...) +} + +// NewClientWithConfig creates a client to a database. A valid database name has the +// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. +func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (*Client, error) { + // Validate database path. + if err := validDatabaseName(database); err != nil { + return nil, err + } + c := &Client{ + database: database, + md: metadata.Pairs( + resourcePrefixHeader, database, + apiClientHeader, clientUserAgent, + "x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)), + } + allOpts := []option.ClientOption{option.WithEndpoint(prodAddr), option.WithScopes(Scope), option.WithUserAgent(clientUserAgent)} + allOpts = append(allOpts, opts...) + // Prepare gRPC channels. + if config.NumChannels == 0 { + config.NumChannels = 4 + } + for i := 0; i < config.NumChannels; i++ { + conn, err := transport.DialGRPC(ctx, allOpts...) + if err != nil { + return nil, errDial(i, err) + } + c.conns = append(c.conns, conn) + c.clients = append(c.clients, sppb.NewSpannerClient(conn)) + } + // Prepare session pool. 
+ config.SessionPoolConfig.getRPCClient = func() (sppb.SpannerClient, error) { + // TODO: support more loadbalancing options. + return c.rrNext(), nil + } + sp, err := newSessionPool(database, config.SessionPoolConfig, c.md) + if err != nil { + c.Close() + return nil, err + } + c.idleSessions = sp + return c, nil +} + +// rrNext returns the next available Cloud Spanner RPC client in a round-robin manner. +func (c *Client) rrNext() sppb.SpannerClient { + return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))] +} + +// Close closes the client. +func (c *Client) Close() { + if c.idleSessions != nil { + c.idleSessions.close() + } + for _, conn := range c.conns { + conn.Close() + } +} + +// Single provides a read-only snapshot transaction optimized for the case +// where only a single read or query is needed. This is more efficient than +// using ReadOnlyTransaction() for a single read or query. +// +// Single will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. +func (c *Client) Single() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions} + t.txReadOnly.txReadEnv = t + return t +} + +// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for +// multiple reads from the database. You must call Close() when the +// ReadOnlyTransaction is no longer needed to release resources on the server. +// +// ReadOnlyTransaction will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. +func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{ + singleUse: false, + sp: c.idleSessions, + txReadyOrClosed: make(chan struct{}), + } + t.txReadOnly.txReadEnv = t + return t +} + +// ReadWriteTransaction executes a read-write transaction, with retries as +// necessary. +// +// The function f will be called one or more times. It must not maintain +// any state between calls. +// +// If the transaction cannot be committed or if f returns an IsAborted error, +// ReadWriteTransaction will call f again. It will continue to call f until the +// transaction can be committed or the Context times out or is cancelled. If f +// returns an error other than IsAborted, ReadWriteTransaction will abort the +// transaction and return the error. +// +// To limit the number of retries, set a deadline on the Context rather than +// using a fixed limit on the number of attempts. ReadWriteTransaction will +// retry as needed until that deadline is met. +func (c *Client) ReadWriteTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error) { + var ( + ts time.Time + sh *sessionHandle + ) + err := runRetryable(ctx, func(ctx context.Context) error { + var ( + err error + t *ReadWriteTransaction + ) + if sh == nil || sh.getID() == "" || sh.getClient() == nil { + // Session handle hasn't been allocated or has been destroyed. + sh, err = c.idleSessions.takeWriteSession(ctx) + if err != nil { + // If session retrieval fails, just fail the transaction. 
+ return err + } + t = &ReadWriteTransaction{ + sh: sh, + tx: sh.getTransactionID(), + } + } else { + t = &ReadWriteTransaction{ + sh: sh, + } + } + t.txReadOnly.txReadEnv = t + if err = t.begin(ctx); err != nil { + // Mask error from begin operation as retryable error. + return errRetry(err) + } + ts, err = t.runInTransaction(ctx, f) + if err != nil { + return err + } + return nil + }) + if sh != nil { + sh.recycle() + } + return ts, err +} + +// applyOption controls the behavior of Client.Apply. +type applyOption struct { + // If atLeastOnce == true, Client.Apply will execute the mutations on Cloud Spanner at least once. + atLeastOnce bool +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption func(*applyOption) + +// ApplyAtLeastOnce returns an ApplyOption that removes replay protection. +// +// With this option, Apply may attempt to apply mutations more than once; if +// the mutations are not idempotent, this may lead to a failure being reported +// when the mutation was applied more than once. For example, an insert may +// fail with ALREADY_EXISTS even though the row did not exist before Apply was +// called. For this reason, most users of the library will prefer not to use +// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas +// Apply's default replay protection may require an additional RPC. So this +// option may be appropriate for latency sensitive and/or high throughput blind +// writing. +func ApplyAtLeastOnce() ApplyOption { + return func(ao *applyOption) { + ao.atLeastOnce = true + } +} + +// Apply applies a list of mutations atomically to the database. +func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error) { + ao := &applyOption{} + for _, opt := range opts { + opt(ao) + } + if !ao.atLeastOnce { + return c.ReadWriteTransaction(ctx, func(t *ReadWriteTransaction) error { + t.BufferWrite(ms) + return nil + }) + } + t := &writeOnlyTransaction{c.idleSessions} + return t.applyAtLeastOnce(ctx, ms...) +} diff --git a/vendor/cloud.google.com/go/spanner/client_test.go b/vendor/cloud.google.com/go/spanner/client_test.go new file mode 100644 index 000000000..eb4395626 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/client_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "strings" + "testing" +) + +// Test validDatabaseName() +func TestValidDatabaseName(t *testing.T) { + validDbUri := "projects/spanner-cloud-test/instances/foo/databases/foodb" + invalidDbUris := []string{ + // Completely wrong DB URI. + "foobarDB", + // Project ID contains "/". + "projects/spanner-cloud/test/instances/foo/databases/foodb", + // No instance ID. 
+ "projects/spanner-cloud-test/instances//databases/foodb", + } + if err := validDatabaseName(validDbUri); err != nil { + t.Errorf("validateDatabaseName(%q) = %v, want nil", validDbUri, err) + } + for _, d := range invalidDbUris { + if err, wantErr := validDatabaseName(d), "should conform to pattern"; !strings.Contains(err.Error(), wantErr) { + t.Errorf("validateDatabaseName(%q) = %q, want error pattern %q", validDbUri, err, wantErr) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/doc.go b/vendor/cloud.google.com/go/spanner/doc.go new file mode 100644 index 000000000..3aee32a8d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/doc.go @@ -0,0 +1,319 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package spanner provides a client for reading and writing to Cloud Spanner +databases. See the packages under admin for clients that operate on databases +and instances. + +Note: This package is in alpha. Backwards-incompatible changes may occur +without notice. + +See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction +to Cloud Spanner and additional help on using this API. + +Creating a Client + +To start working with this package, create a client that refers to the database +of interest: + + ctx := context.Background() + client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") + if err != nil { + // TODO: Handle error. + } + + +Simple Reads and Writes + +Two Client methods, Apply and Single, work well for simple reads and writes. As +a quick introduction, here we write a new row to the database and read it back: + + _, err := client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) + if err != nil { + // TODO: Handle error. + } + +All the methods used above are discussed in more detail below. + + +Keys + +Every Cloud Spanner row has a unique key, composed of one or more columns. +Construct keys with a literal of type Key: + + key1 := spanner.Key{"alice"} + + +KeyRanges + +The keys of a Cloud Spanner table are ordered. You can specify ranges of keys +using the KeyRange type: + + kr1 := spanner.KeyRange{Start: key1, End: key2} + +By default, a KeyRange includes its start key but not its end key. Use +the Kind field to specify other boundary conditions: + + // include both keys + kr2 := spanner.KeyRange{Start: key1, End: key2, Kind: spanner.ClosedClosed} + + +KeySets + +A KeySet represents a set of keys. 
AllKeys returns a KeySet that refers to all +the keys in a table: + + ks1 := spanner.AllKeys() + +To construct a set of specific keys, use the Keys function: + + ks2 := spanner.Keys(key1, key2, key3) + +You can also build KeySets from ranges of keys: + + ks3 := spanner.Range(kr1) + +Use UnionKeySets to build up more complex KeySets, or construct one directly +using a KeySet literal: + + ks4 := spanner.KeySet{ + Keys: []spanner.Keys{key1, key2}, + Ranges: []spanner.KeyRange{kr1, kr2}, + } + + +Transactions + +All Cloud Spanner reads and writes occur inside transactions. There are two +types of transactions, read-only and read-write. Read-only transactions cannot +change the database, do not acquire locks, and may access either the current +database state or states in the past. Read-write transactions can read the +database before writing to it, and always apply to the most recent database +state. + + +Single Reads + +The simplest and fastest transaction is a ReadOnlyTransaction that supports a +single read operation. Use Client.Single to create such a transaction. You can +chain the call to Single with a call to a Read method. + +When you only want one row whose key you know, use ReadRow. Provide the table +name, key, and the columns you want to read: + + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + +Read multiple rows with the Read method. It takes a table name, KeySet, and list +of columns: + + iter := client.Single().Read(ctx, "Accounts", keyset1, columns) + +Read returns a RowIterator. You can call the Do method on the iterator and pass +a callback: + + err := iter.Do(func(row *Row) error { + // TODO: use row + return nil + }) + +RowIterator also follows the standard pattern for the Google +Cloud Client Libraries: + + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use row + } + +Always call Stop when you finish using an iterator this way, whether or not you +iterate to the end. (Failing to call Stop could lead you to exhaust the +database's session quota.) + +To read rows with an index, use ReadUsingIndex. + +Statements + +The most general form of reading uses SQL statements. Construct a Statement +with NewStatement, setting any parameters using the Statement's Params map: + + stmt := spanner.NewStatement("SELECT First, Last FROM SINGERS WHERE Last >= @start") + stmt.Params["start"] = "Dylan" + +You can also construct a Statement directly with a struct literal, providing +your own map of parameters. + +Use the Query method to run the statement and obtain an iterator: + + iter := client.Single().Query(ctx, stmt) + + +Rows + +Once you have a Row, via an iterator or a call to ReadRow, you can extract +column values in several ways. Pass in a pointer to a Go variable of the +appropriate type when you extract a value. + +You can extract by column position or name: + + err := row.Column(0, &name) + err = row.ColumnByName("balance", &balance) + +You can extract all the columns at once: + + err = row.Columns(&name, &balance) + +Or you can define a Go struct that corresponds to your columns, and extract +into that: + + var s struct { Name string; Balance int64 } + err = row.ToStruct(&s) + + +For Cloud Spanner columns that may contain NULL, use one of the NullXXX types, +like NullString: + + var ns spanner.NullString + if err =: row.Column(0, &ns); err != nil { + // TODO: Handle error. 
+ } + if ns.Valid { + fmt.Println(ns.StringVal) + } else { + fmt.Println("column is NULL") + } + + +Multiple Reads + +To perform more than one read in a transaction, use ReadOnlyTransaction: + + txn := client.ReadOnlyTransaction() + defer txn.Close() + iter := txn.Query(ctx, stmt1) + // ... + iter = txn.Query(ctx, stmt2) + // ... + +You must call Close when you are done with the transaction. + + +Timestamps and Timestamp Bounds + +Cloud Spanner read-only transactions conceptually perform all their reads at a +single moment in time, called the transaction's read timestamp. Once a read has +started, you can call ReadOnlyTransaction's Timestamp method to obtain the read +timestamp. + +By default, a transaction will pick the most recent time (a time where all +previously committed transactions are visible) for its reads. This provides the +freshest data, but may involve some delay. You can often get a quicker response +if you are willing to tolerate "stale" data. You can control the read timestamp +selected by a transaction by calling the WithTimestampBound method on the +transaction before using it. For example, to perform a query on data that is at +most one minute stale, use + + client.Single(). + WithTimestampBound(spanner.MaxStaleness(1*time.Minute)). + Query(ctx, stmt) + +See the documentation of TimestampBound for more details. + + +Mutations + +To write values to a Cloud Spanner database, construct a Mutation. The spanner +package has functions for inserting, updating and deleting rows. Except for the +Delete methods, which take a Key or KeyRange, each mutation-building function +comes in three varieties. + +One takes lists of columns and values along with the table name: + + m1 := spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"}) + +One takes a map from column names to values: + + m2 := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + +And the third accepts a struct value, and determines the columns from the +struct field names: + + type User struct { Name, Email string } + u := User{Name: "alice", Email: "a@example.com"} + m3, err := spanner.InsertStruct("Users", u) + + +Writes + +To apply a list of mutations to the database, use Apply: + + _, err := client.Apply(ctx, []*spanner.Mutation{m1, m2, m3}) + +If you need to read before writing in a single transaction, use a +ReadWriteTransaction. ReadWriteTransactions may abort and need to be retried. +You pass in a function to ReadWriteTransaction, and the client will handle the +retries automatically. Use the transaction's BufferWrite method to buffer +mutations, which will all be executed at the end of the transaction: + + _, err := client.ReadWriteTransaction(ctx, func(txn *spanner.ReadWriteTransaction) error { + var balance int64 + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + // This function will be called again if this is an IsAborted error. + return err + } + if err := row.Column(0, &balance); err != nil { + return err + } + + if balance <= 10 { + return errors.New("insufficient funds in account") + } + balance -= 10 + m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) + txn.BufferWrite([]*spanner.Mutation{m}) + + // The buffered mutation will be committed. If the commit + // fails with an IsAborted error, this function will be called + // again. 
+ return nil + }) + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package spanner // import "cloud.google.com/go/spanner" diff --git a/vendor/cloud.google.com/go/spanner/errors.go b/vendor/cloud.google.com/go/spanner/errors.go new file mode 100644 index 000000000..e1ec0b22e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/errors.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Error is the structured error returned by Cloud Spanner client. +type Error struct { + // Code is the canonical error code for describing the nature of a + // particular error. + Code codes.Code + // Desc explains more details of the error. + Desc string + // trailers are the trailers returned in the response, if any. + trailers metadata.MD +} + +// Error implements error.Error. +func (e *Error) Error() string { + if e == nil { + return fmt.Sprintf("spanner: OK") + } + return fmt.Sprintf("spanner: code = %q, desc = %q", e.Code, e.Desc) +} + +// decorate decorates an existing spanner.Error with more information. +func (e *Error) decorate(info string) { + e.Desc = fmt.Sprintf("%v, %v", info, e.Desc) +} + +// spannerErrorf generates a *spanner.Error with the given error code and +// description. +func spannerErrorf(ec codes.Code, format string, args ...interface{}) error { + return &Error{ + Code: ec, + Desc: fmt.Sprintf(format, args...), + } +} + +// toSpannerError converts general Go error to *spanner.Error. +func toSpannerError(err error) error { + return toSpannerErrorWithMetadata(err, nil) +} + +// toSpannerErrorWithMetadata converts general Go error and grpc trailers to *spanner.Error. +func toSpannerErrorWithMetadata(err error, trailers metadata.MD) error { + if err == nil { + return nil + } + if se, ok := err.(*Error); ok { + se.trailers = metadata.Join(se.trailers, trailers) + return se + } + if grpc.Code(err) == codes.Unknown { + return &Error{codes.Unknown, err.Error(), trailers} + } + return &Error{grpc.Code(err), grpc.ErrorDesc(err), trailers} +} + +// ErrCode extracts the canonical error code from a Go error. +func ErrCode(err error) codes.Code { + se, ok := toSpannerError(err).(*Error) + if !ok { + return codes.Unknown + } + return se.Code +} + +// ErrDesc extracts the Cloud Spanner error description from a Go error. +func ErrDesc(err error) string { + se, ok := toSpannerError(err).(*Error) + if !ok { + return err.Error() + } + return se.Desc +} + +// errTrailers extracts the grpc trailers if present from a Go error. 
+func errTrailers(err error) metadata.MD { + se, ok := err.(*Error) + if !ok { + return nil + } + return se.trailers +} diff --git a/vendor/cloud.google.com/go/spanner/examples_test.go b/vendor/cloud.google.com/go/spanner/examples_test.go new file mode 100644 index 000000000..ed99dcadf --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/examples_test.go @@ -0,0 +1,420 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner_test + +import ( + "errors" + "fmt" + "time" + + "cloud.google.com/go/spanner" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + const myDB = "projects/my-project/instances/my-instance/database/my-db" + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +const myDB = "projects/my-project/instances/my-instance/database/my-db" + +func ExampleNewClientWithConfig() { + ctx := context.Background() + const myDB = "projects/my-project/instances/my-instance/database/my-db" + client, err := spanner.NewClientWithConfig(ctx, myDB, spanner.ClientConfig{ + NumChannels: 10, + }) + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. + client.Close() // Close client when done. +} + +func ExampleClient_Single() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleClient_ReadOnlyTransaction() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + t := client.ReadOnlyTransaction() + defer t.Close() + // TODO: Read with t using Read, ReadRow, ReadUsingIndex, or Query. +} + +func ExampleClient_ReadWriteTransaction() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + _, err = client.ReadWriteTransaction(ctx, func(txn *spanner.ReadWriteTransaction) error { + var balance int64 + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + // This function will be called again if this is an + // IsAborted error. + return err + } + if err := row.Column(0, &balance); err != nil { + return err + } + + if balance <= 10 { + return errors.New("insufficient funds in account") + } + balance -= 10 + m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) + txn.BufferWrite([]*spanner.Mutation{m}) + + // The buffered mutation will be committed. If the commit + // fails with an IsAborted error, this function will be called + // again. + return nil + }) + if err != nil { + // TODO: Handle error. 
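// One possible way to inspect such a failure is to branch on its canonical
// code via ErrCode (a sketch; it assumes google.golang.org/grpc/codes is
// imported):
//
//	if spanner.ErrCode(err) == codes.DeadlineExceeded {
//		// The transaction did not finish before the context deadline;
//		// spanner.ErrDesc(err) carries the detailed description.
//	}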
+ } +} + +func ExampleClient_Apply() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + m := spanner.Update("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _, err = client.Apply(ctx, []*spanner.Mutation{m}) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleInsert() { + m := spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleInsertMap() { + m := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleInsertStruct() { + type User struct { + Name, Email string + } + u := User{Name: "alice", Email: "a@example.com"} + m, err := spanner.InsertStruct("Users", u) + if err != nil { + // TODO: Handle error. + } + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDelete() { + m := spanner.Delete("Users", spanner.Key{"alice"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDeleteKeyRange() { + m := spanner.DeleteKeyRange("Users", spanner.KeyRange{ + Start: spanner.Key{"alice"}, + End: spanner.Key{"bob"}, + Kind: spanner.ClosedClosed, + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleRowIterator_Next() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + var firstName string + if err := row.Column(0, &firstName); err != nil { + // TODO: Handle error. + } + fmt.Println(firstName) + } +} + +func ExampleRowIterator_Do() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + err = iter.Do(func(r *spanner.Row) error { + var firstName string + if err := r.Column(0, &firstName); err != nil { + return err + } + fmt.Println(firstName) + return nil + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleRow_Size() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.Size()) // size is 2 +} + +func ExampleRow_ColumnName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnName(1)) // prints "balance" +} + +func ExampleRow_ColumnIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. 
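// Assuming ReadRow reports a missing row as an error with code NotFound (an
// assumption about client behavior, not something established above), that
// case could be detected with ErrCode (sketch; assumes
// google.golang.org/grpc/codes is imported):
//
//	if spanner.ErrCode(err) == codes.NotFound {
//		// No row exists for this key.
//	}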
+ } + index, err := row.ColumnIndex("balance") + if err != nil { + // TODO: Handle error. + } + fmt.Println(index) +} + +func ExampleRow_ColumnNames() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnNames()) +} + +func ExampleRow_ColumnByName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var balance int64 + if err := row.ColumnByName("balance", &balance); err != nil { + // TODO: Handle error. + } + fmt.Println(balance) +} + +func ExampleRow_Columns() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var name string + var balance int64 + if err := row.Columns(&name, &balance); err != nil { + // TODO: Handle error. + } + fmt.Println(name, balance) +} + +func ExampleRow_ToStruct() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + + type Account struct { + Name string + Balance int64 + } + + var acct Account + if err := row.ToStruct(&acct); err != nil { + // TODO: Handle error. + } + fmt.Println(acct) +} + +func ExampleReadOnlyTransaction_Read() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Read(ctx, "Users", + spanner.Keys(spanner.Key{"alice"}, spanner.Key{"bob"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadUsingIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().ReadUsingIndex(ctx, "Users", + "UsersByEmail", + spanner.Keys(spanner.Key{"a@example.com"}, spanner.Key{"b@example.com"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadRow() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_Query() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleNewStatement() { + stmt := spanner.NewStatement("SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start") + stmt.Params["start"] = "Dylan" + // TODO: Use stmt in Query. 
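// For instance, with a client and ctx as in the other examples in this file
// (a sketch):
//
//	iter := client.Single().Query(ctx, stmt)
//	defer iter.Stop()
//	// iterate with iter.Next or iter.Do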
+} + +func ExampleNewStatement_structLiteral() { + stmt := spanner.Statement{ + SQL: "SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start", + Params: map[string]interface{}{"start": "Dylan"}, + } + _ = stmt // TODO: Use stmt in Query. +} + +func ExampleReadOnlyTransaction_Timestamp() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single() + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. + } + fmt.Println("read happened at", readTimestamp) + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_WithTimestampBound() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single().WithTimestampBound(spanner.MaxStaleness(30 * time.Second)) + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. + } + fmt.Println("read happened at", readTimestamp) +} diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go new file mode 100644 index 000000000..f278c7cc6 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go @@ -0,0 +1,355 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "errors" + "fmt" + "reflect" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes/empty" + proto3 "github.com/golang/protobuf/ptypes/struct" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Action is a mocked RPC activity that MockCloudSpannerClient will take. +type Action struct { + method string + err error +} + +// NewAction creates Action objects. +func NewAction(m string, e error) Action { + return Action{m, e} +} + +// MockCloudSpannerClient is a mock implementation of sppb.SpannerClient. +type MockCloudSpannerClient struct { + mu sync.Mutex + t *testing.T + // Live sessions on the client. + sessions map[string]bool + // Expected set of actions that will be executed by the client. + actions []Action + // Session ping history + pings []string + // Injected error, will be returned by all APIs + injErr map[string]error + // nice client will not fail on any request + nice bool +} + +// NewMockCloudSpannerClient creates new MockCloudSpannerClient instance. 
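// A typical construction in a test might look like this (sketch):
//
//	mc := NewMockCloudSpannerClient(t,
//		NewAction("Begin", nil),
//		NewAction("Commit", nil),
//	)
//	// mc.MakeNice() would relax it so unexpected requests do not fail the test.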
+func NewMockCloudSpannerClient(t *testing.T, acts ...Action) *MockCloudSpannerClient { + mc := &MockCloudSpannerClient{t: t, sessions: map[string]bool{}, injErr: map[string]error{}} + mc.SetActions(acts...) + return mc +} + +// MakeNice makes this a nice mock which will not fail on any request. +func (m *MockCloudSpannerClient) MakeNice() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = true +} + +// MakeStrict makes this a strict mock which will fail on any unexpected request. +func (m *MockCloudSpannerClient) MakeStrict() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = false +} + +// InjectError injects a global error that will be returned by all APIs regardless of +// the actions array. +func (m *MockCloudSpannerClient) InjectError(method string, err error) { + m.mu.Lock() + defer m.mu.Unlock() + m.injErr[method] = err +} + +// SetActions sets the new set of expected actions to MockCloudSpannerClient. +func (m *MockCloudSpannerClient) SetActions(acts ...Action) { + m.mu.Lock() + defer m.mu.Unlock() + m.actions = []Action{} + for _, act := range acts { + m.actions = append(m.actions, act) + } +} + +// DumpPings dumps the ping history. +func (m *MockCloudSpannerClient) DumpPings() []string { + m.mu.Lock() + defer m.mu.Unlock() + return append([]string(nil), m.pings...) +} + +// DumpSessions dumps the internal session table. +func (m *MockCloudSpannerClient) DumpSessions() map[string]bool { + m.mu.Lock() + defer m.mu.Unlock() + st := map[string]bool{} + for s, v := range m.sessions { + st[s] = v + } + return st +} + +// CreateSession is a placeholder for SpannerClient.CreateSession. +func (m *MockCloudSpannerClient) CreateSession(c context.Context, r *sppb.CreateSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["CreateSession"]; err != nil { + return nil, err + } + s := &sppb.Session{} + if r.Database != "mockdb" { + // Reject other databases + return s, grpc.Errorf(codes.NotFound, fmt.Sprintf("database not found: %v", r.Database)) + } + // Generate & record session name. + s.Name = fmt.Sprintf("mockdb-%v", time.Now().UnixNano()) + m.sessions[s.Name] = true + return s, nil +} + +// GetSession is a placeholder for SpannerClient.GetSession. +func (m *MockCloudSpannerClient) GetSession(c context.Context, r *sppb.GetSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["GetSession"]; err != nil { + return nil, err + } + m.pings = append(m.pings, r.Name) + if _, ok := m.sessions[r.Name]; !ok { + return nil, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + } + return &sppb.Session{Name: r.Name}, nil +} + +// DeleteSession is a placeholder for SpannerClient.DeleteSession. +func (m *MockCloudSpannerClient) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["DeleteSession"]; err != nil { + return nil, err + } + if _, ok := m.sessions[r.Name]; !ok { + // Session not found. + return &empty.Empty{}, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + } + // Delete session from in-memory table. + delete(m.sessions, r.Name) + return &empty.Empty{}, nil +} + +// ExecuteSql is a placeholder for SpannerClient.ExecuteSql. 
+func (m *MockCloudSpannerClient) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (*sppb.ResultSet, error) { + return nil, errors.New("Unimplemented") +} + +// ExecuteStreamingSql is a mock implementation of SpannerClient.ExecuteStreamingSql. +func (m *MockCloudSpannerClient) ExecuteStreamingSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (sppb.Spanner_ExecuteStreamingSqlClient, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["ExecuteStreamingSql"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected ExecuteStreamingSql executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "ExecuteStreamingSql" { + m.t.Fatalf("unexpected ExecuteStreamingSql call, want action: %v", act) + } + wantReq := &sppb.ExecuteSqlRequest{ + Session: "mocksession", + Transaction: &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: &sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + }, + ReturnReadTimestamp: false, + }, + }, + }, + }, + }, + Sql: "mockquery", + Params: &proto3.Struct{ + Fields: map[string]*proto3.Value{"var1": &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "abc"}}}, + }, + ParamTypes: map[string]*sppb.Type{"var1": &sppb.Type{Code: sppb.TypeCode_STRING}}, + } + if !reflect.DeepEqual(r, wantReq) { + return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq) + } + if act.err != nil { + return nil, act.err + } + return nil, errors.New("query never succeeds on mock client") +} + +// Read is a placeholder for SpannerClient.Read. +func (m *MockCloudSpannerClient) Read(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (*sppb.ResultSet, error) { + m.t.Fatalf("Read is unimplemented") + return nil, errors.New("Unimplemented") +} + +// StreamingRead is a placeholder for SpannerClient.StreamingRead. 
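// In this mock it validates the incoming ReadRequest against a fixed expected
// request for table "t_mock" (with index "idx1" when the scripted action is
// "StreamingIndexRead") and then returns either the scripted error or a
// generic failure, since reads never succeed on the mock client.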
+func (m *MockCloudSpannerClient) StreamingRead(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (sppb.Spanner_StreamingReadClient, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["StreamingRead"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected StreamingRead executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "StreamingRead" && act.method != "StreamingIndexRead" { + m.t.Fatalf("unexpected read call, want action: %v", act) + } + wantReq := &sppb.ReadRequest{ + Session: "mocksession", + Transaction: &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: &sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + }, + ReturnReadTimestamp: false, + }, + }, + }, + }, + }, + Table: "t_mock", + Columns: []string{"col1", "col2"}, + KeySet: &sppb.KeySet{ + []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{ + &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + []*sppb.KeyRange{}, + false, + }, + } + if act.method == "StreamingIndexRead" { + wantReq.Index = "idx1" + } + if !reflect.DeepEqual(r, wantReq) { + return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq) + } + if act.err != nil { + return nil, act.err + } + return nil, errors.New("read never succeeds on mock client") +} + +// BeginTransaction is a placeholder for SpannerClient.BeginTransaction. +func (m *MockCloudSpannerClient) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest, opts ...grpc.CallOption) (*sppb.Transaction, error) { + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + if err := m.injErr["BeginTransaction"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected Begin executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "Begin" { + m.t.Fatalf("unexpected Begin call, want action: %v", act) + } + if act.err != nil { + return nil, act.err + } + } + resp := &sppb.Transaction{Id: []byte("transaction-1")} + if _, ok := r.Options.Mode.(*sppb.TransactionOptions_ReadOnly_); ok { + resp.ReadTimestamp = &pbt.Timestamp{Seconds: 3, Nanos: 4} + } + return resp, nil +} + +// Commit is a placeholder for SpannerClient.Commit. +func (m *MockCloudSpannerClient) Commit(c context.Context, r *sppb.CommitRequest, opts ...grpc.CallOption) (*sppb.CommitResponse, error) { + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + if err := m.injErr["Commit"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected Commit executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "Commit" { + m.t.Fatalf("unexpected Commit call, want action: %v", act) + } + if act.err != nil { + return nil, act.err + } + } + return &sppb.CommitResponse{CommitTimestamp: &pbt.Timestamp{Seconds: 1, Nanos: 2}}, nil +} + +// Rollback is a placeholder for SpannerClient.Rollback. 
+func (m *MockCloudSpannerClient) Rollback(c context.Context, r *sppb.RollbackRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + if err := m.injErr["Rollback"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected Rollback executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "Rollback" { + m.t.Fatalf("unexpected Rollback call, want action: %v", act) + } + if act.err != nil { + return nil, act.err + } + } + return nil, nil +} diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go new file mode 100644 index 000000000..7a04e7f7f --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go @@ -0,0 +1,255 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "testing" + "time" + + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes/empty" + proto3 "github.com/golang/protobuf/ptypes/struct" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // KvMeta is the Metadata for mocked KV table. + KvMeta = sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Key", + Type: &sppb.Type{Code: sppb.TypeCode_STRING}, + }, + { + Name: "Value", + Type: &sppb.Type{Code: sppb.TypeCode_STRING}, + }, + }, + }, + } +) + +// MockCtlMsg encapsulates PartialResultSet/error that might be sent to +// client +type MockCtlMsg struct { + // If ResumeToken == true, mock server will generate a row with + // resume token. + ResumeToken bool + // If Err != nil, mock server will return error in RPC response. + Err error +} + +// MockCloudSpanner is a mock implementation of SpannerServer interface. +// TODO: make MockCloudSpanner a full-fleged Cloud Spanner implementation. +type MockCloudSpanner struct { + s *grpc.Server + t *testing.T + addr string + msgs chan MockCtlMsg + readTs time.Time + next int +} + +// Addr returns the listening address of mock server. +func (m *MockCloudSpanner) Addr() string { + return m.addr +} + +// AddMsg generates a new mocked row which can be received by client. +func (m *MockCloudSpanner) AddMsg(err error, resumeToken bool) { + msg := MockCtlMsg{ + ResumeToken: resumeToken, + Err: err, + } + if err == io.EOF { + close(m.msgs) + } else { + m.msgs <- msg + } +} + +// Done signals an end to a mocked stream. +func (m *MockCloudSpanner) Done() { + close(m.msgs) +} + +// CreateSession is a placeholder for SpannerServer.CreateSession. 
+func (m *MockCloudSpanner) CreateSession(c context.Context, r *sppb.CreateSessionRequest) (*sppb.Session, error) { + m.t.Fatalf("CreateSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// GetSession is a placeholder for SpannerServer.GetSession. +func (m *MockCloudSpanner) GetSession(c context.Context, r *sppb.GetSessionRequest) (*sppb.Session, error) { + m.t.Fatalf("GetSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// DeleteSession is a placeholder for SpannerServer.DeleteSession. +func (m *MockCloudSpanner) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest) (*empty.Empty, error) { + m.t.Fatalf("DeleteSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// ExecuteSql is a placeholder for SpannerServer.ExecuteSql. +func (m *MockCloudSpanner) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest) (*sppb.ResultSet, error) { + m.t.Fatalf("ExecuteSql is unimplemented") + return nil, errors.New("Unimplemented") +} + +// EncodeResumeToken return mock resume token encoding for an uint64 integer. +func EncodeResumeToken(t uint64) []byte { + rt := make([]byte, 16) + binary.PutUvarint(rt, t) + return rt +} + +// DecodeResumeToken decodes a mock resume token into an uint64 integer. +func DecodeResumeToken(t []byte) (uint64, error) { + s, n := binary.Uvarint(t) + if n <= 0 { + return 0, fmt.Errorf("invalid resume token: %v", t) + } + return s, nil +} + +// ExecuteStreamingSql is a mock implementation of SpannerServer.ExecuteStreamingSql. +func (m *MockCloudSpanner) ExecuteStreamingSql(r *sppb.ExecuteSqlRequest, s sppb.Spanner_ExecuteStreamingSqlServer) error { + switch r.Sql { + case "SELECT * from t_unavailable": + return grpc.Errorf(codes.Unavailable, "mock table unavailable") + case "SELECT t.key key, t.value value FROM t_mock t": + if r.ResumeToken != nil { + s, err := DecodeResumeToken(r.ResumeToken) + if err != nil { + return err + } + m.next = int(s) + 1 + } + for { + msg, more := <-m.msgs + if !more { + break + } + if msg.Err == nil { + var rt []byte + if msg.ResumeToken { + rt = EncodeResumeToken(uint64(m.next)) + } + meta := KvMeta + meta.Transaction = &sppb.Transaction{ + ReadTimestamp: &pbt.Timestamp{ + Seconds: m.readTs.Unix(), + Nanos: int32(m.readTs.Nanosecond()), + }, + } + err := s.Send(&sppb.PartialResultSet{ + Metadata: &meta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("foo-%02d", m.next)}}, + {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("bar-%02d", m.next)}}, + }, + ResumeToken: rt, + }) + m.next = m.next + 1 + if err != nil { + return err + } + continue + } + return msg.Err + } + return nil + default: + return fmt.Errorf("unsupported SQL: %v", r.Sql) + } +} + +// Read is a placeholder for SpannerServer.Read. +func (m *MockCloudSpanner) Read(c context.Context, r *sppb.ReadRequest) (*sppb.ResultSet, error) { + m.t.Fatalf("Read is unimplemented") + return nil, errors.New("Unimplemented") +} + +// StreamingRead is a placeholder for SpannerServer.StreamingRead. +func (m *MockCloudSpanner) StreamingRead(r *sppb.ReadRequest, s sppb.Spanner_StreamingReadServer) error { + m.t.Fatalf("StreamingRead is unimplemented") + return errors.New("Unimplemented") +} + +// BeginTransaction is a placeholder for SpannerServer.BeginTransaction. 
+func (m *MockCloudSpanner) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest) (*sppb.Transaction, error) { + m.t.Fatalf("BeginTransaction is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Commit is a placeholder for SpannerServer.Commit. +func (m *MockCloudSpanner) Commit(c context.Context, r *sppb.CommitRequest) (*sppb.CommitResponse, error) { + m.t.Fatalf("Commit is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Rollback is a placeholder for SpannerServer.Rollback. +func (m *MockCloudSpanner) Rollback(c context.Context, r *sppb.RollbackRequest) (*empty.Empty, error) { + m.t.Fatalf("Rollback is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Serve runs a MockCloudSpanner listening on a random localhost address. +func (m *MockCloudSpanner) Serve() { + m.s = grpc.NewServer() + if m.addr == "" { + m.addr = "localhost:0" + } + lis, err := net.Listen("tcp", m.addr) + if err != nil { + m.t.Fatalf("Failed to listen: %v", err) + } + go m.s.Serve(lis) + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + m.t.Fatalf("Failed to parse listener address: %v", err) + } + sppb.RegisterSpannerServer(m.s, m) + m.addr = "localhost:" + port +} + +// Stop terminates MockCloudSpanner and closes the serving port. +func (m *MockCloudSpanner) Stop() { + m.s.Stop() +} + +// NewMockCloudSpanner creates a new MockCloudSpanner instance. +func NewMockCloudSpanner(t *testing.T, ts time.Time) *MockCloudSpanner { + mcs := &MockCloudSpanner{ + t: t, + msgs: make(chan MockCtlMsg, 1000), + readTs: ts, + } + return mcs +} diff --git a/vendor/cloud.google.com/go/spanner/key.go b/vendor/cloud.google.com/go/spanner/key.go new file mode 100644 index 000000000..3b6886370 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/key.go @@ -0,0 +1,321 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "bytes" + "fmt" + "time" + + "google.golang.org/grpc/codes" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// A Key can be either a Cloud Spanner row's primary key or a secondary index key. +// It is essentially an interface{} array, which represents a set of Cloud Spanner +// columns. A Key type has the following usages: +// +// - Used as primary key which uniquely identifies a Cloud Spanner row. +// - Used as secondary index key which maps to a set of Cloud Spanner rows +// indexed under it. +// - Used as endpoints of primary key/secondary index ranges, +// see also the KeyRange type. +// +// Rows that are identified by the Key type are outputs of read operation or targets of +// delete operation in a mutation. Note that for Insert/Update/InsertOrUpdate/Update +// mutation types, although they don't require a primary key explicitly, the column list +// provided must contain enough columns that can comprise a primary key. +// +// Keys are easy to construct. 
For example, suppose you have a table with a +// primary key of username and product ID. To make a key for this table: +// +// key := spanner.Key{"john", 16} +// +// See the description of Row and Mutation types for how Go types are +// mapped to Cloud Spanner types. For convenience, Key type supports a wide range +// of Go types: +// - int, int8, int16, int32, int64, and NullInt64 are mapped to Cloud Spanner's INT64 type. +// - uint8, uint16 and uint32 are also mapped to Cloud Spanner's INT64 type. +// - float32, float64, NullFloat64 are mapped to Cloud Spanner's FLOAT64 type. +// - bool and NullBool are mapped to Cloud Spanner's BOOL type. +// - []byte is mapped to Cloud Spanner's BYTES type. +// - string and NullString are mapped to Cloud Spanner's STRING type. +// - time.Time and NullTime are mapped to Cloud Spanner's TIMESTAMP type. +// - civil.Date and NullDate are mapped to Cloud Spanner's DATE type. +type Key []interface{} + +// errInvdKeyPartType returns error for unsupported key part type. +func errInvdKeyPartType(part interface{}) error { + return spannerErrorf(codes.InvalidArgument, "key part has unsupported type %T", part) +} + +// keyPartValue converts a part of the Key (which is a valid Cloud Spanner type) +// into a proto3.Value. Used for encoding Key type into protobuf. +func keyPartValue(part interface{}) (pb *proto3.Value, err error) { + switch v := part.(type) { + case int: + pb, _, err = encodeValue(int64(v)) + case int8: + pb, _, err = encodeValue(int64(v)) + case int16: + pb, _, err = encodeValue(int64(v)) + case int32: + pb, _, err = encodeValue(int64(v)) + case uint8: + pb, _, err = encodeValue(int64(v)) + case uint16: + pb, _, err = encodeValue(int64(v)) + case uint32: + pb, _, err = encodeValue(int64(v)) + case float32: + pb, _, err = encodeValue(float64(v)) + case int64, float64, NullInt64, NullFloat64, bool, NullBool, []byte, string, NullString, time.Time, civil.Date, NullTime, NullDate: + pb, _, err = encodeValue(v) + default: + return nil, errInvdKeyPartType(v) + } + return pb, err +} + +// proto converts a spanner.Key into a proto3.ListValue. +func (key Key) proto() (*proto3.ListValue, error) { + lv := &proto3.ListValue{} + lv.Values = make([]*proto3.Value, 0, len(key)) + for _, part := range key { + v, err := keyPartValue(part) + if err != nil { + return nil, err + } + lv.Values = append(lv.Values, v) + } + return lv, nil +} + +// String implements fmt.Stringer for Key. For string, []byte and NullString, it +// prints the uninterpreted bytes of their contents, leaving caller with the +// opportunity to escape the output. +func (key Key) String() string { + b := &bytes.Buffer{} + fmt.Fprint(b, "(") + for i, part := range []interface{}(key) { + if i != 0 { + fmt.Fprint(b, ",") + } + switch v := part.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool: + // Use %v to print numeric types and bool. + fmt.Fprintf(b, "%v", v) + case string: + fmt.Fprintf(b, "%q", v) + case []byte: + if v != nil { + fmt.Fprintf(b, "%q", v) + } else { + fmt.Fprint(b, "") + } + case NullInt64, NullFloat64, NullBool, NullString, NullTime, NullDate: + // The above types implement fmt.Stringer. 
+ fmt.Fprintf(b, "%s", v) + case civil.Date: + fmt.Fprintf(b, "%q", v) + case time.Time: + fmt.Fprintf(b, "%q", v.Format(time.RFC3339Nano)) + default: + fmt.Fprintf(b, "%v", v) + } + } + fmt.Fprint(b, ")") + return b.String() +} + +// KeyRangeKind describes the kind of interval represented by a KeyRange: +// whether it is open or closed on the left and right. +type KeyRangeKind int + +const ( + // ClosedOpen is closed on the left and open on the right: the Start + // key is included, the End key is excluded. + ClosedOpen KeyRangeKind = iota + + // ClosedClosed is closed on the left and the right: both keys are included. + ClosedClosed + + // OpenClosed is open on the left and closed on the right: the Start + // key is excluded, the End key is included. + OpenClosed + + // OpenOpen is open on the left and the right: neither key is included. + OpenOpen +) + +// A KeyRange represents a range of rows in a table or index. +// +// A range has a Start key and an End key. IncludeStart and IncludeEnd +// indicate whether the Start and End keys are included in the range. +// +// For example, consider the following table definition: +// +// CREATE TABLE UserEvents ( +// UserName STRING(MAX), +// EventDate STRING(10), +// ) PRIMARY KEY(UserName, EventDate); +// +// The following keys name rows in this table: +// +// spanner.Key{"Bob", "2014-09-23"} +// spanner.Key{"Alfred", "2015-06-12"} +// +// Since the UserEvents table's PRIMARY KEY clause names two columns, each +// UserEvents key has two elements; the first is the UserName, and the second +// is the EventDate. +// +// Key ranges with multiple components are interpreted lexicographically by +// component using the table or index key's declared sort order. For example, +// the following range returns all events for user "Bob" that occurred in the +// year 2015: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob", "2015-01-01"}, +// End: spanner.Key{"Bob", "2015-12-31"}, +// Kind: ClosedClosed, +// } +// +// Start and end keys can omit trailing key components. This affects the +// inclusion and exclusion of rows that exactly match the provided key +// components: if IncludeStart is true, then rows that exactly match the +// provided components of the Start key are included; if IncludeStart is false +// then rows that exactly match are not included. IncludeEnd and End key +// behave in the same fashion. +// +// For example, the following range includes all events for "Bob" that occurred +// during and after the year 2000: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob", "2000-01-01"}, +// End: spanner.Key{"Bob"}, +// Kind: ClosedClosed, +// } +// +// The next example retrieves all events for "Bob": +// +// spanner.PrefixRange(spanner.Key{"Bob"}) +// +// To retrieve events before the year 2000: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob"}, +// End: spanner.Key{"Bob", "2000-01-01"}, +// Kind: ClosedOpen, +// } +// +// Although we specified a Kind for this KeyRange, we didn't need to, because +// the default is ClosedOpen. In later examples we'll omit Kind if it is +// ClosedOpen. 
+// +// The following range includes all rows in a table or under a +// index: +// +// spanner.AllKeys() +// +// This range returns all users whose UserName begins with any +// character from A to C: +// +// spanner.KeyRange{ +// Start: spanner.Key{"A"}, +// End: spanner.Key{"D"}, +// } +// +// This range returns all users whose UserName begins with B: +// +// spanner.KeyRange{ +// Start: spanner.Key{"B"}, +// End: spanner.Key{"C"}, +// } +// +// Key ranges honor column sort order. For example, suppose a table is defined +// as follows: +// +// CREATE TABLE DescendingSortedTable { +// Key INT64, +// ... +// ) PRIMARY KEY(Key DESC); +// +// The following range retrieves all rows with key values between 1 and 100 +// inclusive: +// +// spanner.KeyRange{ +// Start: spanner.Key{100}, +// End: spanner.Key{1}, +// Kind: ClosedClosed, +// } +// +// Note that 100 is passed as the start, and 1 is passed as the end, because +// Key is a descending column in the schema. +type KeyRange struct { + // Start specifies the left boundary of the key range; End specifies + // the right boundary of the key range. + Start, End Key + + // Kind describes whether the boundaries of the key range include + // their keys. + Kind KeyRangeKind +} + +// String implements fmt.Stringer for KeyRange type. +func (r KeyRange) String() string { + var left, right string + switch r.Kind { + case ClosedClosed: + left, right = "[", "]" + case ClosedOpen: + left, right = "[", ")" + case OpenClosed: + left, right = "(", "]" + case OpenOpen: + left, right = "(", ")" + default: + left, right = "?", "?" + } + return fmt.Sprintf("%s%s,%s%s", left, r.Start, r.End, right) +} + +// proto converts KeyRange into sppb.KeyRange. +func (r KeyRange) proto() (*sppb.KeyRange, error) { + var err error + var start, end *proto3.ListValue + pb := &sppb.KeyRange{} + if start, err = r.Start.proto(); err != nil { + return nil, err + } + if end, err = r.End.proto(); err != nil { + return nil, err + } + if r.Kind == ClosedClosed || r.Kind == ClosedOpen { + pb.StartKeyType = &sppb.KeyRange_StartClosed{StartClosed: start} + } else { + pb.StartKeyType = &sppb.KeyRange_StartOpen{StartOpen: start} + } + if r.Kind == ClosedClosed || r.Kind == OpenClosed { + pb.EndKeyType = &sppb.KeyRange_EndClosed{EndClosed: end} + } else { + pb.EndKeyType = &sppb.KeyRange_EndOpen{EndOpen: end} + } + return pb, nil +} diff --git a/vendor/cloud.google.com/go/spanner/key_test.go b/vendor/cloud.google.com/go/spanner/key_test.go new file mode 100644 index 000000000..66b957d3d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/key_test.go @@ -0,0 +1,253 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test Key.String() and Key.proto(). 
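For reference, the Kind handling in KeyRange.proto above reduces to a fixed mapping. This small sketch (not part of the vendored file) restates it using the package's exported constants; the strings mirror the sppb.KeyRange field names:

    import "cloud.google.com/go/spanner"

    // boundaryNames mirrors the switch in KeyRange.proto: Kind alone decides
    // whether each boundary is encoded as the closed or the open oneof variant.
    func boundaryNames(k spanner.KeyRangeKind) (start, end string) {
        switch k {
        case spanner.ClosedClosed:
            return "start_closed", "end_closed"
        case spanner.ClosedOpen: // the default Kind
            return "start_closed", "end_open"
        case spanner.OpenClosed:
            return "start_open", "end_closed"
        case spanner.OpenOpen:
            return "start_open", "end_open"
        default:
            return "start_open", "end_open" // proto() treats unrecognized kinds as open/open
        }
    }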
+func TestKey(t *testing.T) { + tm, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + dt, _ := civil.ParseDate("2016-11-15") + for _, test := range []struct { + k Key + wantProto *proto3.ListValue + wantStr string + }{ + { + k: Key{int(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int64(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{true}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{float32(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{float64(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{"value"}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{[]byte(nil)}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{[]byte{}}, + wantProto: listValueProto(stringProto("")), + wantStr: `("")`, + }, + { + k: Key{tm}, + wantProto: listValueProto(stringProto("2016-11-15T15:04:05.999999999Z")), + wantStr: `("2016-11-15T15:04:05.999999999Z")`, + }, + {k: Key{dt}, + wantProto: listValueProto(stringProto("2016-11-15")), + wantStr: `("2016-11-15")`, + }, + { + k: Key{[]byte("value")}, + wantProto: listValueProto(bytesProto([]byte("value"))), + wantStr: `("value")`, + }, + { + k: Key{NullInt64{1, true}}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{NullInt64{2, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullFloat64{1.5, true}}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{NullFloat64{2.0, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullBool{true, true}}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{NullBool{true, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullString{"value", true}}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{NullString{"value", false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullTime{tm, true}}, + wantProto: listValueProto(timeProto(tm)), + wantStr: `("2016-11-15T15:04:05.999999999Z")`, + }, + + { + k: Key{NullTime{time.Now(), false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullDate{dt, true}}, + wantProto: listValueProto(dateProto(dt)), + wantStr: `("2016-11-15")`, + }, + { + k: Key{NullDate{civil.Date{}, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{int(1), NullString{"value", false}, "value", 1.5, true}, + wantProto: listValueProto(stringProto("1"), nullProto(), stringProto("value"), floatProto(1.5), boolProto(true)), + wantStr: `(1,,"value",1.5,true)`, + }, + } { + if got := test.k.String(); got != test.wantStr { + 
t.Errorf("%v.String() = %v, want %v", test.k, got, test.wantStr) + } + gotProto, err := test.k.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.k, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.k, gotProto, test.wantProto) + } + } +} + +// Test KeyRange.String() and KeyRange.proto(). +func TestKeyRange(t *testing.T) { + for _, test := range []struct { + kr KeyRange + wantProto *sppb.KeyRange + wantStr string + }{ + { + kr: KeyRange{Key{"A"}, Key{"D"}, OpenOpen}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartOpen{listValueProto(stringProto("A"))}, + &sppb.KeyRange_EndOpen{listValueProto(stringProto("D"))}, + }, + wantStr: `(("A"),("D"))`, + }, + { + kr: KeyRange{Key{1}, Key{10}, OpenClosed}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartOpen{listValueProto(stringProto("1"))}, + &sppb.KeyRange_EndClosed{listValueProto(stringProto("10"))}, + }, + wantStr: "((1),(10)]", + }, + { + kr: KeyRange{Key{1.5, 2.1, 0.2}, Key{1.9, 0.7}, ClosedOpen}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartClosed{listValueProto(floatProto(1.5), floatProto(2.1), floatProto(0.2))}, + &sppb.KeyRange_EndOpen{listValueProto(floatProto(1.9), floatProto(0.7))}, + }, + wantStr: "[(1.5,2.1,0.2),(1.9,0.7))", + }, + { + kr: KeyRange{Key{NullInt64{1, true}}, Key{10}, ClosedClosed}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartClosed{listValueProto(stringProto("1"))}, + &sppb.KeyRange_EndClosed{listValueProto(stringProto("10"))}, + }, + wantStr: "[(1),(10)]", + }, + } { + if got := test.kr.String(); got != test.wantStr { + t.Errorf("%v.String() = %v, want %v", test.kr, got, test.wantStr) + } + gotProto, err := test.kr.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.kr, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.kr, gotProto.String(), test.wantProto.String()) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/keyset.go b/vendor/cloud.google.com/go/spanner/keyset.go new file mode 100644 index 000000000..28c7f054f --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/keyset.go @@ -0,0 +1,108 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// A KeySet defines a collection of Cloud Spanner keys and/or key +// ranges. All the keys are expected to be in the same table or index. The keys +// need not be sorted in any particular way. +// +// If the same key is specified multiple times in the set (for example if two +// ranges, two keys, or a key and a range overlap), the Cloud Spanner backend behaves +// as if the key were only specified once. +type KeySet struct { + // If All == true, then the KeySet names all rows of a table or + // under a index. 
+ All bool + // Keys is a list of keys covered by KeySet, see also documentation of + // Key for details. + Keys []Key + // Ranges is a list of key ranges covered by KeySet, see also documentation of + // KeyRange for details. + Ranges []KeyRange +} + +// AllKeys returns a KeySet that represents all Keys of a table or a index. +func AllKeys() KeySet { + return KeySet{All: true} +} + +// Keys returns a KeySet for a set of keys. +func Keys(keys ...Key) KeySet { + ks := KeySet{Keys: make([]Key, len(keys))} + copy(ks.Keys, keys) + return ks +} + +// Range returns a KeySet for a range of keys. +func Range(r KeyRange) KeySet { + return KeySet{Ranges: []KeyRange{r}} +} + +// PrefixRange returns a KeySet for all keys with the given prefix, which is +// a key itself. +func PrefixRange(prefix Key) KeySet { + return KeySet{Ranges: []KeyRange{ + { + Start: prefix, + End: prefix, + Kind: ClosedClosed, + }, + }} +} + +// UnionKeySets unions multiple KeySets into a superset. +func UnionKeySets(keySets ...KeySet) KeySet { + s := KeySet{} + for _, ks := range keySets { + if ks.All { + return KeySet{All: true} + } + s.Keys = append(s.Keys, ks.Keys...) + s.Ranges = append(s.Ranges, ks.Ranges...) + } + return s +} + +// proto converts KeySet into sppb.KeySet, which is the protobuf +// representation of KeySet. +func (keys KeySet) proto() (*sppb.KeySet, error) { + pb := &sppb.KeySet{ + Keys: make([]*proto3.ListValue, 0, len(keys.Keys)), + Ranges: make([]*sppb.KeyRange, 0, len(keys.Ranges)), + All: keys.All, + } + for _, key := range keys.Keys { + keyProto, err := key.proto() + if err != nil { + return nil, err + } + pb.Keys = append(pb.Keys, keyProto) + } + for _, r := range keys.Ranges { + rProto, err := r.proto() + if err != nil { + return nil, err + } + pb.Ranges = append(pb.Ranges, rProto) + } + return pb, nil +} diff --git a/vendor/cloud.google.com/go/spanner/keyset_test.go b/vendor/cloud.google.com/go/spanner/keyset_test.go new file mode 100644 index 000000000..ba1e9dba0 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/keyset_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test KeySet.proto(). 
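A short sketch of how the helpers above compose (illustrative only; the key values are made up):

    import "cloud.google.com/go/spanner"

    // exampleKeySet combines an explicit key, a prefix, and a range into one
    // KeySet. If any argument to UnionKeySets has All set, the union collapses
    // to AllKeys().
    func exampleKeySet() spanner.KeySet {
        single := spanner.Keys(spanner.Key{"Bob", "2014-09-23"})
        prefix := spanner.PrefixRange(spanner.Key{"Alfred"}) // all rows whose key starts with "Alfred"
        later := spanner.Range(spanner.KeyRange{
            Start: spanner.Key{"Bob", "2015-01-01"},
            End:   spanner.Key{"Bob"},
            Kind:  spanner.ClosedClosed,
        })
        return spanner.UnionKeySets(single, prefix, later)
    }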
+func TestKeySetToProto(t *testing.T) { + for _, test := range []struct { + ks KeySet + wantProto *sppb.KeySet + }{ + { + KeySet{}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{}, + Ranges: []*sppb.KeyRange{}, + }, + }, + { + KeySet{All: true}, + &sppb.KeySet{ + All: true, + Keys: []*proto3.ListValue{}, + Ranges: []*sppb.KeyRange{}, + }, + }, + { + KeySet{Keys: []Key{{1, 2}, {3, 4}}}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(intProto(1), intProto(2)), listValueProto(intProto(3), intProto(4))}, + Ranges: []*sppb.KeyRange{}, + }, + }, + { + KeySet{Ranges: []KeyRange{{Key{1}, Key{2}, ClosedClosed}, {Key{3}, Key{10}, OpenClosed}}}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{}, + Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + &sppb.KeyRange_StartClosed{listValueProto(intProto(1))}, + &sppb.KeyRange_EndClosed{listValueProto(intProto(2))}, + }, + &sppb.KeyRange{ + &sppb.KeyRange_StartOpen{listValueProto(intProto(3))}, + &sppb.KeyRange_EndClosed{listValueProto(intProto(10))}, + }, + }, + }, + }, + } { + gotProto, err := test.ks.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.ks, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.ks, gotProto.String(), test.wantProto.String()) + } + } +} + +// Test helpers that help to create KeySets. +func TestKeySetHelpers(t *testing.T) { + // Test Keys with one key. + k := Key{[]byte{1, 2, 3}} + if got, want := Keys(k), (KeySet{Keys: []Key{k}}); !reflect.DeepEqual(got, want) { + t.Errorf("Keys(%q) = %q, want %q", k, got, want) + } + // Test Keys with multiple keys. + ks := []Key{Key{57}, Key{NullString{"value", false}}} + if got, want := Keys(ks...), (KeySet{Keys: ks}); !reflect.DeepEqual(got, want) { + t.Errorf("Keys(%v) = %v, want %v", ks, got, want) + } + // Test Range. + kr := KeyRange{Key{1}, Key{10}, ClosedClosed} + if got, want := Range(kr), (KeySet{Ranges: []KeyRange{kr}}); !reflect.DeepEqual(got, want) { + t.Errorf("Range(%v) = %v, want %v", kr, got, want) + } + // Test PrefixRange. + k = Key{2} + kr = KeyRange{k, k, ClosedClosed} + if got, want := PrefixRange(k), (KeySet{Ranges: []KeyRange{kr}}); !reflect.DeepEqual(got, want) { + t.Errorf("PrefixRange(%v) = %v, want %v", k, got, want) + } + // Test UnionKeySets. + sk1, sk2 := Keys(Key{2}), Keys(Key{3}) + r1, r2 := Range(KeyRange{Key{1}, Key{10}, ClosedClosed}), Range(KeyRange{Key{15}, Key{20}, OpenClosed}) + want := KeySet{ + Keys: []Key{Key{2}, Key{3}}, + Ranges: []KeyRange{KeyRange{Key{1}, Key{10}, ClosedClosed}, KeyRange{Key{15}, Key{20}, OpenClosed}}, + } + if got := UnionKeySets(sk1, sk2, r1, r2); !reflect.DeepEqual(got, want) { + t.Errorf("UnionKeySets(%v, %v, %v, %v) = %v, want %v", sk1, sk2, r1, r2, got, want) + } + all := AllKeys() + if got := UnionKeySets(sk1, sk2, r1, r2, all); !reflect.DeepEqual(got, all) { + t.Errorf("UnionKeySets(%v, %v, %v, %v, %v) = %v, want %v", sk1, sk2, r1, r2, all, got, all) + } +} diff --git a/vendor/cloud.google.com/go/spanner/mutation.go b/vendor/cloud.google.com/go/spanner/mutation.go new file mode 100644 index 000000000..eba7fe815 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation.go @@ -0,0 +1,422 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// op is the mutation operation. +type op int + +const ( + // opDelete removes a row from a table. Succeeds whether or not the + // key was present. + opDelete op = iota + // opInsert inserts a row into a table. If the row already exists, the + // write or transaction fails. + opInsert + // opInsertOrUpdate inserts a row into a table. If the row already + // exists, it updates it instead. Any column values not explicitly + // written are preserved. + opInsertOrUpdate + // opReplace inserts a row into a table, deleting any existing row. + // Unlike InsertOrUpdate, this means any values not explicitly written + // become NULL. + opReplace + // opUpdate updates a row in a table. If the row does not already + // exist, the write or transaction fails. + opUpdate +) + +// A Mutation describes a modification to one or more Cloud Spanner rows. The +// mutation represents an insert, update, delete, etc on a table. +// +// Many mutations can be applied in a single atomic commit. For purposes of +// constraint checking (such as foreign key constraints), the operations can be +// viewed as applying in same order as the mutations are supplied in (so that +// e.g., a row and its logical "child" can be inserted in the same commit). +// +// - The Apply function applies series of mutations. +// - A ReadWriteTransaction applies a series of mutations as part of an +// atomic read-modify-write operation. +// Example: +// +// m := spanner.Insert("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, profile}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// In this example, we insert a new row into the User table. The primary key +// for the new row is UserID (presuming that "user_id" has been declared as the +// primary key of the "User" table). +// +// Updating a row +// +// Changing the values of columns in an existing row is very similar to +// inserting a new row: +// +// m := spanner.Update("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, profile}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// Deleting a row +// +// To delete a row, use spanner.Delete: +// +// m := spanner.Delete("User", spanner.Key{UserId}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// Note that deleting a row in a table may also delete rows from other tables +// if cascading deletes are specified in those tables' schemas. Delete does +// nothing if the named row does not exist (does not yield an error). 
+// +// Deleting a field +// +// To delete/clear a field within a row, use spanner.Update with the value nil: +// +// m := spanner.Update("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, nil}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// The valid Go types and their corresponding Cloud Spanner types that can be +// used in the Insert/Update/InsertOrUpdate functions are: +// +// string, NullString - STRING +// []string, []NullString - STRING ARRAY +// []byte - BYTES +// [][]byte - BYTES ARRAY +// int, int64, NullInt64 - INT64 +// []int, []int64, []NullInt64 - INT64 ARRAY +// bool, NullBool - BOOL +// []bool, []NullBool - BOOL ARRAY +// float64, NullFloat64 - FLOAT64 +// []float64, []NullFloat64 - FLOAT64 ARRAY +// time.Time, NullTime - TIMESTAMP +// []time.Time, []NullTime - TIMESTAMP ARRAY +// Date, NullDate - DATE +// []Date, []NullDate - DATE ARRAY +// +// To compare two Mutations for testing purposes, use reflect.DeepEqual. +type Mutation struct { + // op is the operation type of the mutation. + // See documentation for spanner.op for more details. + op op + // Table is the name of the taget table to be modified. + table string + // keySet is a set of primary keys that names the rows + // in a delete operation. + keySet KeySet + // columns names the set of columns that are going to be + // modified by Insert, InsertOrUpdate, Replace or Update + // operations. + columns []string + // values specifies the new values for the target columns + // named by Columns. + values []interface{} +} + +// mapToMutationParams converts Go map into mutation parameters. +func mapToMutationParams(in map[string]interface{}) ([]string, []interface{}) { + cols := []string{} + vals := []interface{}{} + for k, v := range in { + cols = append(cols, k) + vals = append(vals, v) + } + return cols, vals +} + +// errNotStruct returns error for not getting a go struct type. +func errNotStruct(in interface{}) error { + return spannerErrorf(codes.InvalidArgument, "%T is not a go struct type", in) +} + +// structToMutationParams converts Go struct into mutation parameters. +// If the input is not a valid Go struct type, structToMutationParams +// returns error. +func structToMutationParams(in interface{}) ([]string, []interface{}, error) { + if in == nil { + return nil, nil, errNotStruct(in) + } + v := reflect.ValueOf(in) + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + // t is a pointer to a struct. + if v.IsNil() { + // Return empty results. + return nil, nil, nil + } + // Get the struct value that in points to. + v = v.Elem() + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, nil, errNotStruct(in) + } + fields, err := fieldCache.Fields(t) + if err != nil { + return nil, nil, toSpannerError(err) + } + var cols []string + var vals []interface{} + for _, f := range fields { + cols = append(cols, f.Name) + vals = append(vals, v.FieldByIndex(f.Index).Interface()) + } + return cols, vals, nil +} + +// Insert returns a Mutation to insert a row into a table. If the row already +// exists, the write or transaction fails. +func Insert(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsert, + table: table, + columns: cols, + values: vals, + } +} + +// InsertMap returns a Mutation to insert a row into a table, specified by +// a map of column name to value. If the row already exists, the write or +// transaction fails. 
+func InsertMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Insert(table, cols, vals) +} + +// InsertStruct returns a Mutation to insert a row into a table, specified by +// a Go struct. If the row already exists, the write or transaction fails. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func InsertStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Insert(table, cols, vals), nil +} + +// Update returns a Mutation to update a row in a table. If the row does not +// already exist, the write or transaction fails. +func Update(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// UpdateMap returns a Mutation to update a row in a table, specified by +// a map of column to value. If the row does not already exist, the write or +// transaction fails. +func UpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Update(table, cols, vals) +} + +// UpdateStruct returns a Mutation to update a row in a table, specified by a Go +// struct. If the row does not already exist, the write or transaction fails. +func UpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Update(table, cols, vals), nil +} + +// InsertOrUpdate returns a Mutation to insert a row into a table. If the row +// already exists, it updates it instead. Any column values not explicitly +// written are preserved. +func InsertOrUpdate(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsertOrUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// InsertOrUpdateMap returns a Mutation to insert a row into a table, +// specified by a map of column to value. If the row already exists, it +// updates it instead. Any column values not explicitly written are preserved. +func InsertOrUpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return InsertOrUpdate(table, cols, vals) +} + +// InsertOrUpdateStruct returns a Mutation to insert a row into a table, +// specified by a Go struct. If the row already exists, it updates it instead. +// Any column values not explicitly written are preserved. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func InsertOrUpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return InsertOrUpdate(table, cols, vals), nil +} + +// Replace returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdate, this means any values not explicitly +// written become NULL. 
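A sketch of the *Struct helpers and field tags described above (the table and column names are hypothetical):

    import "cloud.google.com/go/spanner"

    // userRow maps exported fields to columns via `spanner` tags; unexported
    // fields are skipped by structToMutationParams, and `spanner:"-"` would
    // exclude an exported field.
    type userRow struct {
        UserID  string `spanner:"user_id"`
        Profile []byte `spanner:"profile"`
        cached  bool   // unexported: not written
    }

    // upsertUser is equivalent, up to column order, to
    // InsertOrUpdate("User", []string{"user_id", "profile"}, []interface{}{u.UserID, u.Profile}).
    func upsertUser(u userRow) (*spanner.Mutation, error) {
        return spanner.InsertOrUpdateStruct("User", u)
    }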
+func Replace(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opReplace, + table: table, + columns: cols, + values: vals, + } +} + +// ReplaceMap returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a map of column to value. +func ReplaceMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Replace(table, cols, vals) +} + +// ReplaceStruct returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a Go struct. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func ReplaceStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Replace(table, cols, vals), nil +} + +// Delete removes a key from a table. Succeeds whether or not the key was +// present. +func Delete(table string, key Key) *Mutation { + return &Mutation{ + op: opDelete, + table: table, + keySet: Keys(key), + } +} + +// DeleteKeyRange removes a range of keys from a table. Succeeds whether or not +// the keys were present. +func DeleteKeyRange(table string, r KeyRange) *Mutation { + return &Mutation{ + op: opDelete, + table: table, + keySet: Range(r), + } +} + +// prepareWrite generates sppb.Mutation_Write from table name, column names +// and new column values. +func prepareWrite(table string, columns []string, vals []interface{}) (*sppb.Mutation_Write, error) { + v, err := encodeValueArray(vals) + if err != nil { + return nil, err + } + return &sppb.Mutation_Write{ + Table: table, + Columns: columns, + Values: []*proto3.ListValue{v}, + }, nil +} + +// errInvdMutationOp returns error for unrecognized mutation operation. +func errInvdMutationOp(m Mutation) error { + return spannerErrorf(codes.InvalidArgument, "Unknown op type: %d", m.op) +} + +// proto converts spanner.Mutation to sppb.Mutation, in preparation to send +// RPCs. 
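A sketch of the delete helpers above combined into a single atomic commit (client.Apply is referenced by the Mutation documentation earlier in this file but defined elsewhere in the package; the table names are hypothetical):

    import (
        "cloud.google.com/go/spanner"
        "golang.org/x/net/context"
    )

    // purgeUser removes one User row and every UserEvents row keyed by the same
    // user in one commit. The ClosedClosed range over a single-element key names
    // the same rows that PrefixRange(Key{userID}) would.
    func purgeUser(ctx context.Context, client *spanner.Client, userID string) error {
        ms := []*spanner.Mutation{
            spanner.Delete("User", spanner.Key{userID}),
            spanner.DeleteKeyRange("UserEvents", spanner.KeyRange{
                Start: spanner.Key{userID},
                End:   spanner.Key{userID},
                Kind:  spanner.ClosedClosed,
            }),
        }
        _, err := client.Apply(ctx, ms)
        return err
    }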
+func (m Mutation) proto() (*sppb.Mutation, error) { + var pb *sppb.Mutation + switch m.op { + case opDelete: + keySetProto, err := m.keySet.proto() + if err != nil { + return nil, err + } + pb = &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: m.table, + KeySet: keySetProto, + }, + }, + } + case opInsert: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Insert{Insert: w}} + case opInsertOrUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_InsertOrUpdate{InsertOrUpdate: w}} + case opReplace: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Replace{Replace: w}} + case opUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Update{Update: w}} + default: + return nil, errInvdMutationOp(m) + } + return pb, nil +} + +// mutationsProto turns a spanner.Mutation array into a sppb.Mutation array, +// it is convenient for sending batch mutations to Cloud Spanner. +func mutationsProto(ms []*Mutation) ([]*sppb.Mutation, error) { + l := make([]*sppb.Mutation, 0, len(ms)) + for _, m := range ms { + pb, err := m.proto() + if err != nil { + return nil, err + } + l = append(l, pb) + } + return l, nil +} diff --git a/vendor/cloud.google.com/go/spanner/mutation_test.go b/vendor/cloud.google.com/go/spanner/mutation_test.go new file mode 100644 index 000000000..6233b3909 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation_test.go @@ -0,0 +1,545 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "sort" + "strings" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// keysetProto returns protobuf encoding of valid spanner.KeySet. +func keysetProto(t *testing.T, ks KeySet) *sppb.KeySet { + k, err := ks.proto() + if err != nil { + t.Fatalf("cannot convert keyset %v to protobuf: %v", ks, err) + } + return k +} + +// Test encoding from spanner.Mutation to protobuf. 
+func TestMutationToProto(t *testing.T) { + for i, test := range []struct { + m *Mutation + want *sppb.Mutation + }{ + // Delete Mutation + { + &Mutation{opDelete, "t_foo", Keys(Key{"foo"}), nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_foo", + KeySet: keysetProto(t, Keys(Key{"foo"})), + }, + }, + }, + }, + // Insert Mutation + { + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{intProto(1), intProto(2)}, + }, + }, + }, + }, + }, + }, + // InsertOrUpdate Mutation + { + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{floatProto(1.0), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Replace Mutation + { + &Mutation{opReplace, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Update Mutation + { + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), nullProto()}, + }, + }, + }, + }, + }, + }, + } { + if got, err := test.m.proto(); err != nil || !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: (%#v).proto() = (%v, %v), want (%v, nil)", i, test.m, got, err, test.want) + } + } +} + +// mutationColumnSorter implements sort.Interface for sorting column-value pairs in a Mutation by column names. +type mutationColumnSorter struct { + Mutation +} + +// newMutationColumnSorter creates new instance of mutationColumnSorter by duplicating the input Mutation so that +// sorting won't change the input Mutation. +func newMutationColumnSorter(m *Mutation) *mutationColumnSorter { + return &mutationColumnSorter{ + Mutation{ + m.op, + m.table, + m.keySet, + append([]string(nil), m.columns...), + append([]interface{}(nil), m.values...), + }, + } +} + +// Len implements sort.Interface.Len. +func (ms *mutationColumnSorter) Len() int { + return len(ms.columns) +} + +// Swap implements sort.Interface.Swap. +func (ms *mutationColumnSorter) Swap(i, j int) { + ms.columns[i], ms.columns[j] = ms.columns[j], ms.columns[i] + ms.values[i], ms.values[j] = ms.values[j], ms.values[i] +} + +// Less implements sort.Interface.Less. +func (ms *mutationColumnSorter) Less(i, j int) bool { + return strings.Compare(ms.columns[i], ms.columns[j]) < 0 +} + +// mutationEqual returns true if two mutations in question are equal +// to each other. +func mutationEqual(t *testing.T, m1, m2 Mutation) bool { + // Two mutations are considered to be equal even if their column values have different + // orders. 
+ ms1 := newMutationColumnSorter(&m1) + ms2 := newMutationColumnSorter(&m2) + sort.Sort(ms1) + sort.Sort(ms2) + return reflect.DeepEqual(ms1, ms2) +} + +// Test helper functions which help to generate spanner.Mutation. +func TestMutationHelpers(t *testing.T) { + for _, test := range []struct { + m string + got *Mutation + want *Mutation + }{ + { + "Insert", + Insert("t_foo", []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}), + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertMap", + InsertMap("t_foo", map[string]interface{}{"col1": int64(1), "col2": int64(2)}), + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertStruct", + func() *Mutation { + m, err := InsertStruct( + "t_foo", + struct { + notCol bool + Col1 int64 `spanner:"col1"` + Col2 int64 `spanner:"col2"` + }{false, int64(1), int64(2)}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "Update", + Update("t_foo", []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}), + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateMap", + UpdateMap("t_foo", map[string]interface{}{"col1": "one", "col2": []byte(nil)}), + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateStruct", + func() *Mutation { + m, err := UpdateStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + notCol int + Col2 []byte `spanner:"col2"` + }{"one", 1, nil}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "InsertOrUpdate", + InsertOrUpdate("t_foo", []string{"col1", "col2"}, []interface{}{1.0, 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateMap", + InsertOrUpdateMap("t_foo", map[string]interface{}{"col1": 1.0, "col2": 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateStruct", + func() *Mutation { + m, err := InsertOrUpdateStruct( + "t_foo", + struct { + Col1 float64 `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol float64 + }{1.0, 2.0, 3.0}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "Replace", + Replace("t_foo", []string{"col1", "col2"}, []interface{}{"one", 2.0}), + &Mutation{opReplace, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "ReplaceMap", + ReplaceMap("t_foo", map[string]interface{}{"col1": "one", "col2": 2.0}), + &Mutation{opReplace, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "ReplaceStruct", + func() *Mutation { + m, err := ReplaceStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol string + }{"one", 2.0, "foo"}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opReplace, "t_foo", KeySet{}, 
[]string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "Delete", + Delete("t_foo", Key{"foo"}), + &Mutation{opDelete, "t_foo", Keys(Key{"foo"}), nil, nil}, + }, + { + "DeleteRange", + DeleteKeyRange("t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}), + &Mutation{opDelete, "t_foo", Range(KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}), nil, nil}, + }, + } { + if !mutationEqual(t, *test.got, *test.want) { + t.Errorf("%v: got Mutation %v, want %v", test.m, test.got, test.want) + } + } +} + +// Test encoding non-struct types by using *Struct helpers. +func TestBadStructs(t *testing.T) { + val := "i_am_not_a_struct" + wantErr := errNotStruct(val) + if _, gotErr := InsertStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("InsertStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := InsertOrUpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("InsertOrUpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := UpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("UpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := ReplaceStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("ReplaceStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } +} + +// Test encoding Mutation into proto. +func TestEncodeMutation(t *testing.T) { + for _, test := range []struct { + name string + mutation Mutation + wantProto *sppb.Mutation + wantErr error + }{ + { + "OpDelete", + Mutation{opDelete, "t_test", Keys(Key{1}), nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(intProto(1))}, + Ranges: []*sppb.KeyRange{}, + }, + }, + }, + }, + nil, + }, + { + "OpDelete - Key error", + Mutation{opDelete, "t_test", Keys(Key{struct{}{}}), nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{}, + }, + }, + }, + errInvdKeyPartType(struct{}{}), + }, + { + "OpInsert", + Mutation{opInsert, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsert - Value Type Error", + Mutation{opInsert, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpInsertOrUpdate", + Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsertOrUpdate - Value Type Error", + Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpReplace", + Mutation{opReplace, "t_test", KeySet{}, 
[]string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpReplace - Value Type Error", + Mutation{opReplace, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpUpdate", + Mutation{opUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpUpdate - Value Type Error", + Mutation{opUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpKnown - Unknown Mutation Operation Code", + Mutation{op(100), "t_test", KeySet{}, nil, nil}, + &sppb.Mutation{}, + errInvdMutationOp(Mutation{op(100), "t_test", KeySet{}, nil, nil}), + }, + } { + gotProto, gotErr := test.mutation.proto() + if gotErr != nil { + if !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: %v.proto() returns error %v, want %v", test.name, test.mutation, gotErr, test.wantErr) + } + continue + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v: %v.proto() = (%v, nil), want (%v, nil)", test.name, test.mutation, gotProto, test.wantProto) + } + } +} + +// Test Encoding an array of mutations. 
+func TestEncodeMutationArray(t *testing.T) { + for _, test := range []struct { + name string + ms []*Mutation + want []*sppb.Mutation + wantErr error + }{ + { + "Multiple Mutations", + []*Mutation{ + &Mutation{opDelete, "t_test", Keys(Key{"bar"}), nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + }, + []*sppb.Mutation{ + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(stringProto("bar"))}, + Ranges: []*sppb.KeyRange{}, + }, + }, + }, + }, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + }, + nil, + }, + { + "Multiple Mutations - Bad Mutation", + []*Mutation{ + &Mutation{opDelete, "t_test", Keys(Key{"bar"}), nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", struct{}{}}}, + }, + []*sppb.Mutation{}, + errEncoderUnsupportedType(struct{}{}), + }, + } { + gotProto, gotErr := mutationsProto(test.ms) + if gotErr != nil { + if !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: mutationsProto(%v) returns error %v, want %v", test.name, test.ms, gotErr, test.wantErr) + } + continue + } + if !reflect.DeepEqual(gotProto, test.want) { + t.Errorf("%v: mutationsProto(%v) = (%v, nil), want (%v, nil)", test.name, test.ms, gotProto, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/protoutils.go b/vendor/cloud.google.com/go/spanner/protoutils.go new file mode 100644 index 000000000..df12432d5 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/protoutils.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "strconv" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Helpers to generate protobuf values and Cloud Spanner types. 
+ +func stringProto(s string) *proto3.Value { + return &proto3.Value{Kind: stringKind(s)} +} + +func stringKind(s string) *proto3.Value_StringValue { + return &proto3.Value_StringValue{StringValue: s} +} + +func stringType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRING} +} + +func boolProto(b bool) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_BoolValue{BoolValue: b}} +} + +func boolType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BOOL} +} + +func intProto(n int64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(n, 10)}} +} + +func intType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_INT64} +} + +func floatProto(n float64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: n}} +} + +func floatType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_FLOAT64} +} + +func bytesProto(b []byte) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString(b)}} +} + +func bytesType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BYTES} +} + +func timeProto(t time.Time) *proto3.Value { + return stringProto(t.UTC().Format(time.RFC3339Nano)) +} + +func timeType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_TIMESTAMP} +} + +func dateProto(d civil.Date) *proto3.Value { + return stringProto(d.String()) +} + +func dateType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_DATE} +} + +func listProto(p ...*proto3.Value) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{Values: p}}} +} + +func listValueProto(p ...*proto3.Value) *proto3.ListValue { + return &proto3.ListValue{Values: p} +} + +func listType(t *sppb.Type) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_ARRAY, ArrayElementType: t} +} + +func mkField(n string, t *sppb.Type) *sppb.StructType_Field { + return &sppb.StructType_Field{n, t} +} + +func structType(fields ...*sppb.StructType_Field) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{Fields: fields}} +} + +func nullProto() *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}} +} diff --git a/vendor/cloud.google.com/go/spanner/read.go b/vendor/cloud.google.com/go/spanner/read.go new file mode 100644 index 000000000..d7a1ad0cb --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/read.go @@ -0,0 +1,679 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "bytes" + "io" + "sync/atomic" + "time" + + log "github.com/golang/glog" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + "golang.org/x/net/context" + + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// streamingReceiver is the interface for receiving data from a client side +// stream. 
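As an aside, the helpers above make the wire representation explicit: INT64 values travel as decimal strings, BYTES as base64, TIMESTAMP as RFC3339Nano in UTC, and SQL NULL as the proto NullValue. A small sketch (not part of the vendored file) built directly on the proto3 types imported here:

    import (
        "encoding/base64"
        "strconv"

        proto3 "github.com/golang/protobuf/ptypes/struct"
    )

    // encodedSamples mirrors intProto, bytesProto, floatProto and nullProto above.
    func encodedSamples() []*proto3.Value {
        return []*proto3.Value{
            {Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(42, 10)}},                       // INT64 42 -> "42"
            {Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString([]byte("hi"))}}, // BYTES "hi" -> "aGk="
            {Kind: &proto3.Value_NumberValue{NumberValue: 1.5}},                                             // FLOAT64 1.5
            {Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}},                         // SQL NULL
        }
    }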
+type streamingReceiver interface { + Recv() (*sppb.PartialResultSet, error) +} + +// errEarlyReadEnd returns error for read finishes when gRPC stream is still active. +func errEarlyReadEnd() error { + return spannerErrorf(codes.FailedPrecondition, "read completed with active stream") +} + +// stream is the internal fault tolerant method for streaming data from +// Cloud Spanner. +func stream(ctx context.Context, rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error), release func(time.Time, error)) *RowIterator { + ctx, cancel := context.WithCancel(ctx) + return &RowIterator{ + streamd: newResumableStreamDecoder(ctx, rpc), + rowd: &partialResultSetDecoder{}, + release: release, + cancel: cancel, + } +} + +// RowIterator is an iterator over Rows. +type RowIterator struct { + streamd *resumableStreamDecoder + rowd *partialResultSetDecoder + release func(time.Time, error) + cancel func() + err error + rows []*Row +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns Done, all subsequent calls +// will return Done. +func (r *RowIterator) Next() (*Row, error) { + if r.err != nil { + return nil, r.err + } + for len(r.rows) == 0 && r.streamd.next() { + r.rows, r.err = r.rowd.add(r.streamd.get()) + if r.err != nil { + return nil, r.err + } + } + if len(r.rows) > 0 { + row := r.rows[0] + r.rows = r.rows[1:] + return row, nil + } + if err := r.streamd.lastErr(); err != nil { + r.err = toSpannerError(err) + } else if !r.rowd.done() { + r.err = errEarlyReadEnd() + } else { + r.err = iterator.Done + } + return nil, r.err +} + +// Do calls the provided function once in sequence for each row in the iteration. If the +// function returns a non-nil error, Do immediately returns that value. +// +// If there are no rows in the iterator, Do will return nil without calling the +// provided function. +// +// Do always calls Stop on the iterator. +func (r *RowIterator) Do(f func(r *Row) error) error { + defer r.Stop() + for { + row, err := r.Next() + switch err { + case iterator.Done: + return nil + case nil: + if err = f(row); err != nil { + return err + } + default: + return err + } + } +} + +// Stop terminates the iteration. It should be called after every iteration. +func (r *RowIterator) Stop() { + if r.cancel != nil { + r.cancel() + } + if r.release != nil { + r.release(r.rowd.ts, r.err) + if r.err == nil { + r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop") + } + r.release = nil + + } +} + +// partialResultQueue implements a simple FIFO queue. The zero value is a +// valid queue. +type partialResultQueue struct { + q []*sppb.PartialResultSet + first int + last int + n int // number of elements in queue +} + +// empty returns if the partialResultQueue is empty. +func (q *partialResultQueue) empty() bool { + return q.n == 0 +} + +// errEmptyQueue returns error for dequeuing an empty queue. +func errEmptyQueue() error { + return spannerErrorf(codes.OutOfRange, "empty partialResultQueue") +} + +// peekLast returns the last item in partialResultQueue; if the queue +// is empty, it returns error. +func (q *partialResultQueue) peekLast() (*sppb.PartialResultSet, error) { + if q.empty() { + return nil, errEmptyQueue() + } + return q.q[(q.last+cap(q.q)-1)%cap(q.q)], nil +} + +// push adds an item to the tail of partialResultQueue. 
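A usage sketch of the iterator contract above (Client, Single, Query, Statement and Row are assumed from elsewhere in the package; the SQL is hypothetical). Do handles iterator.Done itself and always calls Stop, so the caller only supplies the per-row callback:

    import (
        "cloud.google.com/go/spanner"
        "golang.org/x/net/context"
    )

    // countRows drains a query with RowIterator.Do and returns the row count.
    func countRows(ctx context.Context, client *spanner.Client) (int, error) {
        iter := client.Single().Query(ctx, spanner.Statement{SQL: "SELECT UserName FROM UserEvents"})
        n := 0
        err := iter.Do(func(_ *spanner.Row) error {
            n++
            return nil
        })
        return n, err
    }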
+func (q *partialResultQueue) push(r *sppb.PartialResultSet) { + if q.q == nil { + q.q = make([]*sppb.PartialResultSet, 8 /* arbitrary */) + } + if q.n == cap(q.q) { + buf := make([]*sppb.PartialResultSet, cap(q.q)*2) + for i := 0; i < q.n; i++ { + buf[i] = q.q[(q.first+i)%cap(q.q)] + } + q.q = buf + q.first = 0 + q.last = q.n + } + q.q[q.last] = r + q.last = (q.last + 1) % cap(q.q) + q.n++ +} + +// pop removes an item from the head of partialResultQueue and returns +// it. +func (q *partialResultQueue) pop() *sppb.PartialResultSet { + if q.n == 0 { + return nil + } + r := q.q[q.first] + q.q[q.first] = nil + q.first = (q.first + 1) % cap(q.q) + q.n-- + return r +} + +// clear empties partialResultQueue. +func (q *partialResultQueue) clear() { + *q = partialResultQueue{} +} + +// dump retrives all items from partialResultQueue and return them in a slice. +// It is used only in tests. +func (q *partialResultQueue) dump() []*sppb.PartialResultSet { + var dq []*sppb.PartialResultSet + for i := q.first; len(dq) < q.n; i = (i + 1) % cap(q.q) { + dq = append(dq, q.q[i]) + } + return dq +} + +// resumableStreamDecoderState encodes resumableStreamDecoder's status. +// See also the comments for resumableStreamDecoder.Next. +type resumableStreamDecoderState int + +const ( + unConnected resumableStreamDecoderState = iota // 0 + queueingRetryable // 1 + queueingUnretryable // 2 + aborted // 3 + finished // 4 +) + +// resumableStreamDecoder provides a resumable interface for receiving +// sppb.PartialResultSet(s) from a given query wrapped by +// resumableStreamDecoder.rpc(). +type resumableStreamDecoder struct { + // state is the current status of resumableStreamDecoder, see also + // the comments for resumableStreamDecoder.Next. + state resumableStreamDecoderState + // stateWitness when non-nil is called to observe state change, + // used for testing. + stateWitness func(resumableStreamDecoderState) + // ctx is the caller's context, used for cancel/timeout Next(). + ctx context.Context + // rpc is a factory of streamingReceiver, which might resume + // a pervious stream from the point encoded in restartToken. + // rpc is always a wrapper of a Cloud Spanner query which is + // resumable. + rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error) + // stream is the current RPC streaming receiver. + stream streamingReceiver + // q buffers received yet undecoded partial results. + q partialResultQueue + // bytesBetweenResumeTokens is the proxy of the byte size of PartialResultSets being queued + // between two resume tokens. Once bytesBetweenResumeTokens is greater than + // maxBytesBetweenResumeTokens, resumableStreamDecoder goes into queueingUnretryable state. + bytesBetweenResumeTokens int32 + // maxBytesBetweenResumeTokens is the max number of bytes that can be buffered + // between two resume tokens. It is always copied from the global maxBytesBetweenResumeTokens + // atomically. + maxBytesBetweenResumeTokens int32 + // np is the next sppb.PartialResultSet ready to be returned + // to caller of resumableStreamDecoder.Get(). + np *sppb.PartialResultSet + // resumeToken stores the resume token that resumableStreamDecoder has + // last revealed to caller. + resumeToken []byte + // retryCount is the number of retries that have been carried out so far + retryCount int + // err is the last error resumableStreamDecoder has encountered so far. + err error + // backoff to compute delays between retries. 
+ backoff exponentialBackoff +} + +// newResumableStreamDecoder creates a new resumeableStreamDecoder instance. +// Parameter rpc should be a function that creates a new stream +// beginning at the restartToken if non-nil. +func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error)) *resumableStreamDecoder { + return &resumableStreamDecoder{ + ctx: ctx, + rpc: rpc, + maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens), + backoff: defaultBackoff, + } +} + +// changeState fulfills state transition for resumableStateDecoder. +func (d *resumableStreamDecoder) changeState(target resumableStreamDecoderState) { + if d.state == queueingRetryable && d.state != target { + // Reset bytesBetweenResumeTokens because it is only meaningful/changed under + // queueingRetryable state. + d.bytesBetweenResumeTokens = 0 + } + d.state = target + if d.stateWitness != nil { + d.stateWitness(target) + } +} + +// isNewResumeToken returns if the observed resume token is different from +// the one returned from server last time. +func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool { + if rt == nil { + return false + } + if bytes.Compare(rt, d.resumeToken) == 0 { + return false + } + return true +} + +// Next advances to the next available partial result set. If error or no +// more, returns false, call Err to determine if an error was encountered. +// The following diagram illustrates the state machine of resumableStreamDecoder +// that Next() implements. Note that state transition can be only triggered by +// RPC activities. +/* + rpc() fails retryable + +---------+ + | | rpc() fails unretryable/ctx timeouts or cancelled + | | +------------------------------------------------+ + | | | | + | v | v + | +---+---+---+ +--------+ +------+--+ + +-----+unConnected| |finished| | aborted |<----+ + | | ++-----+-+ +------+--+ | + +---+----+--+ ^ ^ ^ | + | ^ | | | | + | | | | recv() fails | + | | | | | | + | |recv() fails retryable | | | | + | |with valid ctx | | | | + | | | | | | + rpc() succeeds | +-----------------------+ | | | + | | | recv EOF recv EOF | | + | | | | | | + v | | Queue size exceeds | | | + +---+----+---+----+threshold +-------+-----------+ | | ++---------->+ +--------------->+ +-+ | +| |queueingRetryable| |queueingUnretryable| | +| | +<---------------+ | | +| +---+----------+--+ pop() returns +--+----+-----------+ | +| | | resume token | ^ | +| | | | | | +| | | | | | ++---------------+ | | | | + recv() succeeds | +----+ | + | recv() succeeds | + | | + | | + | | + | | + | | + +--------------------------------------------------+ + recv() fails unretryable + +*/ +var ( + // maxBytesBetweenResumeTokens is the maximum amount of bytes that resumableStreamDecoder + // in queueingRetryable state can use to queue PartialResultSets before getting + // into queueingUnretryable state. + maxBytesBetweenResumeTokens = int32(128 * 1024 * 1024) +) + +func (d *resumableStreamDecoder) next() bool { + for { + select { + case <-d.ctx.Done(): + // Do context check here so that even gRPC failed to do + // so, resumableStreamDecoder can still break the loop + // as expected. + d.err = errContextCanceled(d.err) + d.changeState(aborted) + default: + } + switch d.state { + case unConnected: + // If no gRPC stream is available, try to initiate one. 
+ if d.stream, d.err = d.rpc(d.ctx, d.resumeToken); d.err != nil { + if isRetryable(d.err) { + d.doBackOff() + // Be explicit about state transition, although the + // state doesn't actually change. State transition + // will be triggered only by RPC activity, regardless of + // whether there is an actual state change or not. + d.changeState(unConnected) + continue + } + d.changeState(aborted) + continue + } + d.resetBackOff() + d.changeState(queueingRetryable) + continue + case queueingRetryable: + fallthrough + case queueingUnretryable: + // Receiving queue is not empty. + last, err := d.q.peekLast() + if err != nil { + // Only the case that receiving queue is empty could cause peekLast to + // return error and in such case, we should try to receive from stream. + d.tryRecv() + continue + } + if d.isNewResumeToken(last.ResumeToken) { + // Got new resume token, return buffered sppb.PartialResultSets to caller. + d.np = d.q.pop() + if d.q.empty() { + d.bytesBetweenResumeTokens = 0 + // The new resume token was just popped out from queue, record it. + d.resumeToken = d.np.ResumeToken + d.changeState(queueingRetryable) + } + return true + } + if d.bytesBetweenResumeTokens >= d.maxBytesBetweenResumeTokens && d.state == queueingRetryable { + d.changeState(queueingUnretryable) + continue + } + if d.state == queueingUnretryable { + // When there is no resume token observed, + // only yield sppb.PartialResultSets to caller under + // queueingUnretryable state. + d.np = d.q.pop() + return true + } + // Needs to receive more from gRPC stream till a new resume token + // is observed. + d.tryRecv() + continue + case aborted: + // Discard all pending items because none of them + // should be yield to caller. + d.q.clear() + return false + case finished: + // If query has finished, check if there are still buffered messages. + if d.q.empty() { + // No buffered PartialResultSet. + return false + } + // Although query has finished, there are still buffered PartialResultSets. + d.np = d.q.pop() + return true + + default: + log.Errorf("Unexpected resumableStreamDecoder.state: %v", d.state) + return false + } + } +} + +// tryRecv attempts to receive a PartialResultSet from gRPC stream. +func (d *resumableStreamDecoder) tryRecv() { + var res *sppb.PartialResultSet + if res, d.err = d.stream.Recv(); d.err != nil { + if d.err == io.EOF { + d.err = nil + d.changeState(finished) + return + } + if isRetryable(d.err) && d.state == queueingRetryable { + d.err = nil + // Discard all queue items (none have resume tokens). + d.q.clear() + d.stream = nil + d.changeState(unConnected) + d.doBackOff() + return + } + d.changeState(aborted) + return + } + d.q.push(res) + if d.state == queueingRetryable && !d.isNewResumeToken(res.ResumeToken) { + // adjusting d.bytesBetweenResumeTokens + d.bytesBetweenResumeTokens += int32(proto.Size(res)) + } + d.resetBackOff() + d.changeState(d.state) +} + +// resetBackOff clears the internal retry counter of +// resumableStreamDecoder so that the next exponential +// backoff will start at a fresh state. +func (d *resumableStreamDecoder) resetBackOff() { + d.retryCount = 0 +} + +// doBackoff does an exponential backoff sleep. +func (d *resumableStreamDecoder) doBackOff() { + ticker := time.NewTicker(d.backoff.delay(d.retryCount)) + defer ticker.Stop() + d.retryCount++ + select { + case <-d.ctx.Done(): + case <-ticker.C: + } +} + +// get returns the most recent PartialResultSet generated by a call to next. 
+func (d *resumableStreamDecoder) get() *sppb.PartialResultSet {
+	return d.np
+}
+
+// lastErr returns the last non-EOF error encountered.
+func (d *resumableStreamDecoder) lastErr() error {
+	return d.err
+}
+
+// partialResultSetDecoder assembles PartialResultSet(s) into Cloud Spanner
+// Rows.
+type partialResultSetDecoder struct {
+	row     Row
+	tx      *sppb.Transaction
+	chunked bool      // if true, the next value should be merged with the last values entry.
+	ts      time.Time // read timestamp
+}
+
+// yield checks we have a complete row, and if so returns it. A row is not
+// complete if it doesn't have enough columns, or if this is a chunked response
+// and there are no further values to process.
+func (p *partialResultSetDecoder) yield(chunked, last bool) *Row {
+	if len(p.row.vals) == len(p.row.fields) && (!chunked || !last) {
+		// When partialResultSetDecoder has gathered enough column values,
+		// there are two cases in which a new Row should be yielded:
+		//	1. The incoming PartialResultSet is not chunked;
+		//	2. The incoming PartialResultSet is chunked, but the
+		//	   proto3.Value being merged is not the last one in
+		//	   the PartialResultSet.
+		//
+		// Use a fresh Row to simplify clients that want to use yielded results
+		// after the next row is retrieved. Note that fields is never changed
+		// so it doesn't need to be copied.
+		fresh := Row{
+			fields: p.row.fields,
+			vals:   make([]*proto3.Value, len(p.row.vals)),
+		}
+		copy(fresh.vals, p.row.vals)
+		p.row.vals = p.row.vals[:0] // empty and reuse slice
+		return &fresh
+	}
+	return nil
+}
+
+// errChunkedEmptyRow returns error for a chunked PartialResultSet that arrives
+// when there is no buffered row to merge it into.
+func errChunkedEmptyRow() error {
+	return spannerErrorf(codes.FailedPrecondition, "partialResultSetDecoder gets chunked empty row")
+}
+
+// add tries to merge a new PartialResultSet into the buffered Row. It returns
+// any rows that have been completed as a result.
+func (p *partialResultSetDecoder) add(r *sppb.PartialResultSet) ([]*Row, error) {
+	var rows []*Row
+	if r.Metadata != nil {
+		// Metadata should only be returned in the first result.
+		if p.row.fields == nil {
+			p.row.fields = r.Metadata.RowType.Fields
+		}
+		if p.tx == nil && r.Metadata.Transaction != nil {
+			p.tx = r.Metadata.Transaction
+			if p.tx.ReadTimestamp != nil {
+				p.ts = time.Unix(p.tx.ReadTimestamp.Seconds, int64(p.tx.ReadTimestamp.Nanos))
+			}
+		}
+	}
+	if len(r.Values) == 0 {
+		return nil, nil
+	}
+	if p.chunked {
+		p.chunked = false
+		// Try to merge the first value in r.Values into the
+		// uncompleted row.
+		last := len(p.row.vals) - 1
+		if last < 0 { // sanity check
+			return nil, errChunkedEmptyRow()
+		}
+		var err error
+		// If p is chunked, then we should always try to merge p.last with r.first.
+		if p.row.vals[last], err = p.merge(p.row.vals[last], r.Values[0]); err != nil {
+			return nil, err
+		}
+		r.Values = r.Values[1:]
+		// Merge is done, try to yield a complete Row.
+		if row := p.yield(r.ChunkedValue, len(r.Values) == 0); row != nil {
+			rows = append(rows, row)
+		}
+	}
+	for i, v := range r.Values {
+		// The remaining values in r can be appended to p directly.
+		p.row.vals = append(p.row.vals, v)
+		// Again, check to see if a complete Row can be yielded because of
+		// the newly added value.
+		if row := p.yield(r.ChunkedValue, i == len(r.Values)-1); row != nil {
+			rows = append(rows, row)
+		}
+	}
+	if r.ChunkedValue {
+		// After dealing with all values in r, if r is chunked then p must
+		// also be chunked.
+ p.chunked = true + } + return rows, nil +} + +// isMergeable returns if a protobuf Value can be potentially merged with +// other protobuf Values. +func (p *partialResultSetDecoder) isMergeable(a *proto3.Value) bool { + switch a.Kind.(type) { + case *proto3.Value_StringValue: + return true + case *proto3.Value_ListValue: + return true + default: + return false + } +} + +// errIncompatibleMergeTypes returns error for incompatible protobuf types +// that cannot be merged by partialResultSetDecoder. +func errIncompatibleMergeTypes(a, b *proto3.Value) error { + return spannerErrorf(codes.FailedPrecondition, "partialResultSetDecoder merge(%T,%T) - incompatible types", a.Kind, b.Kind) +} + +// errUnsupportedMergeType returns error for protobuf type that cannot be +// merged to other protobufs. +func errUnsupportedMergeType(a *proto3.Value) error { + return spannerErrorf(codes.FailedPrecondition, "unsupported type merge (%T)", a.Kind) +} + +// merge tries to combine two protobuf Values if possible. +func (p *partialResultSetDecoder) merge(a, b *proto3.Value) (*proto3.Value, error) { + var err error + typeErr := errIncompatibleMergeTypes(a, b) + switch t := a.Kind.(type) { + case *proto3.Value_StringValue: + s, ok := b.Kind.(*proto3.Value_StringValue) + if !ok { + return nil, typeErr + } + return &proto3.Value{ + Kind: &proto3.Value_StringValue{StringValue: t.StringValue + s.StringValue}, + }, nil + case *proto3.Value_ListValue: + l, ok := b.Kind.(*proto3.Value_ListValue) + if !ok { + return nil, typeErr + } + if l.ListValue == nil || len(l.ListValue.Values) <= 0 { + // b is an empty list, just return a. + return a, nil + } + if t.ListValue == nil || len(t.ListValue.Values) <= 0 { + // a is an empty list, just return b. + return b, nil + } + if la := len(t.ListValue.Values) - 1; p.isMergeable(t.ListValue.Values[la]) { + // When the last item in a is of type String, + // List or Struct(encoded into List by Cloud Spanner), + // try to Merge last item in a and first item in b. + t.ListValue.Values[la], err = p.merge(t.ListValue.Values[la], l.ListValue.Values[0]) + if err != nil { + return nil, err + } + l.ListValue.Values = l.ListValue.Values[1:] + } + return &proto3.Value{ + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: append(t.ListValue.Values, l.ListValue.Values...), + }, + }, + }, nil + default: + return nil, errUnsupportedMergeType(a) + } + +} + +// Done returns if partialResultSetDecoder has already done with all buffered +// values. +func (p *partialResultSetDecoder) done() bool { + // There is no explicit end of stream marker, but ending part way + // through a row is obviously bad, or ending with the last column still + // awaiting completion. + return len(p.row.vals) == 0 && !p.chunked +} diff --git a/vendor/cloud.google.com/go/spanner/read_test.go b/vendor/cloud.google.com/go/spanner/read_test.go new file mode 100644 index 000000000..7c28d5bda --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/read_test.go @@ -0,0 +1,1731 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "errors" + "fmt" + "io" + "reflect" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + + "cloud.google.com/go/spanner/internal/testutil" + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // Mocked transaction timestamp. + trxTs = time.Unix(1, 2) + // Metadata for mocked KV table, its rows are returned by SingleUse transactions. + kvMeta = func() *sppb.ResultSetMetadata { + meta := testutil.KvMeta + meta.Transaction = &sppb.Transaction{ + ReadTimestamp: timestampProto(trxTs), + } + return &meta + }() + // Metadata for mocked ListKV table, which uses List for its key and value. + // Its rows are returned by snapshot readonly transactions, as indicated in the transaction metadata. + kvListMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Key", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + { + Name: "Value", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{5, 6, 7, 8, 9}, + ReadTimestamp: timestampProto(trxTs), + }, + } + // Metadata for mocked schema of a query result set, which has two struct + // columns named "Col1" and "Col2", the struct's schema is like the + // following: + // + // STRUCT { + // INT + // LIST + // } + // + // Its rows are returned in readwrite transaction, as indicated in the transaction metadata. + kvObjectMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Col1", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "foo-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "foo-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + { + Name: "Col2", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "bar-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "bar-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{1, 2, 3, 4, 5}, + }, + } +) + +// String implements fmt.stringer. +func (r *Row) String() string { + return fmt.Sprintf("{fields: %s, val: %s}", r.fields, r.vals) +} + +func describeRows(l []*Row) string { + // generate a nice test failure description + var s = "[" + for i, r := range l { + if i != 0 { + s += ",\n " + } + s += fmt.Sprint(r) + } + s += "]" + return s +} + +// Helper for generating proto3 Value_ListValue instances, making +// test code shorter and readable. 
+func genProtoListValue(v ...string) *proto3.Value_ListValue { + r := &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{}, + }, + } + for _, e := range v { + r.ListValue.Values = append( + r.ListValue.Values, + &proto3.Value{ + Kind: &proto3.Value_StringValue{StringValue: e}, + }, + ) + } + return r +} + +// Test Row generation logics of partialResultSetDecoder. +func TestPartialResultSetDecoder(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + var tests = []struct { + input []*sppb.PartialResultSet + wantF []*Row + wantTxID transactionID + wantTs time.Time + wantD bool + }{ + { + // Empty input. + wantD: true, + }, + // String merging examples. + { + // Single KV result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Incomplete partial result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + wantTs: trxTs, + wantD: false, + }, + { + // Complete splitted result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Multi-row example with splitted row in the middle. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Merging example in result_set.proto. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orl"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "d"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // More complex example showing completing a merge and + // starting a new merge in the same partialResultSet. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, // start split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orld"}}, // complete value + {Kind: &proto3.Value_StringValue{StringValue: "i"}}, // start split in key + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "s"}}, // complete key + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "qu"}}, // split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "estion"}}, // complete value + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "is"}}, + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "question"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + // List merging examples. + { + // Non-splitting Lists. + input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Simple List merge case: splitted string element. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-"), + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Struct merging is also implemented by List merging. Note that + // Cloud Spanner uses proto.ListValue to encode Structs as well. + input: []*sppb.PartialResultSet{ + { + Metadata: kvObjectMeta, + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "fo")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("o-2", "f")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("oo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvObjectMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "foo-2", "foo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantTxID: transactionID{1, 2, 3, 4, 5}, + wantD: true, + }, + } + +nextTest: + for i, test := range tests { + var rows []*Row + p := &partialResultSetDecoder{} + for j, v := range test.input { + rs, err := p.add(v) + if err != nil { + t.Errorf("test %d.%d: partialResultSetDecoder.add(%v) = %v; want nil", i, j, v, err) + continue nextTest + } + rows = append(rows, rs...) + } + if !reflect.DeepEqual(p.ts, test.wantTs) { + t.Errorf("got transaction(%v), want %v", p.ts, test.wantTs) + } + if !reflect.DeepEqual(rows, test.wantF) { + t.Errorf("test %d: rows=\n%v\n; want\n%v\n; p.row:\n%v\n", i, describeRows(rows), describeRows(test.wantF), p.row) + } + if got := p.done(); got != test.wantD { + t.Errorf("test %d: partialResultSetDecoder.done() = %v", i, got) + } + } +} + +const ( + maxBuffers = 16 // max number of PartialResultSets that will be buffered in tests. +) + +// setMaxBytesBetweenResumeTokens sets the global maxBytesBetweenResumeTokens to a smaller +// value more suitable for tests. 
It returns a function which should be called to restore +// the maxBytesBetweenResumeTokens to its old value +func setMaxBytesBetweenResumeTokens() func() { + o := atomic.LoadInt32(&maxBytesBetweenResumeTokens) + atomic.StoreInt32(&maxBytesBetweenResumeTokens, int32(maxBuffers*proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }))) + return func() { + atomic.StoreInt32(&maxBytesBetweenResumeTokens, o) + } +} + +// keyStr generates key string for kvMeta schema. +func keyStr(i int) string { + return fmt.Sprintf("foo-%02d", i) +} + +// valStr generates value string for kvMeta schema. +func valStr(i int) string { + return fmt.Sprintf("bar-%02d", i) +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a non-blocking state(resumableStreamDecoder.Next returns +// on non-blocking state). +func TestRsdNonblockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected->queueingRetryable->finished + name: "unConnected->queueingRetryable->finished", + msgs: []testutil.MockCtlMsg{ + {}, + {}, + {Err: io.EOF, ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + finished, // got EOF + }, + }, + { + // unConnected->queueingRetryable->aborted + name: "unConnected->queueingRetryable->aborted", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {}, + {Err: errors.New("I quit"), ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + aborted, // got error + }, + wantErr: grpc.Errorf(codes.Unknown, "I 
quit"), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable + name: "unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers+1; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers+1; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // the internal queue of resumableStreamDecoder fills up + } + // the first item fills up the queue and triggers state transition; + // the second item is received under queueingUnretryable state. + s = append(s, queueingUnretryable) + s = append(s, queueingUnretryable) + return s + }(), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->aborted + name: "unConnected->queueingRetryable->queueingUnretryable->aborted", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: errors.New("Just Abort It"), ResumeToken: false}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up + } + s = append(s, queueingUnretryable) // the last row triggers state change + s = append(s, aborted) // Error happens + return s + }(), + wantErr: grpc.Errorf(codes.Unknown, "Just Abort It"), + }, + } +nextTest: + for _, test := range tests { + t.Logf("Testing %v", test.name) + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) + if err != nil { + t.Fatalf("%v: Dial(%q) = %v", test.name, ms.Addr(), err) + } + mc := sppb.NewSpannerClient(cc) + if test.rpc == nil { + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: test.sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + st := []resumableStreamDecoderState{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal by setting stateDone = true. + stateDone := false + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + if !stateDone { + // Record state transitions. 
+ st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + stateDone = true + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + for { + select { + case <-ctx.Done(): + t.Errorf("context cancelled or timeout during test") + continue nextTest + default: + } + if stateDone { + // Check if resumableStreamDecoder carried out expected + // state transitions. + if !reflect.DeepEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !reflect.DeepEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. + var q []*sppb.PartialResultSet + for { + item := r.q.pop() + if item == nil { + break + } + q = append(q, item) + } + if !reflect.DeepEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. + if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !reflect.DeepEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + // Proceed to next test + continue nextTest + } + // Receive next decoded item. + if r.next() { + rs = append(rs, r.get()) + } + } + } +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a blocking state(resumableStreamDecoder.Next blocks +// on blocking state). 
+func TestRsdBlockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected -> unConnected + name: "unConnected -> unConnected", + rpc: func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return nil, grpc.Errorf(codes.Unavailable, "trust me: server is unavailable") + }, + sql: "SELECT * from t_whatever", + stateHistory: []resumableStreamDecoderState{unConnected, unConnected, unConnected}, + wantErr: grpc.Errorf(codes.Unavailable, "trust me: server is unavailable"), + }, + { + // unConnected -> queueingRetryable + name: "unConnected -> queueingRetryable", + sql: "SELECT t.key key, t.value value FROM t_mock t", + stateHistory: []resumableStreamDecoderState{queueingRetryable}, + }, + { + // unConnected->queueingRetryable->queueingRetryable + name: "unConnected->queueingRetryable->queueingRetryable", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {Err: nil, ResumeToken: true}, + {}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + ResumeToken: testutil.EncodeResumeToken(2), + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(3)}}, + }, + }, + }, + resumeToken: testutil.EncodeResumeToken(2), + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + queueingRetryable, // foo-02, resume token + queueingRetryable, // got foo-03 + }, + }, + { + // unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable + name: "unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers+1; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: nil, ResumeToken: true}) + m = append(m, testutil.MockCtlMsg{}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers+2; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: 
&proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + s[maxBuffers+1].ResumeToken = testutil.EncodeResumeToken(maxBuffers + 1) + return s + }(), + resumeToken: testutil.EncodeResumeToken(maxBuffers + 1), + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 2)}}, + }, + }, + }, + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder filles up + } + for i := maxBuffers - 1; i < maxBuffers+1; i++ { + // the first item fills up the queue and triggers state change; + // the second item is received under queueingUnretryable state. + s = append(s, queueingUnretryable) + } + s = append(s, queueingUnretryable) // got (maxBuffers+1)th row under Unretryable state + s = append(s, queueingRetryable) // (maxBuffers+1)th row has resume token + s = append(s, queueingRetryable) // (maxBuffers+2)th row has no resume token + return s + }(), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->finished + name: "unConnected->queueingRetryable->queueingUnretryable->finished", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: io.EOF, ResumeToken: false}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up + } + s = append(s, queueingUnretryable) // last row triggers state change + s = append(s, finished) // query finishes + return s + }(), + }, + } + for _, test := range tests { + t.Logf("Testing %v", test.name) + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) + if err != nil { + t.Fatalf("%v: Dial(%q) = %v", test.name, ms.Addr(), err) + } + mc := sppb.NewSpannerClient(cc) + if test.rpc == nil { + // Avoid using test.sql directly in closure because for loop changes test. + sql := test.sql + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + // Override backoff to make the test run faster. + r.backoff = exponentialBackoff{1 * time.Nanosecond, 1 * time.Nanosecond} + // st is the set of observed state transitions. + st := []resumableStreamDecoderState{} + // q is the content of the decoder's partial result queue when expected number of state transitions are done. 
+ q := []*sppb.PartialResultSet{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal to channel stateDone. + stateDone := make(chan int) + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + select { + case <-stateDone: + // Noop after expected number of state transitions + default: + // Record state transitions. + st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + q = r.q.dump() + close(stateDone) + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + go func() { + for { + if !r.next() { + // Note that r.Next also exits on context cancel/timeout. + return + } + rs = append(rs, r.get()) + } + }() + // Verify that resumableStreamDecoder reaches expected state. + select { + case <-stateDone: // Note that at this point, receiver is still blocking on r.next(). + // Check if resumableStreamDecoder carried out expected + // state transitions. + if !reflect.DeepEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !reflect.DeepEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. + if !reflect.DeepEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. + if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !reflect.DeepEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + case <-time.After(1 * time.Second): + t.Errorf("%v: Timeout in waiting for state change", test.name) + } + ms.Stop() + cc.Close() + } +} + +// sReceiver signals every receiving attempt through a channel, +// used by TestResumeToken to determine if the receiving of a certain +// PartialResultSet will be attempted next. +type sReceiver struct { + c chan int + rpcReceiver sppb.Spanner_ExecuteStreamingSqlClient +} + +// Recv() implements streamingReceiver.Recv for sReceiver. +func (sr *sReceiver) Recv() (*sppb.PartialResultSet, error) { + sr.c <- 1 + return sr.rpcReceiver.Recv() +} + +// waitn waits for nth receiving attempt from now on, until +// the signal for nth Recv() attempts is received or timeout. +// Note that because the way stream() works, the signal for the +// nth Recv() means that the previous n - 1 PartialResultSets +// has already been returned to caller or queued, if no error happened. +func (sr *sReceiver) waitn(n int) error { + for i := 0; i < n; i++ { + select { + case <-sr.c: + case <-time.After(10 * time.Second): + return fmt.Errorf("timeout in waiting for %v-th Recv()", i+1) + } + } + return nil +} + +// Test the handling of resumableStreamDecoder.bytesBetweenResumeTokens. 
+func TestQueueBytes(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + sr := &sReceiver{ + c: make(chan int, 1000), // will never block in this test + } + wantQueueBytes := 0 + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + r := newResumableStreamDecoder( + ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + sr.rpcReceiver = r + return sr, err + }, + ) + go func() { + for r.next() { + } + }() + // Let server send maxBuffers / 2 rows. + for i := 0; i < maxBuffers/2; i++ { + wantQueueBytes += proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + ms.AddMsg(nil, false) + } + if err := sr.waitn(maxBuffers/2 + 1); err != nil { + t.Fatalf("failed to wait for the first %v recv() calls: %v", maxBuffers, err) + } + if int32(wantQueueBytes) != r.bytesBetweenResumeTokens { + t.Errorf("r.bytesBetweenResumeTokens = %v, want %v", r.bytesBetweenResumeTokens, wantQueueBytes) + } + // Now send a resume token to drain the queue. + ms.AddMsg(nil, true) + // Wait for all rows to be processes. + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for rows to be processed: %v", err) + } + if r.bytesBetweenResumeTokens != 0 { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } + // Let server send maxBuffers - 1 rows. + wantQueueBytes = 0 + for i := 0; i < maxBuffers-1; i++ { + wantQueueBytes += proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + ms.AddMsg(nil, false) + } + if err := sr.waitn(maxBuffers - 1); err != nil { + t.Fatalf("failed to wait for %v rows to be processed: %v", maxBuffers-1, err) + } + if int32(wantQueueBytes) != r.bytesBetweenResumeTokens { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } + // Trigger a state transition: queueingRetryable -> queueingUnretryable. + ms.AddMsg(nil, false) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for state transition: %v", err) + } + if r.bytesBetweenResumeTokens != 0 { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } +} + +// Verify that client can deal with resume token correctly +func TestResumeToken(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) 
+ if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer func() { + ms.Stop() + cc.Close() + }() + mc := sppb.NewSpannerClient(cc) + sr := &sReceiver{ + c: make(chan int, 1000), // will never block in this test + } + rows := []*Row{} + done := make(chan int) + streaming := func() { + // Establish a stream to mock cloud spanner server. + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + sr.rpcReceiver = r + return sr, err + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + var row *Row + row, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + rows = append(rows, row) + } + done <- 1 + } + go streaming() + // Server streaming row 0 - 2, only row 1 has resume token. + // Client will receive row 0 - 2, so it will try receiving for + // 4 times (the last recv will block), and only row 0 - 1 will + // be yielded. + for i := 0; i < 3; i++ { + if i == 1 { + ms.AddMsg(nil, true) + } else { + ms.AddMsg(nil, false) + } + } + // Wait for 4 receive attempts, as explained above. + if err = sr.waitn(4); err != nil { + t.Fatalf("failed to wait for row 0 - 2: %v", err) + } + want := []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } + // Inject resumable failure. + ms.AddMsg( + grpc.Errorf(codes.Unavailable, "mock server unavailable"), + false, + ) + // Test if client detects the resumable failure and retries. + if err = sr.waitn(1); err != nil { + t.Fatalf("failed to wait for client to retry: %v", err) + } + // Client has resumed the query, now server resend row 2. + ms.AddMsg(nil, true) + if err = sr.waitn(1); err != nil { + t.Fatalf("failed to wait for resending row 2: %v", err) + } + // Now client should have received row 0 - 2. + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + }) + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n, want\n%v\n", rows, want) + } + // Sending 3rd - (maxBuffers+1)th rows without resume tokens, client should buffer them. + for i := 3; i < maxBuffers+2; i++ { + ms.AddMsg(nil, false) + } + if err = sr.waitn(maxBuffers - 1); err != nil { + t.Fatalf("failed to wait for row 3-%v: %v", maxBuffers+1, err) + } + // Received rows should be unchanged. + if !reflect.DeepEqual(rows, want) { + t.Errorf("receive rows: \n%v\n, want\n%v\n", rows, want) + } + // Send (maxBuffers+2)th row to trigger state change of resumableStreamDecoder: + // queueingRetryable -> queueingUnretryable + ms.AddMsg(nil, false) + if err = sr.waitn(1); err != nil { + t.Fatalf("failed to wait for row %v: %v", maxBuffers+2, err) + } + // Client should yield row 3rd - (maxBuffers+2)th to application. Therefore, application should + // see row 0 - (maxBuffers+2)th so far. 
+ for i := 3; i < maxBuffers+3; i++ { + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; want\n%v\n", rows, want) + } + // Inject resumable error, but since resumableStreamDecoder is already at queueingUnretryable + // state, query will just fail. + ms.AddMsg( + grpc.Errorf(codes.Unavailable, "mock server wants some sleep"), + false, + ) + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return.") + } + if wantErr := toSpannerError(grpc.Errorf(codes.Unavailable, "mock server wants some sleep")); !reflect.DeepEqual(err, wantErr) { + t.Fatalf("stream() returns error: %v, but want error: %v", err, wantErr) + } + + // Reconnect to mock Cloud Spanner. + rows = []*Row{} + go streaming() + // Let server send two rows without resume token. + for i := maxBuffers + 3; i < maxBuffers+5; i++ { + ms.AddMsg(nil, false) + } + if err = sr.waitn(3); err != nil { + t.Fatalf("failed to wait for row %v - %v: %v", maxBuffers+3, maxBuffers+5, err) + } + if len(rows) > 0 { + t.Errorf("client received some rows unexpectedly: %v, want nothing", rows) + } + // Let server end the query. + ms.AddMsg(io.EOF, false) + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return") + } + if err != nil { + t.Fatalf("stream() returns unexpected error: %v, but want no error", err) + } + // Verify if a normal server side EOF flushes all queued rows. + want = []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 3)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 4)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 4)}}, + }, + }, + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } +} + +// Verify that streaming query get retried upon real gRPC server transport failures. +func TestGrpcReconnect(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + retry := make(chan int) + row := make(chan int) + go func() { + r := 0 + // Establish a stream to mock cloud spanner server. + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + if r > 0 { + // This RPC attempt is a retry, signal it. + retry <- r + } + r++ + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + row <- 0 + } + }() + // Add a message and wait for the receipt. 
+ ms.AddMsg(nil, true) + select { + case <-row: + t.Logf("Stream established.") + case <-time.After(10 * time.Second): + t.Fatalf("expect stream to be established within 10 seconds, but it didn't") + } + // Error injection: force server to close all connections. + ms.Stop() + // Test to see if client respond to the real RPC failure correctly by + // retrying RPC. + select { + case r, ok := <-retry: + if ok && r == 1 { + t.Logf("RPC retried.") + break + } + t.Errorf("retry count = %v, want 1", r) + case <-time.After(10 * time.Second): + t.Errorf("client library failed to respond after 10 seconds, aborting") + return + } +} + +// Test cancel/timeout for client operations. +func TestCancelTimeout(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) + defer cc.Close() + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + mc := sppb.NewSpannerClient(cc) + done := make(chan int) + go func() { + for { + ms.AddMsg(nil, true) + } + }() + // Test cancelling query. + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Establish a stream to mock cloud spanner server. + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + break + } + if err != nil { + done <- 0 + break + } + } + }() + cancel() + select { + case <-done: + if ErrCode(err) != codes.Canceled { + t.Errorf("streaming query is canceled and returns error %v, want error code %v", err, codes.Canceled) + } + case <-time.After(1 * time.Second): + t.Errorf("query doesn't exit timely after being cancelled") + } + // Test query timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + go func() { + // Establish a stream to mock cloud spanner server. 
+ iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + } + done <- 0 + }() + select { + case <-done: + if ErrCode(err) != codes.Canceled { + t.Errorf("streaming query timeout returns error %v, want error code %v", err, codes.Canceled) + } + case <-time.After(2 * time.Second): + t.Errorf("query doesn't timeout as expected") + } +} + +func TestRowIteratorDo(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + for i := 0; i < 3; i++ { + ms.AddMsg(nil, false) + } + ms.AddMsg(io.EOF, true) + nRows := 0 + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + err = iter.Do(func(r *Row) error { nRows++; return nil }) + if err != nil { + t.Errorf("Using Do: %v", err) + } + if nRows != 3 { + t.Errorf("got %d rows, want 3", nRows) + } +} + +func TestIteratorStopEarly(t *testing.T) { + ctx := context.Background() + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + ms.AddMsg(nil, false) + ms.AddMsg(nil, false) + ms.AddMsg(io.EOF, true) + + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + _, err = iter.Next() + if err != nil { + t.Fatalf("before Stop: %v", err) + } + iter.Stop() + // Stop sets r.err to the FailedPrecondition error "Next called after Stop". + // Override that here so this test can observe the Canceled error from the stream. + iter.err = nil + iter.Next() + if ErrCode(iter.streamd.lastErr()) != codes.Canceled { + t.Errorf("after Stop: got %v, wanted Canceled", err) + } +} + +func TestIteratorWithError(t *testing.T) { + injected := errors.New("Failed iterator") + iter := RowIterator{err: injected} + defer iter.Stop() + if _, err := iter.Next(); err != injected { + t.Fatalf("Expected error: %v, got %v", injected, err) + } +} diff --git a/vendor/cloud.google.com/go/spanner/retry.go b/vendor/cloud.google.com/go/spanner/retry.go new file mode 100644 index 000000000..83a3826b0 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/retry.go @@ -0,0 +1,189 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +const ( + retryInfoKey = "google.rpc.retryinfo-bin" +) + +// errRetry returns an unavailable error under error namespace EsOther. It is a +// generic retryable error that is used to mask and recover unretryable errors +// in a retry loop. +func errRetry(err error) error { + if se, ok := err.(*Error); ok { + return &Error{codes.Unavailable, fmt.Sprintf("generic Cloud Spanner retryable error: { %v }", se.Error()), se.trailers} + } + return spannerErrorf(codes.Unavailable, "generic Cloud Spanner retryable error: { %v }", err.Error()) +} + +// isErrorClosing reports whether the error is generated by gRPC layer talking to a closed server. +func isErrorClosing(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "transport is closing") { + // Handle the case when connection is closed unexpectedly. + // TODO: once gRPC is able to categorize + // this as retryable error, we should stop parsing the + // error message here. + return true + } + return false +} + +// isErrorRST reports whether the error is generated by gRPC client receiving a RST frame from server. +func isErrorRST(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "stream terminated by RST_STREAM") { + // TODO: once gRPC is able to categorize this error as "go away" or "retryable", + // we should stop parsing the error message. + return true + } + return false +} + +// isErrorUnexpectedEOF returns true if error is generated by gRPC layer +// receiving io.EOF unexpectedly. +func isErrorUnexpectedEOF(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unknown && strings.Contains(ErrDesc(err), "unexpected EOF") { + // Unexpected EOF is an transport layer issue that + // could be recovered by retries. The most likely + // scenario is a flaky RecvMsg() call due to network + // issues. + // TODO: once gRPC is able to categorize + // this as retryable error, we should stop parsing the + // error message here. + return true + } + return false +} + +// isErrorUnavailable returns true if the error is about server being unavailable. +func isErrorUnavailable(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unavailable { + return true + } + return false +} + +// isRetryable returns true if the Cloud Spanner error being checked is a retryable error. +func isRetryable(err error) bool { + if isErrorClosing(err) { + return true + } + if isErrorUnexpectedEOF(err) { + return true + } + if isErrorRST(err) { + return true + } + if isErrorUnavailable(err) { + return true + } + return false +} + +// errContextCanceled returns *spanner.Error for canceled context. 
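+// runRetryable returns this error when its context is cancelled or times out;
+// it wraps the last error observed from the retried function so callers can
+// still see why the final attempt failed.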
+func errContextCanceled(lastErr error) error { + return spannerErrorf(codes.Canceled, "context is canceled, lastErr is <%v>", lastErr) +} + +// extractRetryDelay extracts retry backoff if present. +func extractRetryDelay(err error) (time.Duration, bool) { + trailers := errTrailers(err) + if trailers == nil { + return 0, false + } + elem, ok := trailers[retryInfoKey] + if !ok || len(elem) <= 0 { + return 0, false + } + _, b, err := metadata.DecodeKeyValue(retryInfoKey, elem[0]) + if err != nil { + return 0, false + } + var retryInfo edpb.RetryInfo + if proto.Unmarshal([]byte(b), &retryInfo) != nil { + return 0, false + } + delay, err := ptypes.Duration(retryInfo.RetryDelay) + if err != nil { + return 0, false + } + return delay, true +} + +// runRetryable keeps attempting to run f until one of the following happens: +// 1) f returns nil error or an unretryable error; +// 2) context is cancelled or timeout. +// TODO: consider using https://github.com/googleapis/gax-go once it +// becomes available internally. +func runRetryable(ctx context.Context, f func(context.Context) error) error { + var funcErr error + retryCount := 0 + for { + select { + case <-ctx.Done(): + // Do context check here so that even f() failed to do + // so (for example, gRPC implementation bug), the loop + // can still have a chance to exit as expected. + return errContextCanceled(funcErr) + default: + } + funcErr = f(ctx) + if funcErr == nil { + return nil + } + if isRetryable(funcErr) { + // Error is retryable, do exponential backoff and continue. + b, ok := extractRetryDelay(funcErr) + if !ok { + b = defaultBackoff.delay(retryCount) + } + select { + case <-ctx.Done(): + return errContextCanceled(funcErr) + case <-time.After(b): + } + retryCount++ + continue + } + // Error isn't retryable / no error, return immediately. + return toSpannerError(funcErr) + } +} diff --git a/vendor/cloud.google.com/go/spanner/retry_test.go b/vendor/cloud.google.com/go/spanner/retry_test.go new file mode 100644 index 000000000..225062e05 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/retry_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Test if runRetryable loop deals with various errors correctly. 
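+//
+// For reference, a typical call site wraps a single attempt of an operation in
+// runRetryable; the closure below is purely illustrative (doSomeRPC is a
+// hypothetical helper, not part of this package):
+//
+//	err := runRetryable(ctx, func(ctx context.Context) error {
+//		return doSomeRPC(ctx) // retried on Unavailable / transport errors
+//	})
+//
+// Retryable failures (codes.Unavailable, "transport is closing", RST_STREAM,
+// unexpected EOF) are masked and retried with backoff; any other error is
+// converted by toSpannerError and returned to the caller.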
+func TestRetry(t *testing.T) { + responses := []error{ + grpc.Errorf(codes.Internal, "transport is closing"), + grpc.Errorf(codes.Unknown, "unexpected EOF"), + grpc.Errorf(codes.Internal, "stream terminated by RST_STREAM with error code: 2"), + grpc.Errorf(codes.Unavailable, "service is currently unavailable"), + errRetry(fmt.Errorf("just retry it")), + } + err := runRetryable(context.Background(), func(ct context.Context) error { + var r error + if len(responses) > 0 { + r = responses[0] + responses = responses[1:] + } + return r + }) + if err != nil { + t.Errorf("runRetryable should be able to survive all retryable errors, but it returns %v", err) + } + // Unretryable errors + injErr := errors.New("this is unretryable") + err = runRetryable(context.Background(), func(ct context.Context) error { + return injErr + }) + if wantErr := toSpannerError(injErr); !reflect.DeepEqual(err, wantErr) { + t.Errorf("runRetryable returns error %v, want %v", err, wantErr) + } + // Timeout + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + retryErr := errRetry(fmt.Errorf("still retrying")) + err = runRetryable(ctx, func(ct context.Context) error { + // Expect to trigger timeout in retryable runner after 10 executions. + <-time.After(100 * time.Millisecond) + // Let retryable runner to retry so that timeout will eventually happen. + return retryErr + }) + if wantErr := errContextCanceled(retryErr); !reflect.DeepEqual(err, wantErr) { + t.Errorf("runRetryable returns error: %v, want error: %v", err, wantErr) + } + // Cancellation + ctx, cancel = context.WithCancel(context.Background()) + retries := 3 + retryErr = errRetry(fmt.Errorf("retry before cancel")) + err = runRetryable(ctx, func(ct context.Context) error { + retries-- + if retries == 0 { + cancel() + } + return retryErr + }) + if wantErr := errContextCanceled(retryErr); !reflect.DeepEqual(err, wantErr) || retries != 0 { + t.Errorf("=<%v, %v>, want <%v, %v>", err, retries, wantErr, 0) + } +} + +func TestRetryInfo(t *testing.T) { + b, _ := proto.Marshal(&edpb.RetryInfo{ + RetryDelay: ptypes.DurationProto(time.Second), + }) + trailers := map[string]string{ + retryInfoKey: string(b), + } + gotDelay, ok := extractRetryDelay(errRetry(toSpannerErrorWithMetadata(grpc.Errorf(codes.Aborted, ""), metadata.New(trailers)))) + if !ok || !reflect.DeepEqual(time.Second, gotDelay) { + t.Errorf(" = <%t, %v>, want ", ok, gotDelay, time.Second) + } +} diff --git a/vendor/cloud.google.com/go/spanner/row.go b/vendor/cloud.google.com/go/spanner/row.go new file mode 100644 index 000000000..70c2861b4 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/row.go @@ -0,0 +1,307 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "reflect" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// A Row is a view of a row of data produced by a Cloud Spanner read. 
+// +// A row consists of a number of columns; the number depends on the columns +// used to construct the read. +// +// The column values can be accessed by index, where the indices are with +// respect to the columns. For instance, if the read specified +// []string{"photo_id", "caption", "metadata"}, then each row will +// contain three columns: the 0th column corresponds to "photo_id", the +// 1st column corresponds to "caption", etc. +// +// Column values are decoded by using one of the Column, ColumnByName, or +// Columns methods. The valid values passed to these methods depend on the +// column type. For example: +// +// var photoID int64 +// err := row.Column(0, &photoID) // Decode column 0 as an integer. +// +// var caption string +// err := row.Column(1, &caption) // Decode column 1 as a string. +// +// // The above two operations at once. +// err := row.Columns(&photoID, &caption) +// +// Supported types and their corresponding Cloud Spanner column type(s) are: +// +// *string(not NULL), *NullString - STRING +// *[]NullString - STRING ARRAY +// *[]byte - BYTES +// *[][]byte - BYTES ARRAY +// *int64(not NULL), *NullInt64 - INT64 +// *[]NullInt64 - INT64 ARRAY +// *bool(not NULL), *NullBool - BOOL +// *[]NullBool - BOOL ARRAY +// *float64(not NULL), *NullFloat64 - FLOAT64 +// *[]NullFloat64 - FLOAT64 ARRAY +// *time.Time(not NULL), *NullTime - TIMESTAMP +// *[]NullTime - TIMESTAMP ARRAY +// *Date(not NULL), *NullDate - DATE +// *[]NullDate - DATE ARRAY +// *[]*some_go_struct, *[]NullRow - STRUCT ARRAY +// *GenericColumnValue - any Cloud Spanner type +// +// For TIMESTAMP columns, returned time.Time object will be in UTC. +// +// To fetch an array of BYTES, pass a *[][]byte. To fetch an array of +// (sub)rows, pass a *[]spanner.NullRow or a *[]*some_go_struct where +// some_go_struct holds all information of the subrow, see spannr.Row.ToStruct +// for the mapping between Cloud Spanner row and Go struct. To fetch an array of +// other types, pass a *[]spanner.Null* type of the appropriate type. Use +// *GenericColumnValue when you don't know in advance what column type to +// expect. +// +// Row decodes the row contents lazily; as a result, each call to a getter has +// a chance of returning an error. +// +// A column value may be NULL if the corresponding value is not present in +// Cloud Spanner. The spanner.Null* types (spanner.NullInt64 et al.) allow fetching +// values that may be null. A NULL BYTES can be fetched into a *[]byte as nil. +// It is an error to fetch a NULL value into any other type. +type Row struct { + fields []*sppb.StructType_Field + vals []*proto3.Value // keep decoded for now +} + +// errNamesValuesMismatch returns error for when columnNames count is not equal +// to columnValues count. +func errNamesValuesMismatch(columnNames []string, columnValues []interface{}) error { + return spannerErrorf(codes.FailedPrecondition, + "different number of names(%v) and values(%v)", len(columnNames), len(columnValues)) +} + +// NewRow returns a Row containing the supplied data. This can be useful for +// mocking Cloud Spanner Read and Query responses for unit testing. 
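+//
+// A minimal, illustrative example (column names and values are arbitrary):
+//
+//	r, err := NewRow(
+//		[]string{"photo_id", "caption"},
+//		[]interface{}{int64(1), "dinner"},
+//	)
+//	if err != nil {
+//		// the name and value counts differed, or a value had an unsupported type
+//	}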
+func NewRow(columnNames []string, columnValues []interface{}) (*Row, error) { + if len(columnValues) != len(columnNames) { + return nil, errNamesValuesMismatch(columnNames, columnValues) + } + r := Row{ + fields: make([]*sppb.StructType_Field, len(columnValues)), + vals: make([]*proto3.Value, len(columnValues)), + } + for i := range columnValues { + val, typ, err := encodeValue(columnValues[i]) + if err != nil { + return nil, err + } + r.fields[i] = &sppb.StructType_Field{ + Name: columnNames[i], + Type: typ, + } + r.vals[i] = val + } + return &r, nil +} + +// Size is the number of columns in the row. +func (r *Row) Size() int { + return len(r.fields) +} + +// ColumnName returns the name of column i, or empty string for invalid column. +func (r *Row) ColumnName(i int) string { + if i < 0 || i >= len(r.fields) { + return "" + } + return r.fields[i].Name +} + +// ColumnIndex returns the index of the column with the given name. The +// comparison is case-sensitive. +func (r *Row) ColumnIndex(name string) (int, error) { + found := false + var index int + if len(r.vals) != len(r.fields) { + return 0, errFieldsMismatchVals(r) + } + for i, f := range r.fields { + if f == nil { + return 0, errNilColType(i) + } + if name == f.Name { + if found { + return 0, errDupColName(name) + } + found = true + index = i + } + } + if !found { + return 0, errColNotFound(name) + } + return index, nil +} + +// ColumnNames returns all column names of the row. +func (r *Row) ColumnNames() []string { + var n []string + for _, c := range r.fields { + n = append(n, c.Name) + } + return n +} + +// errColIdxOutOfRange returns error for requested column index is out of the +// range of the target Row's columns. +func errColIdxOutOfRange(i int, r *Row) error { + return spannerErrorf(codes.OutOfRange, "column index %d out of range [0,%d)", i, len(r.vals)) +} + +// errDecodeColumn returns error for not being able to decode a indexed column. +func errDecodeColumn(i int, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to decode column %v, error = <%v>", i, err) + } + se.decorate(fmt.Sprintf("failed to decode column %v", i)) + return se +} + +// errFieldsMismatchVals returns error for field count isn't equal to value count in a Row. +func errFieldsMismatchVals(r *Row) error { + return spannerErrorf(codes.FailedPrecondition, "row has different number of fields(%v) and values(%v)", + len(r.fields), len(r.vals)) +} + +// errNilColType returns error for column type for column i being nil in the row. +func errNilColType(i int) error { + return spannerErrorf(codes.FailedPrecondition, "column(%v)'s type is nil", i) +} + +// Column fetches the value from the ith column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +func (r *Row) Column(i int, ptr interface{}) error { + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + if i < 0 || i >= len(r.fields) { + return errColIdxOutOfRange(i, r) + } + if r.fields[i] == nil { + return errNilColType(i) + } + if err := decodeValue(r.vals[i], r.fields[i].Type, ptr); err != nil { + return errDecodeColumn(i, err) + } + return nil +} + +// errDupColName returns error for duplicated column name in the same row. +func errDupColName(n string) error { + return spannerErrorf(codes.FailedPrecondition, "ambiguous column name %q", n) +} + +// errColNotFound returns error for not being able to find a named column. 
+func errColNotFound(n string) error { + return spannerErrorf(codes.NotFound, "column %q not found", n) +} + +// ColumnByName fetches the value from the named column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +func (r *Row) ColumnByName(name string, ptr interface{}) error { + index, err := r.ColumnIndex(name) + if err != nil { + return err + } + return r.Column(index, ptr) +} + +// errNumOfColValue returns error for providing wrong number of values to Columns. +func errNumOfColValue(n int, r *Row) error { + return spannerErrorf(codes.InvalidArgument, + "Columns(): number of arguments (%d) does not match row size (%d)", n, len(r.vals)) +} + +// Columns fetches all the columns in the row at once. +// +// The value of the kth column will be decoded into the kth argument to +// Columns. See above for the list of acceptable argument types. The number of +// arguments must be equal to the number of columns. Pass nil to specify that a +// column should be ignored. +func (r *Row) Columns(ptrs ...interface{}) error { + if len(ptrs) != len(r.vals) { + return errNumOfColValue(len(ptrs), r) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + for i, p := range ptrs { + if p == nil { + continue + } + if err := r.Column(i, p); err != nil { + return err + } + } + return nil +} + +// errToStructArgType returns error for p not having the correct data type(pointer to Go struct) to +// be the argument of Row.ToStruct. +func errToStructArgType(p interface{}) error { + return spannerErrorf(codes.InvalidArgument, "ToStruct(): type %T is not a valid pointer to Go struct", p) +} + +// ToStruct fetches the columns in a row into the fields of a struct. +// The rules for mapping a row's columns into a struct's exported fields +// are as the following: +// 1. If a field has a `spanner: "column_name"` tag, then decode column +// 'column_name' into the field. A special case is the `spanner: "-"` +// tag, which instructs ToStruct to ignore the field during decoding. +// 2. Otherwise, if the name of a field matches the name of a column (ignoring case), +// decode the column into the field. +// +// The fields of the destination struct can be of any type that is acceptable +// to (*spanner.Row).Column. +// +// Slice and pointer fields will be set to nil if the source column +// is NULL, and a non-nil value if the column is not NULL. To decode NULL +// values of other types, use one of the spanner.Null* as the type of the +// destination field. +func (r *Row) ToStruct(p interface{}) error { + // Check if p is a pointer to a struct + if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return errToStructArgType(p) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + // Call decodeStruct directly to decode the row as a typed proto.ListValue. + return decodeStruct( + &sppb.StructType{Fields: r.fields}, + &proto3.ListValue{Values: r.vals}, + p, + ) +} diff --git a/vendor/cloud.google.com/go/spanner/row_test.go b/vendor/cloud.google.com/go/spanner/row_test.go new file mode 100644 index 000000000..45c58403e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/row_test.go @@ -0,0 +1,1776 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "cloud.google.com/go/civil" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + tm = time.Now().UTC() + dt, _ = civil.ParseDate("2016-11-15") + // row contains a column for each unique Cloud Spanner type. + row = Row{ + []*sppb.StructType_Field{ + // STRING / STRING ARRAY + {"STRING", stringType()}, + {"NULL_STRING", stringType()}, + {"STRING_ARRAY", listType(stringType())}, + {"NULL_STRING_ARRAY", listType(stringType())}, + // BYTES / BYTES ARRAY + {"BYTES", bytesType()}, + {"NULL_BYTES", bytesType()}, + {"BYTES_ARRAY", listType(bytesType())}, + {"NULL_BYTES_ARRAY", listType(bytesType())}, + // INT64 / INT64 ARRAY + {"INT64", intType()}, + {"NULL_INT64", intType()}, + {"INT64_ARRAY", listType(intType())}, + {"NULL_INT64_ARRAY", listType(intType())}, + // BOOL / BOOL ARRAY + {"BOOL", boolType()}, + {"NULL_BOOL", boolType()}, + {"BOOL_ARRAY", listType(boolType())}, + {"NULL_BOOL_ARRAY", listType(boolType())}, + // FLOAT64 / FLOAT64 ARRAY + {"FLOAT64", floatType()}, + {"NULL_FLOAT64", floatType()}, + {"FLOAT64_ARRAY", listType(floatType())}, + {"NULL_FLOAT64_ARRAY", listType(floatType())}, + // TIMESTAMP / TIMESTAMP ARRAY + {"TIMESTAMP", timeType()}, + {"NULL_TIMESTAMP", timeType()}, + {"TIMESTAMP_ARRAY", listType(timeType())}, + {"NULL_TIMESTAMP_ARRAY", listType(timeType())}, + // DATE / DATE ARRAY + {"DATE", dateType()}, + {"NULL_DATE", dateType()}, + {"DATE_ARRAY", listType(dateType())}, + {"NULL_DATE_ARRAY", listType(dateType())}, + + // STRUCT ARRAY + { + "STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + { + "NULL_STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{ + // STRING / STRING ARRAY + stringProto("value"), + nullProto(), + listProto(stringProto("value1"), nullProto(), stringProto("value3")), + nullProto(), + // BYTES / BYTES ARRAY + bytesProto([]byte("value")), + nullProto(), + listProto(bytesProto([]byte("value1")), nullProto(), bytesProto([]byte("value3"))), + nullProto(), + // INT64 / INT64 ARRAY + intProto(17), + nullProto(), + listProto(intProto(1), intProto(2), nullProto()), + nullProto(), + // BOOL / BOOL ARRAY + boolProto(true), + nullProto(), + listProto(nullProto(), boolProto(true), boolProto(false)), + nullProto(), + // FLOAT64 / FLOAT64 ARRAY + floatProto(1.7), + nullProto(), + listProto(nullProto(), nullProto(), floatProto(1.7)), + nullProto(), + // TIMESTAMP / TIMESTAMP ARRAY + timeProto(tm), + nullProto(), + listProto(nullProto(), timeProto(tm)), + nullProto(), + // DATE / DATE ARRAY + dateProto(dt), + nullProto(), + listProto(nullProto(), dateProto(dt)), + nullProto(), + // STRUCT ARRAY + listProto( + nullProto(), + listProto(intProto(3), floatProto(33.3), stringProto("three")), + nullProto(), + ), + nullProto(), + }, + 
} +) + +// Test helpers for getting column values. +func TestColumnValues(t *testing.T) { + vals := []interface{}{} + wantVals := []interface{}{} + // Test getting column values. + for i, wants := range [][]interface{}{ + // STRING / STRING ARRAY + {"value", NullString{"value", true}}, + {NullString{}}, + {[]NullString{{"value1", true}, {}, {"value3", true}}}, + {[]NullString(nil)}, + // BYTES / BYTES ARRAY + {[]byte("value")}, + {[]byte(nil)}, + {[][]byte{[]byte("value1"), nil, []byte("value3")}}, + {[][]byte(nil)}, + // INT64 / INT64 ARRAY + {int64(17), NullInt64{17, true}}, + {NullInt64{}}, + {[]NullInt64{{1, true}, {2, true}, {}}}, + {[]NullInt64(nil)}, + // BOOL / BOOL ARRAY + {true, NullBool{true, true}}, + {NullBool{}}, + {[]NullBool{{}, {true, true}, {false, true}}}, + {[]NullBool(nil)}, + // FLOAT64 / FLOAT64 ARRAY + {1.7, NullFloat64{1.7, true}}, + {NullFloat64{}}, + {[]NullFloat64{{}, {}, {1.7, true}}}, + {[]NullFloat64(nil)}, + // TIMESTAMP / TIMESTAMP ARRAY + {tm, NullTime{tm, true}}, + {NullTime{}}, + {[]NullTime{{}, {tm, true}}}, + {[]NullTime(nil)}, + // DATE / DATE ARRAY + {dt, NullDate{dt, true}}, + {NullDate{}}, + {[]NullDate{{}, {dt, true}}}, + {[]NullDate(nil)}, + // STRUCT ARRAY + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + nil, + &struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + NullInt64{3, true}, + NullFloat64{33.3, true}, + "three", + }, + nil, + }, + []NullRow{ + {}, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + vals: []*proto3.Value{ + intProto(3), + floatProto(33.3), + stringProto("three"), + }, + }, + Valid: true, + }, + {}, + }, + }, + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }(nil), + []NullRow(nil), + }, + } { + t.Logf("Testing Column(%v): %v", i, row.fields[i]) + for j, want := range wants { + // Prepare Value vector to test Row.Columns. + if j == 0 { + vals = append(vals, reflect.New(reflect.TypeOf(want)).Interface()) + wantVals = append(wantVals, want) + } + // Column + gotp := reflect.New(reflect.TypeOf(want)) + err := row.Column(i, gotp.Interface()) + if err != nil { + t.Errorf("\t row.Column(%v, %T) returns error: %v, want nil", i, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t row.Column(%v, %T) retrives %v, want %v", i, gotp.Interface(), got, want) + } + // ColumnByName + gotp = reflect.New(reflect.TypeOf(want)) + err = row.ColumnByName(row.fields[i].Name, gotp.Interface()) + if err != nil { + t.Errorf("\t row.ColumnByName(%v, %T) returns error: %v, want nil", row.fields[i].Name, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t row.ColumnByName(%v, %T) retrives %v, want %v", row.fields[i].Name, gotp.Interface(), got, want) + } + } + } + // Test Row.Columns. + if err := row.Columns(vals...); err != nil { + t.Errorf("row.Columns() returns error: %v, want nil", err) + } + for i, want := range wantVals { + if got := reflect.Indirect(reflect.ValueOf(vals[i])).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t got %v(%T) for column[%v], want %v(%T)", got, got, row.fields[i].Name, want, want) + } + } +} + +// Test decoding into nil destination. 
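+// Column, ColumnByName, and ToStruct all reject a nil (or typed-nil)
+// destination with an error. Columns is the one exception: per its
+// documentation an untyped nil argument means "skip this column", so only
+// typed-nil pointers are reported as errors there.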
+func TestNilDst(t *testing.T) { + for i, test := range []struct { + r *Row + dst interface{} + wantErr error + structDst interface{} + wantToStructErr error + }{ + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + nil, + errDecodeColumn(0, errNilDst(nil)), + nil, + errToStructArgType(nil), + }, + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + (*string)(nil), + errDecodeColumn(0, errNilDst((*string)(nil))), + (*struct{ STRING string })(nil), + errNilDst((*struct{ STRING string })(nil)), + }, + { + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + ), + ), + }, + }, + []*proto3.Value{listProto( + listProto(intProto(3), floatProto(33.3)), + )}, + }, + (*[]*struct { + Col1 int + Col2 float64 + })(nil), + errDecodeColumn(0, errNilDst((*[]*struct { + Col1 int + Col2 float64 + })(nil))), + (*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil), + errNilDst((*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil)), + }, + } { + if gotErr := test.r.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.Column() returns error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.r.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.ColumnByName() returns error %v, want %v", i, gotErr, test.wantErr) + } + // Row.Columns(T) should return nil on T == nil, otherwise, it should return test.wantErr. + wantColumnsErr := test.wantErr + if test.dst == nil { + wantColumnsErr = nil + } + if gotErr := test.r.Columns(test.dst); !reflect.DeepEqual(gotErr, wantColumnsErr) { + t.Errorf("%v: test.r.Columns() returns error %v, want %v", i, gotErr, wantColumnsErr) + } + if gotErr := test.r.ToStruct(test.structDst); !reflect.DeepEqual(gotErr, test.wantToStructErr) { + t.Errorf("%v: test.r.ToStruct() returns error %v, want %v", i, gotErr, test.wantToStructErr) + } + } +} + +// Test decoding NULL columns using Go types that don't support NULL. +func TestNullTypeErr(t *testing.T) { + var tm time.Time + ntoi := func(n string) int { + for i, f := range row.fields { + if f.Name == n { + return i + } + } + t.Errorf("cannot find column name %q in row", n) + return 0 + } + for _, test := range []struct { + colName string + dst interface{} + }{ + { + "NULL_STRING", + proto.String(""), + }, + { + "NULL_INT64", + proto.Int64(0), + }, + { + "NULL_BOOL", + proto.Bool(false), + }, + { + "NULL_FLOAT64", + proto.Float64(0.0), + }, + { + "NULL_TIMESTAMP", + &tm, + }, + { + "NULL_DATE", + &dt, + }, + } { + wantErr := errDecodeColumn(ntoi(test.colName), errDstNotForNull(test.dst)) + if gotErr := row.ColumnByName(test.colName, test.dst); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("row.ColumnByName(%v) returns error %v, want %v", test.colName, gotErr, wantErr) + } + } +} + +// Test using wrong destination type in column decoders. +func TestColumnTypeErr(t *testing.T) { + // badDst cannot hold any of the column values. + badDst := &struct{}{} + for i, f := range row.fields { // For each of the columns, try to decode it into badDst. 
+ tc := f.Type.Code + isArray := strings.Contains(f.Name, "ARRAY") + if isArray { + tc = f.Type.ArrayElementType.Code + } + wantErr := errDecodeColumn(i, errTypeMismatch(tc, isArray, badDst)) + if gotErr := row.Column(i, badDst); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("Column(%v): decoding into destination with wrong type %T returns error %v, want %v", + i, badDst, gotErr, wantErr) + } + if gotErr := row.ColumnByName(f.Name, badDst); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("ColumnByName(%v): decoding into destination with wrong type %T returns error %v, want %v", + f.Name, badDst, gotErr, wantErr) + } + } + wantErr := errDecodeColumn(1, errTypeMismatch(sppb.TypeCode_STRING, false, badDst)) + // badDst is used to receive column 1. + vals := []interface{}{nil, badDst} // Row.Column() is expected to fail at column 1. + // Skip decoding the rest columns by providing nils as the destinations. + for i := 2; i < len(row.fields); i++ { + vals = append(vals, nil) + } + if gotErr := row.Columns(vals...); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("Columns(): decoding column 1 with wrong type %T returns error %v, want %v", + badDst, gotErr, wantErr) + } +} + +// Test the handling of invalid column decoding requests which cannot be mapped to correct column(s). +func TestInvalidColumnRequest(t *testing.T) { + for _, test := range []struct { + desc string + f func() error + wantErr error + }{ + { + "Request column index is out of range", + func() error { + return row.Column(10000, &struct{}{}) + }, + errColIdxOutOfRange(10000, &row), + }, + { + "Cannot find the named column", + func() error { + return row.ColumnByName("string", &struct{}{}) + }, + errColNotFound("string"), + }, + { + "Not enough arguments to call row.Columns()", + func() error { + return row.Columns(nil, nil) + }, + errNumOfColValue(2, &row), + }, + { + "Call ColumnByName on row with duplicated column names", + func() error { + var s string + r := &Row{ + []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + []*proto3.Value{stringProto("value1"), stringProto("value2")}, + } + return r.ColumnByName("Val", &s) + }, + errDupColName("Val"), + }, + { + "Call ToStruct on row with duplicated column names", + func() error { + s := &struct { + Val string + }{} + r := &Row{ + []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + []*proto3.Value{stringProto("value1"), stringProto("value2")}, + } + return r.ToStruct(s) + }, + errDupSpannerField("Val", &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + }), + }, + { + "Call ToStruct on a row with unnamed field", + func() error { + s := &struct { + Val string + }{} + r := &Row{ + []*sppb.StructType_Field{ + {"", stringType()}, + }, + []*proto3.Value{stringProto("value1")}, + } + return r.ToStruct(s) + }, + errUnnamedField(&sppb.StructType{Fields: []*sppb.StructType_Field{{"", stringType()}}}, 0), + }, + } { + if gotErr := test.f(); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.f() returns error %v, want %v", test.desc, gotErr, test.wantErr) + } + } +} + +// Test decoding the row with row.ToStruct into an invalid destination. 
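+// For contrast, a destination ToStruct accepts is a non-nil pointer to a Go
+// struct whose exported fields map to columns either by a `spanner:"..."` tag
+// or by a case-insensitive name match, e.g. (illustrative):
+//
+//	var dst struct {
+//		PhotoID int64  `spanner:"photo_id"`
+//		Caption string `spanner:"caption"`
+//	}
+//	// err := r.ToStruct(&dst)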
+func TestToStructInvalidDst(t *testing.T) { + for _, test := range []struct { + desc string + dst interface{} + wantErr error + }{ + { + "Decode row as STRUCT into int32", + proto.Int(1), + errToStructArgType(proto.Int(1)), + }, + { + "Decode row as STRUCT to nil Go struct", + (*struct{})(nil), + errNilDst((*struct{})(nil)), + }, + { + "Decode row as STRUCT to Go struct with duplicated fields for the PK column", + &struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with no field for the PK column", + &struct { + PK1 string `spanner:"_STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"_STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with wrong type for the PK column", + &struct { + PK1 int64 `spanner:"STRING"` + }{}, + errDecodeStructField(&sppb.StructType{Fields: row.fields}, "STRING", + errTypeMismatch(sppb.TypeCode_STRING, false, proto.Int64(0))), + }, + } { + if gotErr := row.ToStruct(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: decoding:\ngot %v\nwant %v", test.desc, gotErr, test.wantErr) + } + } +} + +// Test decoding a broken row. +func TestBrokenRow(t *testing.T) { + for i, test := range []struct { + row *Row + dst interface{} + wantErr error + }{ + { + // A row with no field. + &Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errFieldsMismatchVals(&Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }), + }, + { + // A row with nil field. + &Row{ + []*sppb.StructType_Field{nil}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errNilColType(0), + }, + { + // Field is not nil, but its type is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + nil, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilSpannerType()), + }, + { + // Field is not nil, field type is not nil, but it is an array and its array element type is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + }, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilArrElemType(&sppb.Type{Code: sppb.TypeCode_ARRAY})), + }, + { + // Field specifies valid type, value is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{nil}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errNilSrc()), + }, + { + // Field specifies INT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies INT64 type, but value is for Number type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "String")), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + proto.Int64(0), + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + &NullInt64{}, + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies STRING type, but value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies STRING type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{listProto(stringProto("value"))}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(listProto(stringProto("value")), "String")), + }, + { + // Field specifies FLOAT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_NumberValue)(nil)}}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_NumberValue)(nil)}, "Number")), + }, + { + // Field specifies FLOAT64 type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(boolProto(true), "Number")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + &NullFloat64{}, + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + proto.Float64(0), + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies BYTES type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies BYTES type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies BYTES type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{stringProto("&&")}, + }, + &[]byte{}, + errDecodeColumn(0, errBadEncoding(stringProto("&&"), func() error { + _, err := base64.StdEncoding.DecodeString("&&") + return err + }())), + }, + { + // Field specifies BOOL type, value is having a nil Kind. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_BoolValue)(nil)}}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_BoolValue)(nil)}, "Bool")), + }, + { + // Field specifies BOOL type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{stringProto("false")}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(stringProto("false"), "Bool")), + }, + { + // Field specifies TIMESTAMP type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies TIMESTAMP type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies TIMESTAMP type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := time.Parse(time.RFC3339Nano, "junk") + return err + }())), + }, + { + // Field specifies DATE type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies DATE type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies DATE type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := civil.ParseDate("junk") + return err + }())), + }, + + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errNilListValue("INT64")), + }, + { + // Field specifies ARRAY type, but value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "INT64", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilListValue("STRING")), + }, + { + // Field specifies ARRAY type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(boolProto(true), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullString{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "STRING", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errNilListValue("FLOAT64")), + }, + { + // Field specifies ARRAY type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{stringProto("value")}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(stringProto("value"), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "FLOAT64", errSrcVal(boolProto(true), "Number"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[][]byte{}, + errDecodeColumn(0, errNilListValue("BYTES")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[][]byte{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BYTES", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errNilListValue("BOOL")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullBool{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BOOL", errSrcVal(floatProto(1.0), "Bool"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errNilListValue("TIMESTAMP")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullTime{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "TIMESTAMP", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errNilListValue("DATE")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullDate{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "DATE", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNotStructElement(0, bytesProto([]byte("value")))), + }, + { + // Field specifies ARRAY type, value is for ARRAY type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, bytesProto([]byte("value")), + "STRUCT", errSrcVal(bytesProto([]byte("value")), "List"))), + }, + { + // Field specifies ARRAY, but is having nil StructType. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + &sppb.Type{Code: sppb.TypeCode_STRUCT}, + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), floatProto(2.0), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, listProto(intProto(1), floatProto(2.0), stringProto("3")), + "STRUCT", errNilSpannerStructType())), + }, + { + // Field specifies ARRAY, but the second struct value is for BOOL type instead of FLOAT64. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), boolProto(true), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn( + 0, + errDecodeArrayElement( + 0, listProto(intProto(1), boolProto(true), stringProto("3")), "STRUCT", + errDecodeStructField( + &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + }, + "Col2", + errSrcVal(boolProto(true), "Number"), + ), + ), + ), + }, + } { + if gotErr := test.row.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Column(0) got error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.row.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.ColumnByName(%q) got error %v, want %v", i, "Col0", gotErr, test.wantErr) + } + if gotErr := test.row.Columns(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Columns(%T) got error %v, want %v", i, test.dst, gotErr, test.wantErr) + } + } +} + +// Test Row.ToStruct(). 
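// Editor's sketch (not part of the patch): the accessors exercised by the loop above
// are the normal decode path. Assuming a *Row named r whose only column is a STRING
// named "Name", the three entry points are used like this:
//
//	var name string
//	errByIndex := r.Column(0, &name)           // decode a single column by index
//	errByName := r.ColumnByName("Name", &name) // decode a single column by name
//	errAll := r.Columns(&name)                 // decode every column, in order
//
// Row.ToStruct, exercised by the next test, instead maps a whole row onto the fields
// of a struct via their `spanner:"..."` tags.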
+func TestToStruct(t *testing.T) { + s := []struct { + // STRING / STRING ARRAY + PrimaryKey string `spanner:"STRING"` + NullString NullString `spanner:"NULL_STRING"` + StringArray []NullString `spanner:"STRING_ARRAY"` + NullStringArray []NullString `spanner:"NULL_STRING_ARRAY"` + // BYTES / BYTES ARRAY + Bytes []byte `spanner:"BYTES"` + NullBytes []byte `spanner:"NULL_BYTES"` + BytesArray [][]byte `spanner:"BYTES_ARRAY"` + NullBytesArray [][]byte `spanner:"NULL_BYTES_ARRAY"` + // INT64 / INT64 ARRAY + Int64 int64 `spanner:"INT64"` + NullInt64 NullInt64 `spanner:"NULL_INT64"` + Int64Array []NullInt64 `spanner:"INT64_ARRAY"` + NullInt64Array []NullInt64 `spanner:"NULL_INT64_ARRAY"` + // BOOL / BOOL ARRAY + Bool bool `spanner:"BOOL"` + NullBool NullBool `spanner:"NULL_BOOL"` + BoolArray []NullBool `spanner:"BOOL_ARRAY"` + NullBoolArray []NullBool `spanner:"NULL_BOOL_ARRAY"` + // FLOAT64 / FLOAT64 ARRAY + Float64 float64 `spanner:"FLOAT64"` + NullFloat64 NullFloat64 `spanner:"NULL_FLOAT64"` + Float64Array []NullFloat64 `spanner:"FLOAT64_ARRAY"` + NullFloat64Array []NullFloat64 `spanner:"NULL_FLOAT64_ARRAY"` + // TIMESTAMP / TIMESTAMP ARRAY + Timestamp time.Time `spanner:"TIMESTAMP"` + NullTimestamp NullTime `spanner:"NULL_TIMESTAMP"` + TimestampArray []NullTime `spanner:"TIMESTAMP_ARRAY"` + NullTimestampArray []NullTime `spanner:"NULL_TIMESTAMP_ARRAY"` + // DATE / DATE ARRAY + Date civil.Date `spanner:"DATE"` + NullDate NullDate `spanner:"NULL_DATE"` + DateArray []NullDate `spanner:"DATE_ARRAY"` + NullDateArray []NullDate `spanner:"NULL_DATE_ARRAY"` + + // STRUCT ARRAY + StructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"STRUCT_ARRAY"` + NullStructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"NULL_STRUCT_ARRAY"` + }{ + {}, // got + { + // STRING / STRING ARRAY + "value", + NullString{}, + []NullString{{"value1", true}, {}, {"value3", true}}, + []NullString(nil), + // BYTES / BYTES ARRAY + []byte("value"), + []byte(nil), + [][]byte{[]byte("value1"), nil, []byte("value3")}, + [][]byte(nil), + // INT64 / INT64 ARRAY + int64(17), + NullInt64{}, + []NullInt64{{int64(1), true}, {int64(2), true}, {}}, + []NullInt64(nil), + // BOOL / BOOL ARRAY + true, + NullBool{}, + []NullBool{{}, {true, true}, {false, true}}, + []NullBool(nil), + // FLOAT64 / FLOAT64 ARRAY + 1.7, + NullFloat64{}, + []NullFloat64{{}, {}, {1.7, true}}, + []NullFloat64(nil), + // TIMESTAMP / TIMESTAMP ARRAY + tm, + NullTime{}, + []NullTime{{}, {tm, true}}, + []NullTime(nil), + // DATE / DATE ARRAY + dt, + NullDate{}, + []NullDate{{}, {dt, true}}, + []NullDate(nil), + // STRUCT ARRAY + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }{ + nil, + &struct { + Col1 int64 + Col2 float64 + Col3 string + }{3, 33.3, "three"}, + nil, + }, + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }(nil), + }, // want + } + err := row.ToStruct(&s[0]) + if err != nil { + t.Errorf("row.ToStruct() returns error: %v, want nil", err) + } + if !reflect.DeepEqual(s[0], s[1]) { + t.Errorf("row.ToStruct() fetches struct %v, want %v", s[0], s[1]) + } +} + +// Test helpers for getting column names. +func TestColumnNameAndIndex(t *testing.T) { + // Test Row.Size(). + if rs := row.Size(); rs != len(row.fields) { + t.Errorf("row.Size() returns %v, want %v", rs, len(row.fields)) + } + // Test Row.Size() on empty Row. 
+ if rs := (&Row{}).Size(); rs != 0 { + t.Errorf("empty_row.Size() returns %v, want %v", rs, 0) + } + // Test Row.ColumnName() + for i, col := range row.fields { + if cn := row.ColumnName(i); cn != col.Name { + t.Errorf("row.ColumnName(%v) returns %q, want %q", i, cn, col.Name) + } + goti, err := row.ColumnIndex(col.Name) + if err != nil { + t.Errorf("ColumnIndex(%q) error %v", col.Name, err) + continue + } + if goti != i { + t.Errorf("ColumnIndex(%q) = %d, want %d", col.Name, goti, i) + } + } + // Test Row.ColumnName on empty Row. + if cn := (&Row{}).ColumnName(0); cn != "" { + t.Errorf("empty_row.ColumnName(%v) returns %q, want %q", 0, cn, "") + } + // Test Row.ColumnIndex on empty Row. + if _, err := (&Row{}).ColumnIndex(""); err == nil { + t.Error("empty_row.ColumnIndex returns nil, want error") + } +} + +func TestNewRow(t *testing.T) { + for _, test := range []struct { + names []string + values []interface{} + want *Row + wantErr error + }{ + { + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{}, + values: []interface{}{}, + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{"a", "b"}, + values: []interface{}{}, + want: nil, + wantErr: errNamesValuesMismatch([]string{"a", "b"}, []interface{}{}), + }, + { + names: []string{"a", "b", "c"}, + values: []interface{}{5, "abc", GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}}, + want: &Row{ + []*sppb.StructType_Field{ + {"a", intType()}, + {"b", stringType()}, + {"c", listType(intType())}, + }, + []*proto3.Value{ + intProto(5), + stringProto("abc"), + listProto(intProto(91), nullProto(), intProto(87)), + }, + }, + }, + } { + got, err := NewRow(test.names, test.values) + if !reflect.DeepEqual(err, test.wantErr) { + t.Errorf("NewRow(%v,%v).err = %s, want %s", test.names, test.values, err, test.wantErr) + continue + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("NewRow(%v,%v) = %s, want %s", test.names, test.values, got, test.want) + continue + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/session.go b/vendor/cloud.google.com/go/spanner/session.go new file mode 100644 index 000000000..5ab1386c5 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/session.go @@ -0,0 +1,965 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "container/heap" + "container/list" + "fmt" + "math/rand" + "strings" + "sync" + "time" + + log "github.com/golang/glog" + "golang.org/x/net/context" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// sessionHandle is an interface for transactions to access Cloud Spanner sessions safely. It is generated by sessionPool.take(). +type sessionHandle struct { + // mu guarantees that inner session object is returned / destroyed only once. + mu sync.Mutex + // session is a pointer to a session object. 
Transactions never need to access it directly. + session *session +} + +// recycle gives the inner session object back to its home session pool. It is safe to call recycle multiple times but only the first one would take effect. +func (sh *sessionHandle) recycle() { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled. + return + } + sh.session.recycle() + sh.session = nil +} + +// getID gets the Cloud Spanner session ID from the internal session object. getID returns empty string if the sessionHandle is nil or the inner session +// object has been released by recycle / destroy. +func (sh *sessionHandle) getID() string { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled/destroyed. + return "" + } + return sh.session.getID() +} + +// getClient gets the Cloud Spanner RPC client associated with the session ID in sessionHandle. +func (sh *sessionHandle) getClient() sppb.SpannerClient { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.client +} + +// getMetadata returns the metadata associated with the session in sessionHandle. +func (sh *sessionHandle) getMetadata() metadata.MD { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.md +} + +// getTransactionID returns the transaction id in the session if available. +func (sh *sessionHandle) getTransactionID() transactionID { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.tx +} + +// destroy destroys the inner session object. It is safe to call destroy multiple times and only the first call would attempt to +// destroy the inner session object. +func (sh *sessionHandle) destroy() { + sh.mu.Lock() + s := sh.session + sh.session = nil + sh.mu.Unlock() + if s == nil { + // sessionHandle has already been destroyed. + return + } + s.destroy(false) +} + +// session wraps a Cloud Spanner session ID through which transactions are created and executed. +type session struct { + // client is the RPC channel to Cloud Spanner. It is set only once during session's creation. + client sppb.SpannerClient + // id is the unique id of the session in Cloud Spanner. It is set only once during session's creation. + id string + // pool is the session's home session pool where it was created. It is set only once during session's creation. + pool *sessionPool + // createTime is the timestamp of the session's creation. It is set only once during session's creation. + createTime time.Time + + // mu protects the following fields from concurrent access: both healthcheck workers and transactions can modify them. + mu sync.Mutex + // valid marks the validity of a session. + valid bool + // hcIndex is the index of the session inside the global healthcheck queue. If hcIndex < 0, session has been unregistered from the queue. + hcIndex int + // idleList is the linkedlist node which links the session to its home session pool's idle list. If idleList == nil, the + // session is not in idle list. + idleList *list.Element + // nextCheck is the timestamp of next scheduled healthcheck of the session. It is maintained by the global health checker. + nextCheck time.Time + // checkingHelath is true if currently this session is being processed by health checker. Must be modified under health checker lock. + checkingHealth bool + // md is the Metadata to be sent with each request. 
+ md metadata.MD + // tx contains the transaction id if the session has been prepared for write. + tx transactionID +} + +// isValid returns true if the session is still valid for use. +func (s *session) isValid() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.valid +} + +// isWritePrepared returns true if the session is prepared for write. +func (s *session) isWritePrepared() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.tx != nil +} + +// String implements fmt.Stringer for session. +func (s *session) String() string { + s.mu.Lock() + defer s.mu.Unlock() + return fmt.Sprintf("", + s.id, s.hcIndex, s.idleList, s.valid, s.createTime, s.nextCheck) +} + +// ping verifies if the session is still alive in Cloud Spanner. +func (s *session) ping() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + return runRetryable(ctx, func(ctx context.Context) error { + _, err := s.client.GetSession(contextWithMetadata(ctx, s.pool.md), &sppb.GetSessionRequest{Name: s.getID()}) // s.getID is safe even when s is invalid. + return err + }) +} + +// refreshIdle refreshes the session's session ID if it is in its home session pool's idle list +// and returns true if successful. +func (s *session) refreshIdle() bool { + s.mu.Lock() + validAndIdle := s.valid && s.idleList != nil + s.mu.Unlock() + if !validAndIdle { + // Optimization: return early if s is not valid or if s is not in idle list. + return false + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + var sid string + err := runRetryable(ctx, func(ctx context.Context) error { + session, e := s.client.CreateSession(contextWithMetadata(ctx, s.pool.md), &sppb.CreateSessionRequest{Database: s.pool.db}) + if e != nil { + return e + } + sid = session.Name + return nil + }) + if err != nil { + return false + } + s.pool.mu.Lock() + s.mu.Lock() + var recycle bool + if s.valid && s.idleList != nil { + // session is in idle list, refresh its session id. + sid, s.id = s.id, sid + if s.tx != nil { + s.tx = nil + s.pool.idleWriteList.Remove(s.idleList) + // We need to put this session back into the pool. + recycle = true + } + } + s.mu.Unlock() + s.pool.mu.Unlock() + if recycle { + s.pool.recycle(s) + } + // If we fail to explicitly destroy the session, it will be eventually garbage collected by + // Cloud Spanner. + if err = runRetryable(ctx, func(ctx context.Context) error { + _, e := s.client.DeleteSession(contextWithMetadata(ctx, s.pool.md), &sppb.DeleteSessionRequest{Name: sid}) + return e + }); err != nil { + return false + } + return true +} + +// setHcIndex atomically sets the session's index in the healthcheck queue and returns the old index. +func (s *session) setHcIndex(i int) int { + s.mu.Lock() + defer s.mu.Unlock() + oi := s.hcIndex + s.hcIndex = i + return oi +} + +// setIdleList atomically sets the session's idle list link and returns the old link. +func (s *session) setIdleList(le *list.Element) *list.Element { + s.mu.Lock() + defer s.mu.Unlock() + old := s.idleList + s.idleList = le + return old +} + +// invalidate marks a session as invalid and returns the old validity. +func (s *session) invalidate() bool { + s.mu.Lock() + defer s.mu.Unlock() + ov := s.valid + s.valid = false + return ov +} + +// setNextCheck sets the timestamp for next healthcheck on the session. 
+func (s *session) setNextCheck(t time.Time) { + s.mu.Lock() + defer s.mu.Unlock() + s.nextCheck = t +} + +// setTransactionID sets the transaction id in the session +func (s *session) setTransactionID(tx transactionID) { + s.mu.Lock() + defer s.mu.Unlock() + s.tx = tx +} + +// getID returns the session ID which uniquely identifies the session in Cloud Spanner. +func (s *session) getID() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.id +} + +// getHcIndex returns the session's index into the global healthcheck priority queue. +func (s *session) getHcIndex() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.hcIndex +} + +// getIdleList returns the session's link in its home session pool's idle list. +func (s *session) getIdleList() *list.Element { + s.mu.Lock() + defer s.mu.Unlock() + return s.idleList +} + +// getNextCheck returns the timestamp for next healthcheck on the session. +func (s *session) getNextCheck() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.nextCheck +} + +// recycle turns the session back to its home session pool. +func (s *session) recycle() { + s.setTransactionID(nil) + if !s.pool.recycle(s) { + // s is rejected by its home session pool because it expired and the session pool is currently having enough number of open sessions. + s.destroy(false) + } +} + +// destroy removes the session from its home session pool, healthcheck queue and Cloud Spanner service. +func (s *session) destroy(isExpire bool) bool { + // Remove s from session pool. + if !s.pool.remove(s, isExpire) { + return false + } + // Unregister s from healthcheck queue. + s.pool.hc.unregister(s) + // Remove s from Cloud Spanner service. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + // Ignore the error returned by runRetryable because even if we fail to explicitly destroy the session, + // it will be eventually garbage collected by Cloud Spanner. + runRetryable(ctx, func(ctx context.Context) error { + _, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()}) + return e + }) + return true +} + +// prepareForWrite prepares the session for write if it is not already in that state. +func (s *session) prepareForWrite(ctx context.Context) error { + if s.isWritePrepared() { + return nil + } + tx, err := beginTransaction(ctx, s.getID(), s.client) + if err != nil { + return err + } + s.setTransactionID(tx) + return nil +} + +// SessionPoolConfig stores configurations of a session pool. +type SessionPoolConfig struct { + // getRPCClient is the caller supplied method for getting a gRPC client to Cloud Spanner, this makes session pool able to use client pooling. + getRPCClient func() (sppb.SpannerClient, error) + // MaxOpened is the maximum number of opened sessions that is allowed by the + // session pool, zero means unlimited. + MaxOpened uint64 + // MinOpened is the minimum number of opened sessions that the session pool + // tries to maintain. Session pool won't continue to expire sessions if number + // of opened connections drops below MinOpened. However, if session is found + // to be broken, it will still be evicted from session pool, therefore it is + // posssible that the number of opened sessions drops below MinOpened. + MinOpened uint64 + // MaxSessionAge is the maximum duration that a session can be reused, zero + // means session pool will never expire sessions. 
+ MaxSessionAge time.Duration + // MaxBurst is the maximum number of concurrent session creation requests, + MaxBurst uint64 + // WriteSessions is the fraction of sessions we try to keep prepared for write. + WriteSessions float64 + // HealthCheckWorkers is number of workers used by health checker for this pool. + HealthCheckWorkers int + // HealthCheckInterval is how often the health checker pings a session. + HealthCheckInterval time.Duration +} + +// errNoRPCGetter returns error for SessionPoolConfig missing getRPCClient method. +func errNoRPCGetter() error { + return spannerErrorf(codes.InvalidArgument, "require SessionPoolConfig.getRPCClient != nil, got nil") +} + +// errMinOpenedGTMapOpened returns error for SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened when SessionPoolConfig.MaxOpened is set. +func errMinOpenedGTMaxOpened(spc *SessionPoolConfig) error { + return spannerErrorf(codes.InvalidArgument, + "require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %v and %v", spc.MaxOpened, spc.MinOpened) +} + +// validate verifies that the SessionPoolConfig is good for use. +func (spc *SessionPoolConfig) validate() error { + if spc.getRPCClient == nil { + return errNoRPCGetter() + } + if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 { + return errMinOpenedGTMaxOpened(spc) + } + return nil +} + +// sessionPool creates and caches Cloud Spanner sessions. +type sessionPool struct { + // mu protects sessionPool from concurrent access. + mu sync.Mutex + // valid marks the validity of the session pool. + valid bool + // db is the database name that all sessions in the pool are associated with. + db string + // idleList caches idle session IDs. Session IDs in this list can be allocated for use. + idleList list.List + // idleWriteList caches idle sessions which have been prepared for write. + idleWriteList list.List + // mayGetSession is for broadcasting that session retrival/creation may proceed. + mayGetSession chan struct{} + // numOpened is the total number of open sessions from the session pool. + numOpened uint64 + // createReqs is the number of ongoing session creation requests. + createReqs uint64 + // prepareReqs is the number of ongoing session preparation request. + prepareReqs uint64 + // configuration of the session pool. + SessionPoolConfig + // Metadata to be sent with each request + md metadata.MD + // hc is the health checker + hc *healthChecker +} + +// newSessionPool creates a new session pool. +func newSessionPool(db string, config SessionPoolConfig, md metadata.MD) (*sessionPool, error) { + if err := config.validate(); err != nil { + return nil, err + } + pool := &sessionPool{ + db: db, + valid: true, + mayGetSession: make(chan struct{}), + SessionPoolConfig: config, + md: md, + } + if config.HealthCheckWorkers == 0 { + // With 10 workers and assuming average latency of 5 ms for BeginTransaction, we will be able to + // prepare 2000 tx/sec in advance. If the rate of takeWriteSession is more than that, it will + // degrade to doing BeginTransaction inline. + // TODO: consider resizing the worker pool dynamically according to the load. + config.HealthCheckWorkers = 10 + } + if config.HealthCheckInterval == 0 { + config.HealthCheckInterval = 5 * time.Minute + } + // On GCE VM, within the same region an healthcheck ping takes on average 10ms to finish, given a 5 minutes interval and + // 10 healthcheck workers, a healthChecker can effectively mantain 100 checks_per_worker/sec * 10 workers * 300 seconds = 300K sessions. 
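	// Editor's note (not part of the upstream file): spelling out the arithmetic in the
	// comment above — at roughly 10ms per ping, one worker sustains about 100 pings per
	// second, so 100 pings/sec/worker * 10 workers * 300s interval ≈ 300,000 sessions
	// can each be checked once per 5-minute interval.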
+ pool.hc = newHealthChecker(config.HealthCheckInterval, config.HealthCheckWorkers, pool) + return pool, nil +} + +// isValid checks if the session pool is still valid. +func (p *sessionPool) isValid() bool { + if p == nil { + return false + } + p.mu.Lock() + defer p.mu.Unlock() + return p.valid +} + +// close marks the session pool as closed. +func (p *sessionPool) close() { + if p == nil { + return + } + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return + } + p.valid = false + p.mu.Unlock() + p.hc.close() + // destroy all the sessions + p.hc.mu.Lock() + allSessions := make([]*session, len(p.hc.queue.sessions)) + copy(allSessions, p.hc.queue.sessions) + p.hc.mu.Unlock() + for _, s := range allSessions { + s.destroy(false) + } +} + +// errInvalidSessionPool returns error for using an invalid session pool. +func errInvalidSessionPool() error { + return spannerErrorf(codes.InvalidArgument, "invalid session pool") +} + +// errGetSessionTimeout returns error for context timeout during sessionPool.take(). +func errGetSessionTimeout() error { + return spannerErrorf(codes.Canceled, "timeout / context canceled during getting session") +} + +// shouldPrepareWrite returns true if we should prepare more sessions for write. +func (p *sessionPool) shouldPrepareWrite() bool { + return float64(p.numOpened)*p.WriteSessions > float64(p.idleWriteList.Len()+int(p.prepareReqs)) +} + +func (p *sessionPool) createSession(ctx context.Context) (*session, error) { + doneCreate := func(done bool) { + p.mu.Lock() + if !done { + // Session creation failed, give budget back. + p.numOpened-- + } + p.createReqs-- + // Notify other waiters blocking on session creation. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + p.mu.Unlock() + } + sc, err := p.getRPCClient() + if err != nil { + doneCreate(false) + return nil, err + } + var s *session + err = runRetryable(ctx, func(ctx context.Context) error { + sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: p.db}) + if e != nil { + return e + } + // If no error, construct the new session. + s = &session{valid: true, client: sc, id: sid.Name, pool: p, createTime: time.Now(), md: p.md} + p.hc.register(s) + return nil + }) + if err != nil { + doneCreate(false) + // Should return error directly because of the previous retries on CreateSession RPC. + return nil, err + } + doneCreate(true) + return s, nil +} + +func (p *sessionPool) isHealthy(s *session) bool { + if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) { + // TODO: figure out if we need to schedule a new healthcheck worker here. + if err := s.ping(); shouldDropSession(err) { + // The session is already bad, continue to fetch/create a new one. + s.destroy(false) + return false + } + p.hc.scheduledHC(s) + } + return true +} + +// take returns a cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned by take should be used for read operations. +func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) { + ctx = contextWithMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. 
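			// Editor's note: recycle() pushes returned sessions onto the front of the idle
			// lists, so Front() hands back the most recently used ("hot") session; cold
			// sessions drift toward the back, where expiration and health checks retire them.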
+ s = p.idleList.Remove(p.idleList.Front()).(*session) + } else if p.idleWriteList.Len() > 0 { + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + return &sessionHandle{session: s}, nil + } + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. + if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { + mayGetSession := p.mayGetSession + p.mu.Unlock() + select { + case <-ctx.Done(): + return nil, errGetSessionTimeout() + case <-mayGetSession: + } + continue + } + // Take budget before the actual session creation. + p.numOpened++ + p.createReqs++ + p.mu.Unlock() + if s, err = p.createSession(ctx); err != nil { + return nil, toSpannerError(err) + } + return &sessionHandle{session: s}, nil + } +} + +// takeWriteSession returns a write prepared cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned should be used for read write transactions. +func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, error) { + ctx = contextWithMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleWriteList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + } else if p.idleList.Len() > 0 { + s = p.idleList.Remove(p.idleList.Front()).(*session) + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + if !s.isWritePrepared() { + if err = s.prepareForWrite(ctx); err != nil { + return nil, toSpannerError(err) + } + } + return &sessionHandle{session: s}, nil + } + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. + if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { + mayGetSession := p.mayGetSession + p.mu.Unlock() + select { + case <-ctx.Done(): + return nil, errGetSessionTimeout() + case <-mayGetSession: + } + continue + } + + // Take budget before the actual session creation. + p.numOpened++ + p.createReqs++ + p.mu.Unlock() + if s, err = p.createSession(ctx); err != nil { + return nil, toSpannerError(err) + } + if err = s.prepareForWrite(ctx); err != nil { + return nil, toSpannerError(err) + } + return &sessionHandle{session: s}, nil + } +} + +// recycle puts session s back to the session pool's idle list, it returns true if the session pool successfully recycles session s. 
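// Editor's sketch, not part of the vendored file: a minimal caller pattern for the pool
// defined above, assuming getClient returns a working sppb.SpannerClient; the database
// name is a placeholder. Only identifiers defined in this file are real.
func examplePoolUsage(getClient func() (sppb.SpannerClient, error)) error {
	cfg := SessionPoolConfig{
		getRPCClient:  getClient, // required: supplies the gRPC client for new sessions
		MinOpened:     10,        // keep at least 10 sessions alive
		MaxOpened:     100,       // hard cap; take() blocks once it is reached
		WriteSessions: 0.2,       // aim to keep ~20% of idle sessions write-prepared
	}
	pool, err := newSessionPool("projects/p/instances/i/databases/d", cfg, nil)
	if err != nil {
		return err
	}
	defer pool.close()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	sh, err := pool.take(ctx) // use takeWriteSession for read-write transactions
	if err != nil {
		return err
	}
	defer sh.recycle()  // hand the session back to the idle list when done
	_ = sh.getClient()  // issue RPCs through the session's SpannerClient
	return nil
}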
+func (p *sessionPool) recycle(s *session) bool { + p.mu.Lock() + defer p.mu.Unlock() + if !s.isValid() || !p.valid { + // Reject the session if session is invalid or pool itself is invalid. + return false + } + if p.MaxSessionAge != 0 && s.createTime.Add(p.MaxSessionAge).Before(time.Now()) && p.numOpened > p.MinOpened { + // session expires and number of opened sessions exceeds MinOpened, let the session destroy itself. + return false + } + // Hot sessions will be converging at the front of the list, cold sessions will be evicted by healthcheck workers. + if s.isWritePrepared() { + s.setIdleList(p.idleWriteList.PushFront(s)) + } else { + s.setIdleList(p.idleList.PushFront(s)) + } + // Broadcast that a session has been returned to idle list. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + return true +} + +// remove atomically removes session s from the session pool and invalidates s. +// If isExpire == true, the removal is triggered by session expiration and in such cases, only idle sessions can be removed. +func (p *sessionPool) remove(s *session, isExpire bool) bool { + p.mu.Lock() + defer p.mu.Unlock() + if isExpire && (p.numOpened <= p.MinOpened || s.getIdleList() == nil) { + // Don't expire session if the session is not in idle list (in use), or if number of open sessions is going below p.MinOpened. + return false + } + ol := s.setIdleList(nil) + // If the session is in the idlelist, remove it. + if ol != nil { + // Remove from whichever list it is in. + p.idleList.Remove(ol) + p.idleWriteList.Remove(ol) + } + if s.invalidate() { + // Decrease the number of opened sessions. + p.numOpened-- + // Broadcast that a session has been destroyed. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + return true + } + return false +} + +// hcHeap implements heap.Interface. It is used to create the priority queue for session healthchecks. +type hcHeap struct { + sessions []*session +} + +// Len impelemnts heap.Interface.Len. +func (h hcHeap) Len() int { + return len(h.sessions) +} + +// Less implements heap.Interface.Less. +func (h hcHeap) Less(i, j int) bool { + return h.sessions[i].getNextCheck().Before(h.sessions[j].getNextCheck()) +} + +// Swap implements heap.Interface.Swap. +func (h hcHeap) Swap(i, j int) { + h.sessions[i], h.sessions[j] = h.sessions[j], h.sessions[i] + h.sessions[i].setHcIndex(i) + h.sessions[j].setHcIndex(j) +} + +// Push implements heap.Interface.Push. +func (h *hcHeap) Push(s interface{}) { + ns := s.(*session) + ns.setHcIndex(len(h.sessions)) + h.sessions = append(h.sessions, ns) +} + +// Pop implements heap.Interface.Pop. +func (h *hcHeap) Pop() interface{} { + old := h.sessions + n := len(old) + s := old[n-1] + h.sessions = old[:n-1] + s.setHcIndex(-1) + return s +} + +// healthChecker performs periodical healthchecks on registered sessions. +type healthChecker struct { + // mu protects concurrent access to hcQueue. + mu sync.Mutex + // queue is the priority queue for session healthchecks. Sessions with lower nextCheck rank higher in the queue. + queue hcHeap + // interval is the average interval between two healthchecks on a session. + interval time.Duration + // workers is the number of concurrent healthcheck workers. + workers int + // waitWorkers waits for all healthcheck workers to exit + waitWorkers sync.WaitGroup + // pool is the underlying session pool. + pool *sessionPool + // closed marks if a healthChecker has been closed. + closed bool +} + +// newHealthChecker initializes new instance of healthChecker. 
+func newHealthChecker(interval time.Duration, workers int, pool *sessionPool) *healthChecker { + if workers <= 0 { + workers = 1 + } + hc := &healthChecker{ + interval: interval, + workers: workers, + pool: pool, + } + for i := 0; i < hc.workers; i++ { + hc.waitWorkers.Add(1) + go hc.worker(i) + } + return hc +} + +// close closes the healthChecker and waits for all healthcheck workers to exit. +func (hc *healthChecker) close() { + hc.mu.Lock() + hc.closed = true + hc.mu.Unlock() + hc.waitWorkers.Wait() +} + +// isClosing checks if a healthChecker is already closing. +func (hc *healthChecker) isClosing() bool { + hc.mu.Lock() + defer hc.mu.Unlock() + return hc.closed +} + +// getInterval gets the healthcheck interval. +func (hc *healthChecker) getInterval() time.Duration { + hc.mu.Lock() + defer hc.mu.Unlock() + return hc.interval +} + +// scheduledHCLocked schedules next healthcheck on session s with the assumption that hc.mu is being held. +func (hc *healthChecker) scheduledHCLocked(s *session) { + // The next healthcheck will be scheduled after [interval*0.5, interval*1.5) nanoseconds. + nsFromNow := rand.Int63n(int64(hc.interval)) + int64(hc.interval)/2 + s.setNextCheck(time.Now().Add(time.Duration(nsFromNow))) + if hi := s.getHcIndex(); hi != -1 { + // Session is still being tracked by healthcheck workers. + heap.Fix(&hc.queue, hi) + } +} + +// scheduledHC schedules next healthcheck on session s. It is safe to be called concurrently. +func (hc *healthChecker) scheduledHC(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + hc.scheduledHCLocked(s) +} + +// register registers a session with healthChecker for periodical healthcheck. +func (hc *healthChecker) register(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + hc.scheduledHCLocked(s) + heap.Push(&hc.queue, s) +} + +// unregister unregisters a session from healthcheck queue. +func (hc *healthChecker) unregister(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + oi := s.setHcIndex(-1) + if oi >= 0 { + heap.Remove(&hc.queue, oi) + } +} + +// markDone marks that health check for session has been performed. +func (hc *healthChecker) markDone(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + s.checkingHealth = false +} + +// healthCheck checks the health of the session and pings it if needed. +func (hc *healthChecker) healthCheck(s *session) { + defer hc.markDone(s) + if !s.pool.isValid() { + // Session pool is closed, perform a garbage collection. + s.destroy(false) + return + } + if s.pool.MaxSessionAge != 0 && s.createTime.Add(s.pool.MaxSessionAge).Before(time.Now()) { + // Session reaches its maximum age, retire it. Failing that try to refresh it. + if s.destroy(true) || !s.refreshIdle() { + return + } + } + if err := s.ping(); shouldDropSession(err) { + // Ping failed, destroy the session. + s.destroy(false) + } +} + +// worker performs the healthcheck on sessions in healthChecker's priority queue. +func (hc *healthChecker) worker(i int) { + if log.V(2) { + log.Info("Starting health check worker %v", i) + } + // Returns a session which we should ping to keep it alive. + getNextForPing := func() *session { + hc.pool.mu.Lock() + defer hc.pool.mu.Unlock() + hc.mu.Lock() + defer hc.mu.Unlock() + if hc.queue.Len() <= 0 { + // Queue is empty. + return nil + } + s := hc.queue.sessions[0] + if s.getNextCheck().After(time.Now()) && hc.pool.valid { + // All sessions have been checked recently. 
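			// Editor's note: hc.queue is a min-heap ordered by nextCheck (see hcHeap.Less
			// above), so hc.queue.sessions[0] is always the most overdue session; if even
			// that one is not due yet, no session in the queue needs a ping right now.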
+ return nil + } + hc.scheduledHCLocked(s) + if !s.checkingHealth { + s.checkingHealth = true + return s + } + return nil + } + + // Returns a session which we should prepare for write. + getNextForTx := func() *session { + hc.pool.mu.Lock() + defer hc.pool.mu.Unlock() + if hc.pool.shouldPrepareWrite() { + if hc.pool.idleList.Len() > 0 && hc.pool.valid { + hc.mu.Lock() + defer hc.mu.Unlock() + if hc.pool.idleList.Front().Value.(*session).checkingHealth { + return nil + } + session := hc.pool.idleList.Remove(hc.pool.idleList.Front()).(*session) + session.checkingHealth = true + hc.pool.prepareReqs++ + return session + } + } + return nil + } + + for { + if hc.isClosing() { + if log.V(2) { + log.Info("Closing health check worker %v", i) + } + // Exit when the pool has been closed and all sessions have been destroyed + // or when health checker has been closed. + hc.waitWorkers.Done() + return + } + ws := getNextForTx() + if ws != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + ws.prepareForWrite(contextWithMetadata(ctx, hc.pool.md)) + hc.pool.recycle(ws) + hc.pool.mu.Lock() + hc.pool.prepareReqs-- + hc.pool.mu.Unlock() + hc.markDone(ws) + } + rs := getNextForPing() + if rs == nil { + if ws == nil { + // No work to be done so sleep to avoid burning cpu + pause := int64(100 * time.Millisecond) + if pause > int64(hc.interval) { + pause = int64(hc.interval) + } + <-time.After(time.Duration(rand.Int63n(pause) + pause/2)) + } + continue + } + hc.healthCheck(rs) + } +} + +// shouldDropSession returns true if a particular error leads to the removal of a session +func shouldDropSession(err error) bool { + if err == nil { + return false + } + // If a Cloud Spanner can no longer locate the session (for example, if session is garbage collected), then caller + // should not try to return the session back into the session pool. + // TODO: once gRPC can return auxilary error information, stop parsing the error message. + if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") { + return true + } + return false +} diff --git a/vendor/cloud.google.com/go/spanner/session_test.go b/vendor/cloud.google.com/go/spanner/session_test.go new file mode 100644 index 000000000..7ad9bb563 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/session_test.go @@ -0,0 +1,762 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "container/heap" + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/spanner/internal/testutil" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// setup prepares test environment for regular session pool tests. 
+func setup(t *testing.T, spc SessionPoolConfig) (sp *sessionPool, sc *testutil.MockCloudSpannerClient, cancel func()) { + sc = testutil.NewMockCloudSpannerClient(t) + spc.getRPCClient = func() (sppb.SpannerClient, error) { + return sc, nil + } + spc.HealthCheckInterval = 50 * time.Millisecond + sp, err := newSessionPool("mockdb", spc, nil) + if err != nil { + t.Fatalf("cannot create session pool: %v", err) + } + cancel = func() { + sp.close() + } + return +} + +// TestSessionCreation tests session creation during sessionPool.Take(). +func TestSessionCreation(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Take three sessions from session pool, this should trigger session pool to create three new sessions. + shs := make([]*sessionHandle, 3) + // gotDs holds the unique sessions taken from session pool. + gotDs := map[string]bool{} + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + gotDs[shs[i].getID()] = true + } + if len(gotDs) != len(shs) { + t.Errorf("session pool created %v sessions, want %v", len(gotDs), len(shs)) + } + if wantDs := sc.DumpSessions(); !reflect.DeepEqual(gotDs, wantDs) { + t.Errorf("session pool creates sessions %v, want %v", gotDs, wantDs) + } + // Verify that created sessions are recorded correctly in session pool. + sp.mu.Lock() + if int(sp.numOpened) != len(shs) { + t.Errorf("session pool reports %v open sessions, want %v", sp.numOpened, len(shs)) + } + if sp.createReqs != 0 { + t.Errorf("session pool reports %v session create requests, want 0", int(sp.createReqs)) + } + sp.mu.Unlock() + // Verify that created sessions are tracked correctly by healthcheck queue. + hc := sp.hc + hc.mu.Lock() + if hc.queue.Len() != len(shs) { + t.Errorf("healthcheck queue length = %v, want %v", hc.queue.Len(), len(shs)) + } + for _, s := range hc.queue.sessions { + if !gotDs[s.getID()] { + t.Errorf("session %v is in healthcheck queue, but it is not created by session pool", s.getID()) + } + } + hc.mu.Unlock() +} + +// TestTakeFromIdleList tests taking sessions from session pool's idle list. +func TestTakeFromIdleList(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Take ten sessions from session pool and recycle them. + shs := make([]*sessionHandle, 10) + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + } + for i := 0; i < len(shs); i++ { + shs[i].recycle() + } + // Further session requests from session pool won't cause mockclient to create more sessions. + wantSessions := sc.DumpSessions() + // Take ten sessions from session pool again, this time all sessions should come from idle list. + gotSessions := map[string]bool{} + for i := 0; i < len(shs); i++ { + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot take session from session pool: %v", err) + } + gotSessions[sh.getID()] = true + } + if len(gotSessions) != 10 { + t.Errorf("got %v unique sessions, want 10", len(gotSessions)) + } + if !reflect.DeepEqual(gotSessions, wantSessions) { + t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions) + } +} + +// TesttakeWriteSessionFromIdleList tests taking write sessions from session pool's idle list. 
+func TestTakeWriteSessionFromIdleList(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + act := testutil.NewAction("Begin", nil) + acts := make([]testutil.Action, 20) + for i := 0; i < len(acts); i++ { + acts[i] = act + } + sc.SetActions(acts...) + // Take ten sessions from session pool and recycle them. + shs := make([]*sessionHandle, 10) + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + } + for i := 0; i < len(shs); i++ { + shs[i].recycle() + } + // Further session requests from session pool won't cause mockclient to create more sessions. + wantSessions := sc.DumpSessions() + // Take ten sessions from session pool again, this time all sessions should come from idle list. + gotSessions := map[string]bool{} + for i := 0; i < len(shs); i++ { + sh, err := sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("cannot take session from session pool: %v", err) + } + gotSessions[sh.getID()] = true + } + if len(gotSessions) != 10 { + t.Errorf("got %v unique sessions, want 10", len(gotSessions)) + } + if !reflect.DeepEqual(gotSessions, wantSessions) { + t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions) + } +} + +// TestTakeFromIdleListChecked tests taking sessions from session pool's idle list, but with a extra ping check. +func TestTakeFromIdleListChecked(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Stop healthcheck workers to simulate slow pings. + sp.hc.close() + // Create a session and recycle it. + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + wantSid := sh.getID() + sh.recycle() + <-time.After(time.Second) + // Two back-to-back session requests, both of them should return the same session created before and + // none of them should trigger a session ping. + for i := 0; i < 2; i++ { + // Take the session from the idle list and recycle it. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("%v - failed to get session: %v", i, err) + } + if gotSid := sh.getID(); gotSid != wantSid { + t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid) + } + // The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take + // reschedules the next healthcheck. + if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) { + t.Errorf("%v - got ping session requests: %v, want %v", i, got, want) + } + sh.recycle() + } + // Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and + // the session pool will create a new session. + sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + // Delay to trigger sessionPool.Take to ping the session. + <-time.After(time.Second) + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + ds := sc.DumpSessions() + if len(ds) != 1 { + t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID()) + } + if sh.getID() == wantSid { + t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid) + } +} + +// TestTakeFromIdleWriteListChecked tests taking sessions from session pool's idle list, but with a extra ping check. 
+func TestTakeFromIdleWriteListChecked(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + sc.MakeNice() + // Stop healthcheck workers to simulate slow pings. + sp.hc.close() + // Create a session and recycle it. + sh, err := sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + wantSid := sh.getID() + sh.recycle() + <-time.After(time.Second) + // Two back-to-back session requests, both of them should return the same session created before and + // none of them should trigger a session ping. + for i := 0; i < 2; i++ { + // Take the session from the idle list and recycle it. + sh, err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("%v - failed to get session: %v", i, err) + } + if gotSid := sh.getID(); gotSid != wantSid { + t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid) + } + // The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take + // reschedules the next healthcheck. + if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) { + t.Errorf("%v - got ping session requests: %v, want %v", i, got, want) + } + sh.recycle() + } + // Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and + // the session pool will create a new session. + sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + // Delay to trigger sessionPool.Take to ping the session. + <-time.After(time.Second) + sh, err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + ds := sc.DumpSessions() + if len(ds) != 1 { + t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID()) + } + if sh.getID() == wantSid { + t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid) + } +} + +// TestMaxOpenedSessions tests max open sessions constraint. +func TestMaxOpenedSessions(t *testing.T) { + sp, _, cancel := setup(t, SessionPoolConfig{MaxOpened: 1}) + defer cancel() + sh1, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot take session from session pool: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + // Session request will timeout due to the max open sessions constraint. + sh2, gotErr := sp.take(ctx) + if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("the second session retrival returns error %v, want %v", gotErr, wantErr) + } + go func() { + <-time.After(time.Second) + // destroy the first session to allow the next session request to proceed. + sh1.destroy() + }() + // Now session request can be processed because the first session will be destroyed. + sh2, err = sp.take(context.Background()) + if err != nil { + t.Errorf("after the first session is destroyed, session retrival still returns error %v, want nil", err) + } + if !sh2.session.isValid() || sh2.getID() == "" { + t.Errorf("got invalid session: %v", sh2.session) + } +} + +// TestMinOpenedSessions tests min open session constraint. +func TestMinOpenedSessions(t *testing.T) { + sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1}) + defer cancel() + // Take ten sessions from session pool and recycle them. 
+ var ss []*session + var shs []*sessionHandle + for i := 0; i < 10; i++ { + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + ss = append(ss, sh.session) + shs = append(shs, sh) + sh.recycle() + } + for _, sh := range shs { + sh.recycle() + } + // Simulate session expiration. + for _, s := range ss { + s.destroy(true) + } + sp.mu.Lock() + defer sp.mu.Unlock() + // There should be still one session left in idle list due to the min open sessions constraint. + if sp.idleList.Len() != 1 { + t.Errorf("got %v sessions in idle list, want 1", sp.idleList.Len()) + } +} + +// TestMaxBurst tests max burst constraint. +func TestMaxBurst(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{MaxBurst: 1}) + defer cancel() + // Will cause session creation RPC to be retried forever. + sc.InjectError("CreateSession", grpc.Errorf(codes.Unavailable, "try later")) + // This session request will never finish until the injected error is cleared. + go sp.take(context.Background()) + // Poll for the execution of the first session request. + for { + sp.mu.Lock() + cr := sp.createReqs + sp.mu.Unlock() + if cr == 0 { + <-time.After(time.Second) + continue + } + // The first session request is being executed. + break + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + sh, gotErr := sp.take(ctx) + // Since MaxBurst == 1, the second session request should block. + if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("session retrival returns error %v, want %v", gotErr, wantErr) + } + // Let the first session request succeed. + sc.InjectError("CreateSession", nil) + // Now new session request can proceed because the first session request will eventually succeed. + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("session retrival returns error %v, want nil", err) + } + if !sh.session.isValid() || sh.getID() == "" { + t.Errorf("got invalid session: %v", sh.session) + } +} + +// TestSessionrecycle tests recycling sessions. +func TestSessionRecycle(t *testing.T) { + sp, _, cancel := setup(t, SessionPoolConfig{MaxSessionAge: 100 * time.Millisecond, MinOpened: 1}) + // Healthcheck is explicitly turned off in this test because it might aggressively expire sessions in idle list. + sp.hc.close() + defer cancel() + var ss []*session + shs := make([]*sessionHandle, 2) + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get the session %v: %v", i, err) + } + ss = append(ss, shs[i].session) + } + // recycle the first session immediately. + shs[0].recycle() + // Let the second session expire. + <-time.After(time.Second) + // recycle the second session. + shs[1].recycle() + // Now the first session should be still valid, but the second session should have been destroyed. + if !ss[0].isValid() { + t.Errorf("the first session (%v) is invalid, want it to be valid", ss[0]) + } + if ss[1].isValid() { + t.Errorf("the second session (%v) is valid, want it to be invalid", ss[1]) + } +} + +// TestSessionDestroy tests destroying sessions. 
+func TestSessionDestroy(t *testing.T) { + sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1}) + defer cancel() + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + s := sh.session + sh.recycle() + if d := s.destroy(true); d || !s.isValid() { + // Session should be remaining because of min open sessions constraint. + t.Errorf("session %v was destroyed in expiration mode, want it to stay alive", s) + } + if d := s.destroy(false); !d || s.isValid() { + // Session should be destroyed. + t.Errorf("failed to destroy session %s", s) + } +} + +// TestHcHeap tests heap operation on top of hcHeap. +func TestHcHeap(t *testing.T) { + in := []*session{ + &session{nextCheck: time.Unix(10, 0)}, + &session{nextCheck: time.Unix(0, 5)}, + &session{nextCheck: time.Unix(1, 8)}, + &session{nextCheck: time.Unix(11, 7)}, + &session{nextCheck: time.Unix(6, 3)}, + } + want := []*session{ + &session{nextCheck: time.Unix(1, 8), hcIndex: 0}, + &session{nextCheck: time.Unix(6, 3), hcIndex: 1}, + &session{nextCheck: time.Unix(8, 2), hcIndex: 2}, + &session{nextCheck: time.Unix(10, 0), hcIndex: 3}, + &session{nextCheck: time.Unix(11, 7), hcIndex: 4}, + } + hh := hcHeap{} + for _, s := range in { + heap.Push(&hh, s) + } + // Change top of the heap and do a adjustment. + hh.sessions[0].nextCheck = time.Unix(8, 2) + heap.Fix(&hh, 0) + for idx := 0; hh.Len() > 0; idx++ { + got := heap.Pop(&hh).(*session) + want[idx].hcIndex = -1 + if !reflect.DeepEqual(got, want[idx]) { + t.Errorf("%v: heap.Pop returns %v, want %v", idx, got, want[idx]) + } + } +} + +// TestHealthCheckScheduler tests if healthcheck workers can schedule and perform healthchecks properly. +func TestHealthCheckScheduler(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Create 50 sessions. + ss := []string{} + for i := 0; i < 50; i++ { + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + ss = append(ss, sh.getID()) + } + // Sleep for 1s, allowing healthcheck workers to perform some session pings. + <-time.After(time.Second) + dp := sc.DumpPings() + gotPings := map[string]int64{} + for _, p := range dp { + gotPings[p]++ + } + for _, s := range ss { + // The average ping interval is 50ms. + want := int64(time.Second) / int64(50*time.Millisecond) + if got := gotPings[s]; got < want/2 || got > want+want/2 { + t.Errorf("got %v healthchecks on session %v, want it between (%v, %v)", got, s, want/2, want+want/2) + } + } +} + +// Tests that a fractions of sessions are prepared for write by health checker. +func TestWriteSessionsPrepared(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{WriteSessions: 0.5}) + sc.MakeNice() + defer cancel() + shs := make([]*sessionHandle, 10) + var err error + for i := 0; i < 10; i++ { + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + } + // Now there are 10 sessions in the pool. Release them. + for _, sh := range shs { + sh.recycle() + } + // Sleep for 1s, allowing healthcheck workers to invoke begin transaction. 
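	// Editor's note: with WriteSessions = 0.5, shouldPrepareWrite() targets
	// numOpened*0.5 write-prepared sessions, so roughly 5 of the 10 idle sessions should
	// receive a BeginTransaction during this pause; the takeWriteSession calls below
	// depend on that.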
+ <-time.After(time.Second) + wshs := make([]*sessionHandle, 5) + for i := 0; i < 5; i++ { + wshs[i], err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + if wshs[i].getTransactionID() == nil { + t.Errorf("got nil transaction id from session pool") + } + } + for _, sh := range wshs { + sh.recycle() + } + <-time.After(time.Second) + // Now force creation of 10 more sessions. + shs = make([]*sessionHandle, 20) + for i := 0; i < 20; i++ { + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + } + // Now there are 20 sessions in the pool. Release them. + for _, sh := range shs { + sh.recycle() + } + <-time.After(time.Second) + if sp.idleWriteList.Len() != 10 { + t.Errorf("Expect 10 write prepared session, got: %d", sp.idleWriteList.Len()) + } +} + +// TestTakeFromWriteQueue tests that sessionPool.take() returns write prepared sessions as well. +func TestTakeFromWriteQueue(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{MaxOpened: 1, WriteSessions: 1.0}) + sc.MakeNice() + defer cancel() + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sh.recycle() + <-time.After(time.Second) + // The session should now be in write queue but take should also return it. + if sp.idleWriteList.Len() == 0 { + t.Errorf("write queue unexpectedly empty") + } + if sp.idleList.Len() != 0 { + t.Errorf("read queue not empty") + } + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sh.recycle() +} + +// TestSessionHealthCheck tests healthchecking cases. +func TestSessionHealthCheck(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{MaxSessionAge: 2 * time.Second}) + defer cancel() + // Test pinging sessions. + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + <-time.After(time.Second) + pings := sc.DumpPings() + if len(pings) == 0 || pings[0] != sh.getID() { + t.Errorf("healthchecker didn't send any ping to session %v", sh.getID()) + } + // Test expiring sessions. + s := sh.session + sh.recycle() + // Sleep enough long for session in idle list to expire. + <-time.After(2 * time.Second) + if s.isValid() { + t.Errorf("session(%v) is still alive, want it to expire", s) + } + // Test broken session detection. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + // Wait for healthcheck workers to find the broken session and tear it down. + <-time.After(1 * time.Second) + if sh.session.isValid() { + t.Errorf("session(%v) is still alive, want it to be dropped by healthcheck workers", s) + } + sc.InjectError("GetSession", nil) + // Test garbage collection. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sp.close() + if sh.session.isValid() { + t.Errorf("session(%v) is still alive, want it to be garbage collected", s) + } + // Test session id refresh. + // Recreate the session pool with min open sessions constraint. 
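	// Editor's note: with MinOpened = 1 the pool refuses to destroy its last expired
	// session, so the health checker falls back to refreshIdle(), which swaps a fresh
	// Cloud Spanner session ID into the same session object; the checks below verify
	// that the ID actually changed.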
+ sp, err = newSessionPool("mockdb", SessionPoolConfig{ + MaxSessionAge: time.Second, + MinOpened: 1, + getRPCClient: func() (sppb.SpannerClient, error) { + return sc, nil + }, + HealthCheckInterval: 50 * time.Millisecond, + }, nil) + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + oid := sh.getID() + s = sh.session + sh.recycle() + <-time.After(2 * time.Second) + nid := s.getID() + if nid == "" || nid == oid { + t.Errorf("healthcheck workers failed to refresh session: oid=%v, nid=%v", oid, nid) + } + if gotDs, wantDs := sc.DumpSessions(), (map[string]bool{nid: true}); !reflect.DeepEqual(gotDs, wantDs) { + t.Errorf("sessions in mockclient: %v, want %v", gotDs, wantDs) + } +} + +// TestStressSessionPool does stress test on session pool by the following concurrent operations: +// 1) Test worker gets a session from the pool. +// 2) Test worker turns a session back into the pool. +// 3) Test worker destroys a session got from the pool. +// 4) Healthcheck retires an old session from the pool's idlelist by refreshing its session id. +// 5) Healthcheck destroys a broken session (because a worker has already destroyed it). +// 6) Test worker closes the session pool. +// +// During the test, it is expected that all sessions that are taken from session pool remains valid and +// when all test workers and healthcheck workers exit, mockclient, session pool and healthchecker should be in consistent state. +func TestStressSessionPool(t *testing.T) { + // Use concurrent workers to test different session pool built from different configurations. + for ti, cfg := range []SessionPoolConfig{ + SessionPoolConfig{}, + SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond}, + SessionPoolConfig{MinOpened: 10, MaxOpened: 100}, + SessionPoolConfig{MaxBurst: 50}, + SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond, MinOpened: 10, MaxOpened: 200, MaxBurst: 5}, + SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond, MinOpened: 10, MaxOpened: 200, MaxBurst: 5, WriteSessions: 0.2}, + } { + var wg sync.WaitGroup + // Create a more aggressive session healthchecker to increase test concurrency. + cfg.HealthCheckInterval = 50 * time.Millisecond + cfg.HealthCheckWorkers = 50 + sc := testutil.NewMockCloudSpannerClient(t) + sc.MakeNice() + cfg.getRPCClient = func() (sppb.SpannerClient, error) { + return sc, nil + } + sp, _ := newSessionPool("mockdb", cfg, nil) + for i := 0; i < 100; i++ { + wg.Add(1) + // Schedule a test worker. + go func(idx int, pool *sessionPool, client sppb.SpannerClient) { + defer wg.Done() + // Test worker iterates 1K times and tries different session / session pool operations. + for j := 0; j < 1000; j++ { + if idx%10 == 0 && j >= 900 { + // Close the pool in selected set of workers during the middle of the test. + pool.close() + } + // Take a write sessions ~ 20% of the times. + takeWrite := rand.Intn(5) == 4 + var ( + sh *sessionHandle + gotErr error + ) + if takeWrite { + sh, gotErr = pool.takeWriteSession(context.Background()) + } else { + sh, gotErr = pool.take(context.Background()) + } + if gotErr != nil { + if pool.isValid() { + t.Errorf("%v.%v: pool.take returns error when pool is still valid: %v", ti, idx, gotErr) + } + if wantErr := errInvalidSessionPool(); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("%v.%v: got error when pool is closed: %v, want %v", ti, idx, gotErr, wantErr) + } + continue + } + // Verify if session is valid when session pool is valid. 
Note that if session pool is invalid after sh is taken, + // then sh might be invalidated by healthcheck workers. + if (sh.getID() == "" || sh.session == nil || !sh.session.isValid()) && pool.isValid() { + t.Errorf("%v.%v.%v: pool.take returns invalid session %v", ti, idx, takeWrite, sh.session) + } + if takeWrite && sh.getTransactionID() == nil { + t.Errorf("%v.%v: pool.takeWriteSession returns session %v without transaction", ti, idx, sh.session) + } + if int64(cfg.MaxSessionAge) > 0 && rand.Intn(100) < idx { + // Random sleep before destroying/recycling the session, to give healthcheck worker a chance to step in. + <-time.After(time.Duration(rand.Int63n(int64(cfg.MaxSessionAge)))) + } + if rand.Intn(100) < idx { + // destroy the session. + sh.destroy() + continue + } + // recycle the session. + sh.recycle() + } + }(i, sp, sc) + } + wg.Wait() + sp.hc.close() + // Here the states of healthchecker, session pool and mockclient are stable. + idleSessions := map[string]bool{} + hcSessions := map[string]bool{} + mockSessions := sc.DumpSessions() + // Dump session pool's idle list. + for sl := sp.idleList.Front(); sl != nil; sl = sl.Next() { + s := sl.Value.(*session) + if idleSessions[s.getID()] { + t.Errorf("%v: found duplicated session in idle list: %v", ti, s.getID()) + } + idleSessions[s.getID()] = true + } + for sl := sp.idleWriteList.Front(); sl != nil; sl = sl.Next() { + s := sl.Value.(*session) + if idleSessions[s.getID()] { + t.Errorf("%v: found duplicated session in idle write list: %v", ti, s.getID()) + } + idleSessions[s.getID()] = true + } + if int(sp.numOpened) != len(idleSessions) { + t.Errorf("%v: number of opened sessions (%v) != number of idle sessions (%v)", ti, sp.numOpened, len(idleSessions)) + } + if sp.createReqs != 0 { + t.Errorf("%v: number of pending session creations = %v, want 0", ti, sp.createReqs) + } + // Dump healthcheck queue. + for _, s := range sp.hc.queue.sessions { + if hcSessions[s.getID()] { + t.Errorf("%v: found duplicated session in healthcheck queue: %v", ti, s.getID()) + } + hcSessions[s.getID()] = true + } + // Verify that idleSessions == hcSessions == mockSessions. + if !reflect.DeepEqual(idleSessions, hcSessions) { + t.Errorf("%v: sessions in idle list (%v) != sessions in healthcheck queue (%v)", ti, idleSessions, hcSessions) + } + if !reflect.DeepEqual(hcSessions, mockSessions) { + t.Errorf("%v: sessions in healthcheck queue (%v) != sessions in mockclient (%v)", ti, hcSessions, mockSessions) + } + sp.close() + mockSessions = sc.DumpSessions() + if len(mockSessions) != 0 { + t.Errorf("Found live sessions: %v", mockSessions) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/spanner_test.go b/vendor/cloud.google.com/go/spanner/spanner_test.go new file mode 100644 index 000000000..e09a256f7 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/spanner_test.go @@ -0,0 +1,989 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/testutil" + database "cloud.google.com/go/spanner/admin/database/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + + adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +var ( + // testProjectID specifies the project used for testing. + // It can be changed by setting environment variable GCLOUD_TESTS_GOLANG_PROJECT_ID. + testProjectID = testutil.ProjID() + // testInstanceID specifies the Cloud Spanner instance used for testing. + testInstanceID = "go-integration-test" + + // client is a spanner.Client. + client *Client + // admin is a spanner.DatabaseAdminClient. + admin *database.DatabaseAdminClient + // db is the path of the testing database. + db string + // dbName is the short name of the testing database. + dbName string +) + +// skipTest returns true if testProjectID is empty. +func skipTest(t *testing.T) bool { + if testProjectID == "" { + t.Logf("skipping because not all environment variables are provided: GCLOUD_TESTS_GOLANG_PROJECT_ID=%q", testProjectID) + return true + } + return false +} + +// prepare initializes Cloud Spanner testing DB and clients. +func prepare(ctx context.Context, t *testing.T) error { + var err error + ts := testutil.TokenSource(ctx, AdminScope, Scope) + if ts == nil { + t.Logf("cannot get service account credential from environment variable %v, skiping test", "GCLOUD_TESTS_GOLANG_KEY") + t.SkipNow() + } + // Create Admin client and Data client. + // TODO: Remove the EndPoint option once this is the default. + admin, err = database.NewDatabaseAdminClient(ctx, option.WithTokenSource(ts), option.WithEndpoint("spanner.googleapis.com:443")) + if err != nil { + t.Errorf("cannot create admin client: %v", err) + return err + } + // Construct test DB name. + dbName = fmt.Sprintf("gotest_%v", time.Now().UnixNano()) + db = fmt.Sprintf("projects/%v/instances/%v/databases/%v", testProjectID, testInstanceID, dbName) + // Create database and tables. 
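+	// CreateDatabase returns a long-running operation; the new schema is only
+	// usable once op.Wait below has returned successfully.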
+	op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{
+		Parent:          fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID),
+		CreateStatement: "CREATE DATABASE " + dbName,
+		ExtraStatements: []string{
+			`CREATE TABLE Singers (
+				SingerId	INT64 NOT NULL,
+				FirstName	STRING(1024),
+				LastName	STRING(1024),
+				SingerInfo	BYTES(MAX)
+			) PRIMARY KEY (SingerId)`,
+			`CREATE INDEX SingerByName ON Singers(FirstName, LastName)`,
+			`CREATE TABLE Accounts (
+				AccountId	INT64 NOT NULL,
+				Nickname	STRING(100),
+				Balance	INT64 NOT NULL,
+			) PRIMARY KEY (AccountId)`,
+			`CREATE INDEX AccountByNickname ON Accounts(Nickname) STORING (Balance)`,
+			`CREATE TABLE Types (
+				RowID	INT64 NOT NULL,
+				String	STRING(MAX),
+				StringArray	ARRAY<STRING(MAX)>,
+				Bytes	BYTES(MAX),
+				BytesArray	ARRAY<BYTES(MAX)>,
+				Int64a	INT64,
+				Int64Array	ARRAY<INT64>,
+				Bool	BOOL,
+				BoolArray	ARRAY<BOOL>,
+				Float64	FLOAT64,
+				Float64Array	ARRAY<FLOAT64>,
+				Date	DATE,
+				DateArray	ARRAY<DATE>,
+				Timestamp	TIMESTAMP,
+				TimestampArray	ARRAY<TIMESTAMP>,
+			) PRIMARY KEY (RowID)`,
+		},
+	})
+	if err != nil {
+		t.Errorf("cannot create testing DB %v: %v", db, err)
+		return err
+	}
+	if _, err := op.Wait(ctx); err != nil {
+		t.Errorf("cannot create testing DB %v: %v", db, err)
+		return err
+	}
+	t.Logf("created database: %v", db)
+	client, err = NewClientWithConfig(ctx, db, ClientConfig{
+		SessionPoolConfig: SessionPoolConfig{
+			WriteSessions: 0.2,
+		},
+	}, option.WithTokenSource(ts))
+	if err != nil {
+		t.Errorf("cannot create data client on DB %v: %v", db, err)
+		return err
+	}
+	return nil
+}
+
+// tearDown tears down the testing environment created by prepare().
+func tearDown(ctx context.Context, t *testing.T) {
+	if admin != nil {
+		if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{db}); err != nil {
+			t.Logf("failed to drop testing database: %v, might need a manual removal", db)
+		}
+		t.Logf("dropped database: %v", db)
+		admin.Close()
+	}
+	if client != nil {
+		client.Close()
+	}
+	admin = nil
+	client = nil
+	db = ""
+}
+
+// Test SingleUse transaction.
+func TestSingleUse(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	defer cancel()
+	if skipTest(t) {
+		// Skip integration test if not all flags are provided.
+		t.SkipNow()
+	}
+	// Set up testing environment.
+	if err := prepare(ctx, t); err != nil {
+		// If prepare() fails, tear down whatever is already up.
+		tearDown(ctx, t)
+		t.Fatalf("cannot set up testing environment: %v", err)
+	}
+	// After all tests, tear down testing environment.
+	defer tearDown(ctx, t)
+
+	writes := []struct {
+		row []interface{}
+		ts  time.Time
+	}{
+		{row: []interface{}{1, "Marc", "Foo"}},
+		{row: []interface{}{2, "Tars", "Bar"}},
+		{row: []interface{}{3, "Alpha", "Beta"}},
+		{row: []interface{}{4, "Last", "End"}},
+	}
+	// Try to write four rows through the Apply API.
+	for i, w := range writes {
+		var err error
+		m := InsertOrUpdate("Singers",
+			[]string{"SingerId", "FirstName", "LastName"},
+			w.row)
+		if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil {
+			t.Fatal(err)
+		}
+		t.Logf("Timestamp of the %vth mutation: %v", i, writes[i].ts)
+	}
+
+	// For testing timestamp bound staleness.
+	<-time.After(time.Second)
+
+	// Test reading rows with different timestamp bounds.
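+	// Each case below pairs a TimestampBound with the rows expected to be
+	// visible at that bound: stale bounds that predate some of the writes
+	// above should see fewer rows, and ExactStaleness(11s) should see none.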
+ for i, test := range []struct { + want [][]interface{} + tb TimestampBound + checkTs func(time.Time) error + }{ + { + // strong + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + StrongRead(), + func(ts time.Time) error { + // writes[3] is the last write, all subsequent strong read should have a timestamp larger than that. + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // min_read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + MinReadTimestamp(writes[3].ts), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // max_staleness + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + MaxStaleness(time.Second), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}}, + ReadTimestamp(writes[2].ts), + func(ts time.Time) error { + if ts != writes[2].ts { + return fmt.Errorf("read got timestamp %v, expect %v", ts, writes[2].ts) + } + return nil + }, + }, + { + // exact_staleness + nil, + // Specify a staleness which should be already before this test because + // context timeout is set to be 10s. + ExactStaleness(11 * time.Second), + func(ts time.Time) error { + if ts.After(writes[0].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts) + } + return nil + }, + }, + } { + // SingleUse.Query + su := client.Single().WithTimestampBound(test.tb) + got, err := readAll(su.Query( + ctx, + Statement{ + "SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)", + map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)}, + })) + if err != nil { + t.Errorf("%d: SingleUse.Query returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from SingleUse.Query: %v, want %v", i, got, test.want) + } + rts, err := su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.Query doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.Query doesn't return expected timestamp: %v", i, err) + } + // SingleUse.Read + su = client.Single().WithTimestampBound(test.tb) + got, err = readAll(su.Read(ctx, "Singers", Keys(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: SingleUse.Read returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from SingleUse.Read: %v, want %v", i, got, test.want) + } + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.Read doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.Read doesn't return expected timestamp: %v", i, err) + } + // SingleUse.ReadRow + got = nil + for _, k := range []Key{Key{1}, Key{3}, Key{4}} { + su = client.Single().WithTimestampBound(test.tb) + r, err := su.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", 
"LastName"}) + if err != nil { + continue + } + v, err := rowToValues(r) + if err != nil { + continue + } + got = append(got, v) + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err) + } + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected results from SingleUse.ReadRow: %v, want %v", i, got, test.want) + } + // SingleUse.ReadUsingIndex + su = client.Single().WithTimestampBound(test.tb) + got, err = readAll(su.ReadUsingIndex(ctx, "Singers", "SingerByName", Keys(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex returns error %v, want nil", i, err) + } + // The results from ReadUsingIndex is sorted by the index rather than primary key. + if len(got) != len(test.want) { + t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want) + } + for j, g := range got { + if j > 0 { + prev := got[j-1][1].(string) + got[j-1][2].(string) + curr := got[j][1].(string) + got[j][2].(string) + if strings.Compare(prev, curr) > 0 { + t.Errorf("%d: SingleUse.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j]) + } + } + found := false + for _, w := range test.want { + if reflect.DeepEqual(g, w) { + found = true + } + } + if !found { + t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want) + break + } + } + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return expected timestamp: %v", i, err) + } + } +} + +// Test ReadOnlyTransaction. The testsuite is mostly like SingleUse, except it +// also tests for a single timestamp across multiple reads. +func TestReadOnlyTransaction(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + if skipTest(t) { + // Skip inegration test if not all flags are provided. + t.SkipNow() + } + // Set up testing environment. + if err := prepare(ctx, t); err != nil { + // If prepare() fails, tear down whatever that's already up. + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + // After all tests, tear down testing environment. + defer tearDown(ctx, t) + + writes := []struct { + row []interface{} + ts time.Time + }{ + {row: []interface{}{1, "Marc", "Foo"}}, + {row: []interface{}{2, "Tars", "Bar"}}, + {row: []interface{}{3, "Alpha", "Beta"}}, + {row: []interface{}{4, "Last", "End"}}, + } + // Try to write four rows through the Apply API. + for i, w := range writes { + var err error + m := InsertOrUpdate("Singers", + []string{"SingerId", "FirstName", "LastName"}, + w.row) + if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + t.Logf("Timestamp of the %vth mutation: %v", i, writes[i].ts) + } + + // For testing timestamp bound staleness. + <-time.After(time.Second) + + // Test reading rows with different timestamp bounds. 
+ for i, test := range []struct { + want [][]interface{} + tb TimestampBound + checkTs func(time.Time) error + }{ + // Note: min_read_timestamp and max_staleness are not supported by ReadOnlyTransaction. See + // API document for more details. + { + // strong + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + StrongRead(), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}}, + ReadTimestamp(writes[2].ts), + func(ts time.Time) error { + if ts != writes[2].ts { + return fmt.Errorf("read got timestamp %v, expect %v", ts, writes[2].ts) + } + return nil + }, + }, + { + // exact_staleness + nil, + // Specify a staleness which should be already before this test because + // context timeout is set to be 10s. + ExactStaleness(11 * time.Second), + func(ts time.Time) error { + if ts.After(writes[0].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts) + } + return nil + }, + }, + } { + // ReadOnlyTransaction.Query + ro := client.ReadOnlyTransaction().WithTimestampBound(test.tb) + got, err := readAll(ro.Query( + ctx, + Statement{ + "SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)", + map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)}, + })) + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Query: %v, want %v", i, got, test.want) + } + rts, err := ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query doesn't return expected timestamp: %v", i, err) + } + roTs := rts + // ReadOnlyTransaction.Read + got, err = readAll(ro.Read(ctx, "Singers", Keys(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Read: %v, want %v", i, got, test.want) + } + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read doesn't return expected timestamp: %v", i, err) + } + if roTs != rts { + t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + // ReadOnlyTransaction.ReadRow + got = nil + for _, k := range []Key{Key{1}, Key{3}, Key{4}} { + r, err := ro.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"}) + if err != nil { + continue + } + v, err := rowToValues(r) + if err != nil { + continue + } + got = append(got, v) + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err) + } + if roTs != rts { + t.Errorf("%d: got two 
read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected results from ReadOnlyTransaction.ReadRow: %v, want %v", i, got, test.want) + } + // SingleUse.ReadUsingIndex + got, err = readAll(ro.ReadUsingIndex(ctx, "Singers", "SingerByName", Keys(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex returns error %v, want nil", i, err) + } + // The results from ReadUsingIndex is sorted by the index rather than primary key. + if len(got) != len(test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want) + } + for j, g := range got { + if j > 0 { + prev := got[j-1][1].(string) + got[j-1][2].(string) + curr := got[j][1].(string) + got[j][2].(string) + if strings.Compare(prev, curr) > 0 { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j]) + } + } + found := false + for _, w := range test.want { + if reflect.DeepEqual(g, w) { + found = true + } + } + if !found { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want) + break + } + } + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return expected timestamp: %v", i, err) + } + if roTs != rts { + t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + ro.Close() + } +} + +// Test ReadWriteTransaction. +func TestReadWriteTransaction(t *testing.T) { + // Give a longer deadline because of transaction backoffs. + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + if skipTest(t) { + t.SkipNow() + } + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + + // Set up two accounts + accounts := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + wg := sync.WaitGroup{} + + readBalance := func(iter *RowIterator) (int64, error) { + defer iter.Stop() + var bal int64 + for { + row, err := iter.Next() + if err == iterator.Done { + return bal, nil + } + if err != nil { + return 0, err + } + if err := row.Column(0, &bal); err != nil { + return 0, err + } + } + } + + for i := 0; i < 20; i++ { + wg.Add(1) + go func(iter int) { + defer wg.Done() + _, err := client.ReadWriteTransaction(ctx, func(tx *ReadWriteTransaction) error { + t.Logf("executing %v-th transfer", iter) + // Query Foo's balance and Bar's balance. 
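+				// Read both balances inside the transaction so that the
+				// buffered updates below are computed from a consistent
+				// snapshot; if the commit aborts, ReadWriteTransaction
+				// reruns the whole closure.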
+ bf, e := readBalance(tx.Query(ctx, + Statement{"SELECT Balance FROM Accounts WHERE AccountId = @id", map[string]interface{}{"id": int64(1)}})) + if e != nil { + return e + } + bb, e := readBalance(tx.Read(ctx, "Accounts", Keys(Key{int64(2)}), []string{"Balance"})) + if e != nil { + return e + } + t.Logf("\t %d: Foo's balance: %v, Bar's balance: %v", iter, bf, bb) + if bf <= 0 { + t.Logf("Foo's balance is already 0.") + return nil + } + bf-- + bb++ + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), bf}), + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), bb}), + }) + return nil + }) + if err != nil { + t.Fatalf("%d: failed to execute transaction: %v", iter, err) + } + }(i) + } + // Because of context timeout, all goroutines will eventually return. + wg.Wait() + _, err := client.ReadWriteTransaction(ctx, func(tx *ReadWriteTransaction) error { + var bf, bb int64 + r, e := tx.ReadRow(ctx, "Accounts", Key{int64(1)}, []string{"Balance"}) + if e != nil { + return e + } + if ce := r.Column(0, &bf); ce != nil { + return ce + } + bb, e = readBalance(tx.ReadUsingIndex(ctx, "Accounts", "AccountByNickname", Keys(Key{"Bar"}), []string{"Balance"})) + if e != nil { + return e + } + if bf != 30 || bb != 21 { + t.Errorf("Foo's balance is now %v and Bar's balance is now %v, want %v and %v", bf, bb, 30, 21) + } + return nil + }) + if err != nil { + t.Errorf("failed to check balances: %v", err) + } +} + +// Test client recovery on database recreation. +func TestDbRemovalRecovery(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + if skipTest(t) { + t.SkipNow() + } + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + + // Drop the testing database. + if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{db}); err != nil { + t.Fatalf("failed to drop testing database %v: %v", db, err) + } + t.Logf("dropped database: %v", db) + + // Now, send the query. + iter := client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) + defer iter.Stop() + _, err := iter.Next() + if err == nil { + t.Errorf("client sends query to removed database successfully, want it to fail") + } + t.Logf("observed the expected failure to send query to database %v: %v", db, err) + + // Recreate database and table. + op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{ + Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID), + CreateStatement: "CREATE DATABASE " + dbName, + ExtraStatements: []string{ + `CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo BYTES(MAX) + ) PRIMARY KEY (SingerId)`, + }, + }) + if _, err := op.Wait(ctx); err != nil { + t.Errorf("cannot recreate testing DB %v: %v", db, err) + } + + // Now, send the query again. + iter = client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) + defer iter.Stop() + _, err = iter.Next() + if err != nil && err != iterator.Done { + t.Fatalf("failed to send query to database %v: %v", db, err) + } + t.Logf("client session has recovered from database recreation") +} + +// Test encoding/decoding non-struct Cloud Spanner types. 
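The table-driven test that follows exercises the mapping between Go values and Cloud Spanner column types, including the NullXxx wrapper types that represent SQL NULL. As a minimal sketch of the same mapping outside the test harness (an editor's illustration, not part of the vendored file: readBackNullable is a hypothetical helper, and a *spanner.Client plus the Types table created by prepare() are assumed to exist):

    package spannerexample

    import (
    	"fmt"

    	"cloud.google.com/go/spanner"
    	"golang.org/x/net/context"
    )

    // readBackNullable round-trips a NULL STRING value through the Types table.
    func readBackNullable(ctx context.Context, client *spanner.Client) error {
    	// A NullString with Valid == false encodes as SQL NULL.
    	m := spanner.InsertOrUpdate("Types", []string{"RowID", "String"},
    		[]interface{}{int64(100), spanner.NullString{StringVal: "", Valid: false}})
    	if _, err := client.Apply(ctx, []*spanner.Mutation{m}, spanner.ApplyAtLeastOnce()); err != nil {
    		return err
    	}
    	row, err := client.Single().ReadRow(ctx, "Types", spanner.Key{int64(100)}, []string{"String"})
    	if err != nil {
    		return err
    	}
    	var got spanner.NullString
    	if err := row.Column(0, &got); err != nil {
    		return err
    	}
    	fmt.Printf("valid=%v value=%q\n", got.Valid, got.StringVal) // valid=false value=""
    	return nil
    }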
+func TestBasicTypes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if skipTest(t) { + t.SkipNow() + } + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + t1, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + // Boundaries + t2, _ := time.Parse(time.RFC3339Nano, "0001-01-01T00:00:00.000000000Z") + t3, _ := time.Parse(time.RFC3339Nano, "9999-12-31T23:59:59.999999999Z") + d1, _ := civil.ParseDate("2016-11-15") + // Boundaries + d2, _ := civil.ParseDate("0001-01-01") + d3, _ := civil.ParseDate("9999-12-31") + + tests := []struct { + col string + val interface{} + want interface{} + }{ + {col: "String", val: ""}, + {col: "String", val: "", want: NullString{"", true}}, + {col: "String", val: "foo"}, + {col: "String", val: "foo", want: NullString{"foo", true}}, + {col: "String", val: NullString{"bar", true}, want: "bar"}, + {col: "String", val: NullString{"bar", false}, want: NullString{"", false}}, + {col: "StringArray", val: []string(nil), want: []NullString(nil)}, + {col: "StringArray", val: []string{}, want: []NullString{}}, + {col: "StringArray", val: []string{"foo", "bar"}, want: []NullString{{"foo", true}, {"bar", true}}}, + {col: "StringArray", val: []NullString(nil)}, + {col: "StringArray", val: []NullString{}}, + {col: "StringArray", val: []NullString{{"foo", true}, {}}}, + {col: "Bytes", val: []byte{}}, + {col: "Bytes", val: []byte{1, 2, 3}}, + {col: "Bytes", val: []byte(nil)}, + {col: "BytesArray", val: [][]byte(nil)}, + {col: "BytesArray", val: [][]byte{}}, + {col: "BytesArray", val: [][]byte{[]byte{1}, []byte{2, 3}}}, + {col: "Int64a", val: 0, want: int64(0)}, + {col: "Int64a", val: -1, want: int64(-1)}, + {col: "Int64a", val: 2, want: int64(2)}, + {col: "Int64a", val: int64(3)}, + {col: "Int64a", val: 4, want: NullInt64{4, true}}, + {col: "Int64a", val: NullInt64{5, true}, want: int64(5)}, + {col: "Int64a", val: NullInt64{6, true}, want: int64(6)}, + {col: "Int64a", val: NullInt64{7, false}, want: NullInt64{0, false}}, + {col: "Int64Array", val: []int(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []int64(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int64{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int64{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []NullInt64(nil)}, + {col: "Int64Array", val: []NullInt64{}}, + {col: "Int64Array", val: []NullInt64{{1, true}, {}}}, + {col: "Bool", val: false}, + {col: "Bool", val: true}, + {col: "Bool", val: false, want: NullBool{false, true}}, + {col: "Bool", val: true, want: NullBool{true, true}}, + {col: "Bool", val: NullBool{true, true}}, + {col: "Bool", val: NullBool{false, false}}, + {col: "BoolArray", val: []bool(nil), want: []NullBool(nil)}, + {col: "BoolArray", val: []bool{}, want: []NullBool{}}, + {col: "BoolArray", val: []bool{true, false}, want: []NullBool{{true, true}, {false, true}}}, + {col: "BoolArray", val: []NullBool(nil)}, + {col: "BoolArray", val: []NullBool{}}, + {col: "BoolArray", val: []NullBool{{false, true}, {true, true}, {}}}, + {col: "Float64", val: 0.0}, + {col: "Float64", val: 3.14}, + {col: "Float64", val: math.NaN()}, + {col: "Float64", val: math.Inf(1)}, + {col: "Float64", val: math.Inf(-1)}, + {col: "Float64", val: 2.78, want: 
NullFloat64{2.78, true}}, + {col: "Float64", val: NullFloat64{2.71, true}, want: 2.71}, + {col: "Float64", val: NullFloat64{1.41, true}, want: NullFloat64{1.41, true}}, + {col: "Float64", val: NullFloat64{0, false}}, + {col: "Float64Array", val: []float64(nil), want: []NullFloat64(nil)}, + {col: "Float64Array", val: []float64{}, want: []NullFloat64{}}, + {col: "Float64Array", val: []float64{2.72, 3.14, math.Inf(1)}, want: []NullFloat64{{2.72, true}, {3.14, true}, {math.Inf(1), true}}}, + {col: "Float64Array", val: []NullFloat64(nil)}, + {col: "Float64Array", val: []NullFloat64{}}, + {col: "Float64Array", val: []NullFloat64{{2.72, true}, {math.Inf(1), true}, {}}}, + {col: "Date", val: d1}, + {col: "Date", val: d1, want: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}, want: d1}, + {col: "Date", val: NullDate{civil.Date{}, false}}, + {col: "DateArray", val: []civil.Date(nil), want: []NullDate(nil)}, + {col: "DateArray", val: []civil.Date{}, want: []NullDate{}}, + {col: "DateArray", val: []civil.Date{d1, d2, d3}, want: []NullDate{{d1, true}, {d2, true}, {d3, true}}}, + {col: "Timestamp", val: t1}, + {col: "Timestamp", val: t1, want: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}, want: t1}, + {col: "Timestamp", val: NullTime{}}, + {col: "TimestampArray", val: []time.Time(nil), want: []NullTime(nil)}, + {col: "TimestampArray", val: []time.Time{}, want: []NullTime{}}, + {col: "TimestampArray", val: []time.Time{t1, t2, t3}, want: []NullTime{{t1, true}, {t2, true}, {t3, true}}}, + } + + // Write rows into table first. + var muts []*Mutation + for i, test := range tests { + muts = append(muts, InsertOrUpdate("Types", []string{"RowID", test.col}, []interface{}{i, test.val})) + } + if _, err := client.Apply(ctx, muts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + + for i, test := range tests { + row, err := client.Single().ReadRow(ctx, "Types", []interface{}{i}, []string{test.col}) + if err != nil { + t.Fatalf("Unable to fetch row %v: %v", i, err) + } + // Create new instance of type of test.want. + want := test.want + if want == nil { + want = test.val + } + gotp := reflect.New(reflect.TypeOf(want)) + if err := row.Column(0, gotp.Interface()); err != nil { + t.Errorf("%d: col:%v val:%#v, %v", i, test.col, test.val, err) + continue + } + got := reflect.Indirect(gotp).Interface() + + // One of the test cases is checking NaN handling. Given + // NaN!=NaN, we can't use reflect to test for it. + isNaN := func(t interface{}) bool { + f, ok := t.(float64) + if !ok { + return false + } + return math.IsNaN(f) + } + if isNaN(got) && isNaN(want) { + continue + } + + // Check non-NaN cases. + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: col:%v val:%#v, got %#v, want %#v", i, test.col, test.val, got, want) + continue + } + } +} + +// Test decoding Cloud Spanner STRUCT type. +func TestStructTypes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + if skipTest(t) { + t.SkipNow() + } + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + + tests := []struct { + q Statement + want func(r *Row) error + }{ + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1, 2))`}, + want: func(r *Row) error { + // Test STRUCT ARRAY decoding to []NullRow. 
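+				// Each element of the ARRAY-of-STRUCT result decodes into a
+				// NullRow; Valid is false only when the struct element itself
+				// is NULL, which this query never produces.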
+ var rows []NullRow + if err := r.Column(0, &rows); err != nil { + return err + } + if len(rows) != 1 { + return fmt.Errorf("len(rows) = %d; want 1", len(rows)) + } + if !rows[0].Valid { + return fmt.Errorf("rows[0] is NULL") + } + var i, j int64 + if err := rows[0].Row.Columns(&i, &j); err != nil { + return err + } + if i != 1 || j != 2 { + return fmt.Errorf("got (%d,%d), want (1,2)", i, j) + } + return nil + }, + }, + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1 as foo, 2 as bar)) as col1`}, + want: func(r *Row) error { + // Test Row.ToStruct. + s := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{} + if err := r.ToStruct(&s); err != nil { + return err + } + want := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{ + Col1: []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + }{ + { + Foo: 1, + Bar: 2, + }, + }, + } + if !reflect.DeepEqual(want, s) { + return fmt.Errorf("unexpected decoding result: %v, want %v", s, want) + } + return nil + }, + }, + } + for i, test := range tests { + iter := client.Single().Query(ctx, test.q) + defer iter.Stop() + row, err := iter.Next() + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + if err := test.want(row); err != nil { + t.Errorf("%d: %v", i, err) + continue + } + } +} + +func rowToValues(r *Row) ([]interface{}, error) { + var x int64 + var y, z string + if err := r.Column(0, &x); err != nil { + return nil, err + } + if err := r.Column(1, &y); err != nil { + return nil, err + } + if err := r.Column(2, &z); err != nil { + return nil, err + } + return []interface{}{x, y, z}, nil +} + +func readAll(iter *RowIterator) ([][]interface{}, error) { + defer iter.Stop() + var vals [][]interface{} + for { + row, err := iter.Next() + if err == iterator.Done { + return vals, nil + } + if err != nil { + return nil, err + } + v, err := rowToValues(row) + if err != nil { + return nil, err + } + vals = append(vals, v) + } +} diff --git a/vendor/cloud.google.com/go/spanner/statement.go b/vendor/cloud.google.com/go/spanner/statement.go new file mode 100644 index 000000000..8e422b09c --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement.go @@ -0,0 +1,78 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// A Statement is a SQL query with named parameters. +// +// A parameter placeholder consists of '@' followed by the parameter name. +// Parameter names consist of any combination of letters, numbers, and +// underscores. Names may be entirely numeric (e.g., "WHERE m.id = @5"). +// Parameters may appear anywhere that a literal value is expected. The same +// parameter name may be used more than once. It is an error to execute a +// statement with unbound parameters. 
On the other hand, it is allowable to +// bind parameter names that are not used. +// +// See the documentation of the Row type for how Go types are mapped to Cloud +// Spanner types. +type Statement struct { + SQL string + Params map[string]interface{} +} + +// NewStatement returns a Statement with the given SQL and an empty Params map. +func NewStatement(sql string) Statement { + return Statement{SQL: sql, Params: map[string]interface{}{}} +} + +// errBindParam returns error for not being able to bind parameter to query request. +func errBindParam(k string, v interface{}, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to bind query parameter(name: %q, value: %q), error = <%v>", k, v, err) + } + se.decorate(fmt.Sprintf("failed to bind query parameter(name: %q, value: %q)", k, v)) + return se +} + +// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest. +func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error { + r.Params = &proto3.Struct{ + Fields: map[string]*proto3.Value{}, + } + r.ParamTypes = map[string]*sppb.Type{} + for k, v := range s.Params { + val, t, err := encodeValue(v) + if err != nil { + return errBindParam(k, v, err) + } + r.Params.Fields[k] = val + r.ParamTypes[k] = t + } + return nil +} diff --git a/vendor/cloud.google.com/go/spanner/statement_test.go b/vendor/cloud.google.com/go/spanner/statement_test.go new file mode 100644 index 000000000..a441e0e82 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement_test.go @@ -0,0 +1,64 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test Statement.bindParams. +func TestBindParams(t *testing.T) { + // Verify Statement.bindParams generates correct values and types. + want := sppb.ExecuteSqlRequest{ + Params: &proto3.Struct{ + Fields: map[string]*proto3.Value{ + "var1": stringProto("abc"), + "var2": intProto(1), + }, + }, + ParamTypes: map[string]*sppb.Type{ + "var1": stringType(), + "var2": intType(), + }, + } + st := Statement{ + SQL: "SELECT id from t_foo WHERE col1 = @var1 AND col2 = @var2", + Params: map[string]interface{}{"var1": "abc", "var2": int64(1)}, + } + got := sppb.ExecuteSqlRequest{} + if err := st.bindParams(&got); err != nil || !reflect.DeepEqual(got, want) { + t.Errorf("bind result: \n(%v, %v)\nwant\n(%v, %v)\n", got, err, want, nil) + } + // Verify type error reporting. 
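+	// struct{}{} has no Cloud Spanner encoding, so bindParams is expected to
+	// fail with errBindParam wrapping the encoder's unsupported-type error.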
+ st.Params["var2"] = struct{}{} + wantErr := errBindParam("var2", struct{}{}, errEncoderUnsupportedType(struct{}{})) + if err := st.bindParams(&got); !reflect.DeepEqual(err, wantErr) { + t.Errorf("got unexpected error: %v, want: %v", err, wantErr) + } +} + +func TestNewStatement(t *testing.T) { + s := NewStatement("query") + if got, want := s.SQL, "query"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound.go b/vendor/cloud.google.com/go/spanner/timestampbound.go new file mode 100644 index 000000000..068d96600 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound.go @@ -0,0 +1,245 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// timestampBoundType specifies the timestamp bound mode. +type timestampBoundType int + +const ( + strong timestampBoundType = iota // strong reads + exactStaleness // read with exact staleness + maxStaleness // read with max staleness + minReadTimestamp // read with min freshness + readTimestamp // read data at exact timestamp +) + +// TimestampBound defines how Cloud Spanner will choose a timestamp for a single +// read/query or read-only transaction. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, stale +// read-only transactions can execute more quickly than strong or read-write +// transactions, because they are able to execute far from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. A TimestampBound +// can be specified when creating transactions, see the documentation of +// spanner.Client for an example. +// +// Strong reads +// +// Strong reads are guaranteed to see the effects of all transactions that have +// committed before the start of the read. Furthermore, all rows yielded by a +// single read are consistent with each other - if any part of the read +// observes a transaction, all parts of the read see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are concurrent +// writes. If consistency across reads is required, the reads should be +// executed within a transaction or at an exact read timestamp. +// +// Use StrongRead() to create a bound of this type. +// +// Exact staleness +// +// These timestamp bounds execute reads at a user-specified timestamp. 
Reads at +// a timestamp are guaranteed to see a consistent prefix of the global +// transaction history: they observe modifications done by all transactions +// with a commit timestamp less than or equal to the read timestamp, and +// observe none of the modifications done by transactions with a larger commit +// timestamp. They will block until all conflicting transactions that may be +// assigned commit timestamps less than or equal to the read timestamp have +// finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a timestamp. As a +// result, they execute slightly faster than the equivalent boundedly stale +// concurrency modes. On the other hand, boundedly stale reads usually return +// fresher results. +// +// Use ReadTimestamp() and ExactStaleness() to create a bound of this type. +// +// Bounded staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to +// a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within +// the staleness bound that allows execution of the reads at the closest +// available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of the read +// observes a transaction, all parts of the read see the transaction. Boundedly +// stale reads are not repeatable: two stale reads, even if they use the same +// staleness bound, can execute at different timestamps and thus return +// inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase negotiates a +// timestamp among all replicas needed to serve the read. In the second phase, +// reads are executed at the negotiated timestamp. +// +// As a result of the two phase execution, bounded staleness reads are usually +// a little slower than comparable exact staleness reads. However, they are +// typically able to return fresher results, and are more likely to execute at +// the closest replica. +// +// Because the timestamp negotiation requires up-front knowledge of which rows +// will be read, it can only be used with single-use reads and single-use +// read-only transactions. +// +// Use MinReadTimestamp() and MaxStaleness() to create a bound of this type. +// +// Old read timestamps and garbage collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten data in the +// background to reclaim storage space. This process is known as "version +// GC". By default, version GC reclaims versions after they are four hours +// old. Because of this, Cloud Spanner cannot perform reads at read timestamps more +// than four hours in the past. This restriction also applies to in-progress +// reads and/or SQL queries whose timestamp become too old while +// executing. Reads and SQL queries with too-old read timestamps fail with the +// error ErrorCode.FAILED_PRECONDITION. +type TimestampBound struct { + mode timestampBoundType + d time.Duration + t time.Time +} + +// StrongRead returns a TimestampBound that will perform reads and queries at a +// timestamp where all previously committed transactions are visible. +func StrongRead() TimestampBound { + return TimestampBound{mode: strong} +} + +// ExactStaleness returns a TimestampBound that will perform reads and queries +// at an exact staleness. 
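Before the constructors that follow (ExactStaleness, MaxStaleness, MinReadTimestamp, ReadTimestamp), a short usage sketch may help. This is an editor's illustration, not part of the vendored file: the database path is a placeholder, the Singers table from the integration tests is assumed to exist, and default client settings are assumed to be acceptable.

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"cloud.google.com/go/spanner"
    	"golang.org/x/net/context"
    	"google.golang.org/api/iterator"
    )

    func main() {
    	ctx := context.Background()
    	// Placeholder path; substitute a real project/instance/database.
    	client, err := spanner.NewClientWithConfig(ctx,
    		"projects/my-project/instances/my-instance/databases/my-db",
    		spanner.ClientConfig{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer client.Close()

    	// Bounded staleness: Cloud Spanner may serve data up to 10s old, which
    	// lets it pick a nearby replica without blocking on timestamp negotiation.
    	tx := client.Single().WithTimestampBound(spanner.MaxStaleness(10 * time.Second))
    	iter := tx.Query(ctx, spanner.Statement{
    		SQL:    "SELECT FirstName FROM Singers WHERE SingerId = @id",
    		Params: map[string]interface{}{"id": int64(1)},
    	})
    	defer iter.Stop()
    	for {
    		row, err := iter.Next()
    		if err == iterator.Done {
    			break
    		}
    		if err != nil {
    			log.Fatal(err)
    		}
    		var name string
    		if err := row.Column(0, &name); err != nil {
    			log.Fatal(err)
    		}
    		fmt.Println("FirstName:", name)
    	}
    	// The timestamp actually chosen for the read is available after the read.
    	if ts, err := tx.Timestamp(); err == nil {
    		fmt.Println("read at", ts)
    	}
    }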
+func ExactStaleness(d time.Duration) TimestampBound { + return TimestampBound{ + mode: exactStaleness, + d: d, + } +} + +// MaxStaleness returns a TimestampBound that will perform reads and queries at +// a time chosen to be at most "d" stale. +func MaxStaleness(d time.Duration) TimestampBound { + return TimestampBound{ + mode: maxStaleness, + d: d, + } +} + +// MinReadTimestamp returns a TimestampBound that bound that will perform reads +// and queries at a time chosen to be at least "t". +func MinReadTimestamp(t time.Time) TimestampBound { + return TimestampBound{ + mode: minReadTimestamp, + t: t, + } +} + +// ReadTimestamp returns a TimestampBound that will peform reads and queries at +// the given time. +func ReadTimestamp(t time.Time) TimestampBound { + return TimestampBound{ + mode: readTimestamp, + t: t, + } +} + +// String implements fmt.Stringer. +func (tb TimestampBound) String() string { + switch tb.mode { + case strong: + return fmt.Sprintf("(strong)") + case exactStaleness: + return fmt.Sprintf("(exactStaleness: %s)", tb.d) + case maxStaleness: + return fmt.Sprintf("(maxStaleness: %s)", tb.d) + case minReadTimestamp: + return fmt.Sprintf("(minReadTimestamp: %s)", tb.t) + case readTimestamp: + return fmt.Sprintf("(readTimestamp: %s)", tb.t) + default: + return fmt.Sprintf("{mode=%v, d=%v, t=%v}", tb.mode, tb.d, tb.t) + } +} + +// durationProto takes a time.Duration and converts it into pdb.Duration for +// calling gRPC APIs. +func durationProto(d time.Duration) *pbd.Duration { + n := d.Nanoseconds() + return &pbd.Duration{ + Seconds: n / int64(time.Second), + Nanos: int32(n % int64(time.Second)), + } +} + +// timestampProto takes a time.Time and converts it into pbt.Timestamp for calling +// gRPC APIs. +func timestampProto(t time.Time) *pbt.Timestamp { + return &pbt.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } +} + +// buildTransactionOptionsReadOnly converts a spanner.TimestampBound into a sppb.TransactionOptions_ReadOnly +// transaction option, which is then used in transactional reads. +func buildTransactionOptionsReadOnly(tb TimestampBound, returnReadTimestamp bool) *sppb.TransactionOptions_ReadOnly { + pb := &sppb.TransactionOptions_ReadOnly{ + ReturnReadTimestamp: returnReadTimestamp, + } + switch tb.mode { + case strong: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + } + case exactStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: durationProto(tb.d), + } + case maxStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: durationProto(tb.d), + } + case minReadTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: timestampProto(tb.t), + } + case readTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: timestampProto(tb.t), + } + default: + panic(fmt.Sprintf("buildTransactionOptionsReadOnly(%v,%v)", tb, returnReadTimestamp)) + } + return pb +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound_test.go b/vendor/cloud.google.com/go/spanner/timestampbound_test.go new file mode 100644 index 000000000..47fb481db --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound_test.go @@ -0,0 +1,208 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test generating TimestampBound for strong reads. +func TestStrong(t *testing.T) { + got := StrongRead() + want := TimestampBound{mode: strong} + if !reflect.DeepEqual(got, want) { + t.Errorf("Strong() = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with exact staleness. +func TestExactStaleness(t *testing.T) { + got := ExactStaleness(10 * time.Second) + want := TimestampBound{mode: exactStaleness, d: 10 * time.Second} + if !reflect.DeepEqual(got, want) { + t.Errorf("ExactStaleness(10*time.Second) = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with max staleness. +func TestMaxStaleness(t *testing.T) { + got := MaxStaleness(10 * time.Second) + want := TimestampBound{mode: maxStaleness, d: 10 * time.Second} + if !reflect.DeepEqual(got, want) { + t.Errorf("MaxStaleness(10*time.Second) = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with minimum freshness requirement. +func TestMinReadTimestamp(t *testing.T) { + ts := time.Now() + got := MinReadTimestamp(ts) + want := TimestampBound{mode: minReadTimestamp, t: ts} + if !reflect.DeepEqual(got, want) { + t.Errorf("MinReadTimestamp(%v) = %v; want %v", ts, got, want) + } +} + +// Test generating TimestampBound for reads requesting data at a exact timestamp. +func TestReadTimestamp(t *testing.T) { + ts := time.Now() + got := ReadTimestamp(ts) + want := TimestampBound{mode: readTimestamp, t: ts} + if !reflect.DeepEqual(got, want) { + t.Errorf("ReadTimestamp(%v) = %v; want %v", ts, got, want) + } +} + +// Test TimestampBound.String. +func TestTimestampBoundString(t *testing.T) { + ts := time.Unix(1136239445, 0).UTC() + var tests = []struct { + tb TimestampBound + want string + }{ + { + tb: TimestampBound{mode: strong}, + want: "(strong)", + }, + { + tb: TimestampBound{mode: exactStaleness, d: 10 * time.Second}, + want: "(exactStaleness: 10s)", + }, + { + tb: TimestampBound{mode: maxStaleness, d: 10 * time.Second}, + want: "(maxStaleness: 10s)", + }, + { + tb: TimestampBound{mode: minReadTimestamp, t: ts}, + want: "(minReadTimestamp: 2006-01-02 22:04:05 +0000 UTC)", + }, + { + tb: TimestampBound{mode: readTimestamp, t: ts}, + want: "(readTimestamp: 2006-01-02 22:04:05 +0000 UTC)", + }, + } + for _, test := range tests { + got := test.tb.String() + if got != test.want { + t.Errorf("%#v.String():\ngot %q\nwant %q", test.tb, got, test.want) + } + } +} + +// Test time.Duration to pdb.Duration conversion. 
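The conversion test that follows pins down how durationProto splits a duration into whole seconds plus leftover nanoseconds carrying the same sign. A standalone sketch of the same arithmetic (editor's illustration; splitDuration is a hypothetical helper mirroring the unexported durationProto):

    package main

    import (
    	"fmt"
    	"time"
    )

    // splitDuration mirrors durationProto's arithmetic: whole seconds, plus the
    // remaining nanoseconds, both keeping the sign of the original duration.
    func splitDuration(d time.Duration) (seconds int64, nanos int32) {
    	n := d.Nanoseconds()
    	return n / int64(time.Second), int32(n % int64(time.Second))
    }

    func main() {
    	for _, d := range []time.Duration{
    		42 * time.Hour,                      // -> 151200s, 0ns
    		-(1*time.Hour + 4*time.Millisecond), // -> -3600s, -4000000ns
    	} {
    		s, ns := splitDuration(d)
    		fmt.Printf("%v -> seconds=%d nanos=%d\n", d, s, ns)
    	}
    }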
+func TestDurationProto(t *testing.T) { + var tests = []struct { + d time.Duration + want pbd.Duration + }{ + {time.Duration(0), pbd.Duration{Seconds: 0, Nanos: 0}}, + {time.Second, pbd.Duration{Seconds: 1, Nanos: 0}}, + {time.Millisecond, pbd.Duration{Seconds: 0, Nanos: 1e6}}, + {15 * time.Nanosecond, pbd.Duration{Seconds: 0, Nanos: 15}}, + {42 * time.Hour, pbd.Duration{Seconds: 151200}}, + {-(1*time.Hour + 4*time.Millisecond), pbd.Duration{Seconds: -3600, Nanos: -4e6}}, + } + for _, test := range tests { + got := durationProto(test.d) + if !reflect.DeepEqual(got, &test.want) { + t.Errorf("durationProto(%v) = %v; want %v", test.d, got, test.want) + } + } +} + +// Test time.Time to pbt.Timestamp conversion. +func TestTimeProto(t *testing.T) { + var tests = []struct { + t time.Time + want pbt.Timestamp + }{ + {time.Unix(0, 0), pbt.Timestamp{}}, + {time.Unix(1136239445, 12345), pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + {time.Unix(-1000, 12345), pbt.Timestamp{Seconds: -1000, Nanos: 12345}}, + } + for _, test := range tests { + got := timestampProto(test.t) + if !reflect.DeepEqual(got, &test.want) { + t.Errorf("timestampProto(%v) = %v; want %v", test.t, got, test.want) + } + } +} + +// Test readonly transaction option builder. +func TestBuildTransactionOptionsReadOnly(t *testing.T) { + ts := time.Unix(1136239445, 12345) + var tests = []struct { + tb TimestampBound + ts bool + want sppb.TransactionOptions_ReadOnly + }{ + { + StrongRead(), false, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true}, + ReturnReadTimestamp: false, + }, + }, + { + ExactStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + { + MaxStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + + { + MinReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + { + ReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + } + for _, test := range tests { + got := buildTransactionOptionsReadOnly(test.tb, test.ts) + if !reflect.DeepEqual(got, &test.want) { + t.Errorf("buildTransactionOptionsReadOnly(%v,%v) = %v; want %v", test.tb, test.ts, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/transaction.go b/vendor/cloud.google.com/go/spanner/transaction.go new file mode 100644 index 000000000..1d5a27977 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/transaction.go @@ -0,0 +1,821 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "sync" + "time" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// transactionID stores a transaction ID which uniquely identifies a transaction in Cloud Spanner. +type transactionID []byte + +// txReadEnv manages a read-transaction environment consisting of a session handle and a transaction selector. +type txReadEnv interface { + // acquire returns a read-transaction environment that can be used to perform a transactional read. + acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) + // release should be called at the end of every transactional read to deal with session recycling and read timestamp recording. + release(time.Time, error) +} + +// txReadOnly contains methods for doing transactional reads. +type txReadOnly struct { + // read-transaction environment for performing transactional read operations. + txReadEnv +} + +// errSessionClosed returns error for using a recycled/destroyed session +func errSessionClosed(sh *sessionHandle) error { + return spannerErrorf(codes.FailedPrecondition, + "session is already recycled / destroyed: session_id = %q, rpc_client = %v", sh.getID(), sh.getClient()) +} + +// Read reads multiple rows from the database. +// +// The provided function is called once in serial for each row read. If the +// function returns a non-nil error, Read immediately returns that value. +// +// If no rows are read, Read will return nil without calling the provided +// function. +func (t *txReadOnly) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator { + // ReadUsingIndex will use primary index if an empty index name is provided. + return t.ReadUsingIndex(ctx, table, "", keys, columns) +} + +// ReadUsingIndex reads multiple rows from the database using an index. +// +// Currently, this function can only read columns that are part of the index +// key, part of the primary key, or stored in the index due to a STORING clause +// in the index definition. +// +// The provided function is called once in serial for each row read. If the +// function returns a non-nil error, ReadUsingIndex immediately returns that +// value. +// +// If no rows are read, ReadUsingIndex will return nil without calling the +// provided function. +func (t *txReadOnly) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator { + var ( + sh *sessionHandle + ts *sppb.TransactionSelector + err error + ) + kset, err := keys.proto() + if err != nil { + return &RowIterator{err: err} + } + if sh, ts, err = t.acquire(ctx); err != nil { + return &RowIterator{err: err} + } + // Cloud Spanner will return "Session not found" on bad sessions. + sid, client := sh.getID(), sh.getClient() + if sid == "" || client == nil { + // Might happen if transaction is closed in the middle of a API call. 
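+		// If so, report errSessionClosed here rather than issuing a StreamingRead
+		// with an empty session name.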
+ return &RowIterator{err: errSessionClosed(sh)} + } + return stream( + contextWithMetadata(ctx, sh.getMetadata()), + func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + return client.StreamingRead(ctx, + &sppb.ReadRequest{ + Session: sid, + Transaction: ts, + Table: table, + Index: index, + Columns: columns, + KeySet: kset, + ResumeToken: resumeToken, + }) + }, + t.release, + ) +} + +// errRowNotFound returns error for not being able to read the row identified by key. +func errRowNotFound(table string, key Key) error { + return spannerErrorf(codes.NotFound, "row not found(Table: %v, PrimaryKey: %v)", table, key) +} + +// ReadRow reads a single row from the database. +// +// If no row is present with the given key, then ReadRow returns an error where +// IsRowNotFound(err) is true. +func (t *txReadOnly) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) { + iter := t.Read(ctx, table, Keys(key), columns) + defer iter.Stop() + row, err := iter.Next() + switch err { + case iterator.Done: + return nil, errRowNotFound(table, key) + case nil: + return row, nil + default: + return nil, err + } +} + +// Query executes a query against the database. +// +// The provided function is called once in serial for each row read. If the +// function returns a non-nil error, Query immediately returns that value. +// +// If no rows are read, Query will return nil without calling the provided +// function. +func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterator { + var ( + sh *sessionHandle + ts *sppb.TransactionSelector + err error + ) + if sh, ts, err = t.acquire(ctx); err != nil { + return &RowIterator{err: err} + } + // Cloud Spanner will return "Session not found" on bad sessions. + sid, client := sh.getID(), sh.getClient() + if sid == "" || client == nil { + // Might happen if transaction is closed in the middle of a API call. + return &RowIterator{err: errSessionClosed(sh)} + } + req := &sppb.ExecuteSqlRequest{ + Session: sid, + Transaction: ts, + Sql: statement.SQL, + } + if err := statement.bindParams(req); err != nil { + return &RowIterator{err: err} + } + return stream( + contextWithMetadata(ctx, sh.getMetadata()), + func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + req.ResumeToken = resumeToken + return client.ExecuteStreamingSql(ctx, req) + }, + t.release) +} + +// txState is the status of a transaction. +type txState int + +const ( + // transaction is new, waiting to be initialized. + txNew txState = iota + // transaction is being initialized. + txInit + // transaction is active and can perform read/write. + txActive + // transaction is closed, cannot be used anymore. + txClosed +) + +// errRtsUnavailable returns error for read transaction's read timestamp being unavailable. +func errRtsUnavailable() error { + return spannerErrorf(codes.Internal, "read timestamp is unavailable") +} + +// errTxNotInitialized returns error for using an uninitialized transaction. +func errTxNotInitialized() error { + return spannerErrorf(codes.InvalidArgument, "cannot use a uninitialized transaction") +} + +// errTxClosed returns error for using a closed transaction. +func errTxClosed() error { + return spannerErrorf(codes.InvalidArgument, "cannot use a closed transaction") +} + +// errUnexpectedTxState returns error for transaction enters an unexpected state. 
+func errUnexpectedTxState(ts txState) error { + return spannerErrorf(codes.FailedPrecondition, "unexpected transaction state: %v", ts) +} + +// ReadOnlyTransaction provides a snapshot transaction with guaranteed +// consistency across reads, but does not allow writes. Read-only +// transactions can be configured to read at timestamps in the past. +// +// Read-only transactions do not take locks. Instead, they work by choosing a +// Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do +// not acquire locks, they do not block concurrent read-write transactions. +// +// Unlike locking read-write transactions, read-only transactions never +// abort. They can fail if the chosen read timestamp is garbage collected; +// however, the default garbage collection policy is generous enough that most +// applications do not need to worry about this in practice. See the +// documentation of TimestampBound for more details. +// +// A ReadOnlyTransaction consumes resources on the server until Close() is +// called. +type ReadOnlyTransaction struct { + // txReadOnly contains methods for performing transactional reads. + txReadOnly + + // singleUse indicates that the transaction can be used for only one read. + singleUse bool + + // sp is the session pool for allocating a session to execute the read-only transaction. It is set only once during initialization of the ReadOnlyTransaction. + sp *sessionPool + // mu protects concurrent access to the internal states of ReadOnlyTransaction. + mu sync.Mutex + // tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadOnlyTransaction. + tx transactionID + // txReadyOrClosed is for broadcasting that transaction ID has been returned by Cloud Spanner or that transaction is closed. + txReadyOrClosed chan struct{} + // state is the current transaction status of the ReadOnly transaction. + state txState + // sh is the sessionHandle allocated from sp. + sh *sessionHandle + // rts is the read timestamp returned by transactional reads. + rts time.Time + // tb is the read staleness bound specification for transactional reads. + tb TimestampBound +} + +// errTxInitTimeout returns error for timeout in waiting for initialization of the transaction. +func errTxInitTimeout() error { + return spannerErrorf(codes.Canceled, "timeout/context canceled in waiting for transaction's initialization") +} + +// getTimestampBound returns the read staleness bound specified for the ReadOnlyTransaction. +func (t *ReadOnlyTransaction) getTimestampBound() TimestampBound { + t.mu.Lock() + defer t.mu.Unlock() + return t.tb +} + +// begin starts a snapshot read-only Transaction on Cloud Spanner. +func (t *ReadOnlyTransaction) begin(ctx context.Context) error { + var ( + locked bool + tx transactionID + rts time.Time + sh *sessionHandle + err error + ) + defer func() { + if !locked { + t.mu.Lock() + // Not necessary, just to make it clear that t.mu is being held when locked == true. + locked = true + } + if t.state != txClosed { + // Signal other initialization routines. + close(t.txReadyOrClosed) + t.txReadyOrClosed = make(chan struct{}) + } + t.mu.Unlock() + if err != nil && sh != nil { + // Got a valid session handle, but failed to initalize transaction on Cloud Spanner. + if shouldDropSession(err) { + sh.destroy() + } + // If sh.destroy was already executed, this becomes a noop. 
+ sh.recycle() + } + }() + sh, err = t.sp.take(ctx) + if err != nil { + return err + } + err = runRetryable(contextWithMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error { + res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{ + Session: sh.getID(), + Options: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: buildTransactionOptionsReadOnly(t.getTimestampBound(), true), + }, + }, + }) + if e != nil { + return e + } + tx = res.Id + if res.ReadTimestamp != nil { + rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos)) + } + return nil + }) + t.mu.Lock() + locked = true // defer function will be executed with t.mu being held. + if t.state == txClosed { // During the execution of t.begin(), t.Close() was invoked. + return errSessionClosed(sh) + } + // If begin() fails, this allows other queries to take over the initialization. + t.tx = nil + if err == nil { + t.tx = tx + t.rts = rts + t.sh = sh + // State transite to txActive. + t.state = txActive + } + return err +} + +// acquire implements txReadEnv.acquire. +func (t *ReadOnlyTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + if t.singleUse { + return t.acquireSingleUse(ctx) + } + return t.acquireMultiUse(ctx) +} + +func (t *ReadOnlyTransaction) acquireSingleUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + t.mu.Lock() + defer t.mu.Unlock() + switch t.state { + case txClosed: + // A closed single-use transaction can never be reused. + return nil, nil, errTxClosed() + case txNew: + t.state = txClosed + ts := &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: buildTransactionOptionsReadOnly(t.tb, true), + }, + }, + }, + } + sh, err := t.sp.take(ctx) + if err != nil { + return nil, nil, err + } + // Install session handle into t, which can be used for readonly operations later. + t.sh = sh + return sh, ts, nil + } + us := t.state + // SingleUse transaction should only be in either txNew state or txClosed state. + return nil, nil, errUnexpectedTxState(us) +} + +func (t *ReadOnlyTransaction) acquireMultiUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + for { + t.mu.Lock() + switch t.state { + case txClosed: + t.mu.Unlock() + return nil, nil, errTxClosed() + case txNew: + // State transit to txInit so that no further TimestampBound change is accepted. + t.state = txInit + t.mu.Unlock() + continue + case txInit: + if t.tx != nil { + // Wait for a transaction ID to become ready. + txReadyOrClosed := t.txReadyOrClosed + t.mu.Unlock() + select { + case <-txReadyOrClosed: + // Need to check transaction state again. + continue + case <-ctx.Done(): + // The waiting for initialization is timeout, return error directly. + return nil, nil, errTxInitTimeout() + } + } + // Take the ownership of initializing the transaction. + t.tx = transactionID{} + t.mu.Unlock() + // Begin a read-only transaction. + // TODO: consider adding a transaction option which allow queries to initiate transactions by themselves. Note that this option might not be + // always good because the ID of the new transaction won't be ready till the query returns some data or completes. + if err := t.begin(ctx); err != nil { + return nil, nil, err + } + // If t.begin() succeeded, t.state should have been changed to txActive, so we can just continue here. 
+ continue + case txActive: + sh := t.sh + ts := &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_Id{ + Id: t.tx, + }, + } + t.mu.Unlock() + return sh, ts, nil + } + state := t.state + t.mu.Unlock() + return nil, nil, errUnexpectedTxState(state) + } +} + +// release implements txReadEnv.release. +func (t *ReadOnlyTransaction) release(rts time.Time, err error) { + t.mu.Lock() + if t.singleUse && !rts.IsZero() { + t.rts = rts + } + sh := t.sh + t.mu.Unlock() + if sh != nil { // sh could be nil if t.acquire() fails. + if shouldDropSession(err) { + sh.destroy() + } + if t.singleUse { + // If session handle is already destroyed, this becomes a noop. + sh.recycle() + } + } +} + +// Close closes a ReadOnlyTransaction, the transaction cannot perform any reads after being closed. +func (t *ReadOnlyTransaction) Close() { + if t.singleUse { + return + } + t.mu.Lock() + if t.state != txClosed { + t.state = txClosed + close(t.txReadyOrClosed) + } + sh := t.sh + t.mu.Unlock() + // If session handle is already destroyed, this becomes a noop. + // If there are still active queries and if the recycled session is reused before they complete, Cloud Spanner will cancel them + // on behalf of the new transaction on the session. + sh.recycle() +} + +// Timestamp returns the timestamp chosen to perform reads and +// queries in this transaction. The value can only be read after some +// read or query has either returned some data or completed without +// returning any data. +func (t *ReadOnlyTransaction) Timestamp() (time.Time, error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.rts.IsZero() { + return t.rts, errRtsUnavailable() + } + return t.rts, nil +} + +// WithTimestampBound specifies the TimestampBound to use for read or query. +// This can only be used before the first read or query is invoked. Note: +// bounded staleness is not available with general ReadOnlyTransactions; use a +// single-use ReadOnlyTransaction instead. +// +// The returned value is the ReadOnlyTransaction so calls can be chained. +func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction { + t.mu.Lock() + defer t.mu.Unlock() + if t.state == txNew { + // Only allow to set TimestampBound before the first query. + t.tb = tb + } + return t +} + +// ReadWriteTransaction provides a locking read-write transaction. +// +// This type of transaction is the only way to write data into Cloud Spanner; +// (*Client).Apply and (*Client).ApplyAtLeastOnce use transactions +// internally. These transactions rely on pessimistic locking and, if +// necessary, two-phase commit. Locking read-write transactions may abort, +// requiring the application to retry. However, the interface exposed by +// (*Client).ReadWriteTransaction eliminates the need for applications to write +// retry loops explicitly. +// +// Locking transactions may be used to atomically read-modify-write data +// anywhere in a database. This type of transaction is externally consistent. +// +// Clients should attempt to minimize the amount of time a transaction is +// active. Faster transactions commit with higher probability and cause less +// contention. Cloud Spanner attempts to keep read locks active as long as the +// transaction continues to do reads. Long periods of inactivity at the client +// may cause Cloud Spanner to release a transaction's locks and abort it. +// +// Reads performed within a transaction acquire locks on the data being +// read. Writes can only be done at commit time, after all reads have been +// completed. 
Conceptually, a read-write transaction consists of zero or more +// reads or SQL queries followed by a commit. +// +// See (*Client).ReadWriteTransaction for an example. +// +// Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired are still +// valid at commit time, and it is able to acquire write locks for all +// writes. Cloud Spanner can abort the transaction for any reason. If a commit +// attempt returns ABORTED, Cloud Spanner guarantees that the transaction has not +// modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about how long +// the transaction's locks were held for. It is an error to use Cloud Spanner locks +// for any sort of mutual exclusion other than between Cloud Spanner transactions +// themselves. +// +// Aborted transactions +// +// Application code does not need to retry explicitly; RunInTransaction will +// automatically retry a transaction if an attempt results in an abort. The +// lock priority of a transaction increases after each prior aborted +// transaction, meaning that the next attempt has a slightly better chance of +// success than before. +// +// Under some circumstances (e.g., many transactions attempting to modify the +// same row(s)), a transaction can abort many times in a short period before +// successfully committing. Thus, it is not a good idea to cap the number of +// retries a transaction can attempt; instead, it is better to limit the total +// amount of wall time spent retrying. +// +// Idle transactions +// +// A transaction is considered idle if it has no outstanding reads or SQL +// queries and has not started a read or SQL query within the last 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold +// on to locks indefinitely. In that case, the commit will fail with error +// ABORTED. +// +// If this behavior is undesirable, periodically executing a simple SQL query +// in the transaction (e.g., SELECT 1) prevents the transaction from becoming +// idle. +type ReadWriteTransaction struct { + // txReadOnly contains methods for performing transactional reads. + txReadOnly + // sh is the sessionHandle allocated from sp. It is set only once during the initialization of ReadWriteTransaction. + sh *sessionHandle + // tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadWriteTransaction. + // It is set only once in ReadWriteTransaction.begin() during the initialization of ReadWriteTransaction. + tx transactionID + // mu protects concurrent access to the internal states of ReadWriteTransaction. + mu sync.Mutex + // state is the current transaction status of the read-write transaction. + state txState + // wb is the set of buffered mutations waiting to be commited. + wb []*Mutation +} + +// BufferWrite adds a list of mutations to the set of updates that will be +// applied when the transaction is committed. It does not actually apply the +// write until the transaction is committed, so the operation does not +// block. The effects of the write won't be visible to any reads (including +// reads done in the same transaction) until the transaction commits. +// +// See the example for Client.ReadWriteTransaction. +func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error { + t.mu.Lock() + defer t.mu.Unlock() + if t.state == txClosed { + return errTxClosed() + } + if t.state != txActive { + return errUnexpectedTxState(t.state) + } + t.wb = append(t.wb, ms...) 
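+	// The mutations are only buffered on the client here; commit() sends the
+	// accumulated t.wb to Cloud Spanner in a single Commit RPC.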
+ return nil +} + +// acquire implements txReadEnv.acquire. +func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + ts := &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_Id{ + Id: t.tx, + }, + } + t.mu.Lock() + defer t.mu.Unlock() + switch t.state { + case txClosed: + return nil, nil, errTxClosed() + case txActive: + return t.sh, ts, nil + } + return nil, nil, errUnexpectedTxState(t.state) +} + +// release implements txReadEnv.release. +func (t *ReadWriteTransaction) release(_ time.Time, err error) { + t.mu.Lock() + sh := t.sh + t.mu.Unlock() + if sh != nil && shouldDropSession(err) { + sh.destroy() + } +} + +func beginTransaction(ctx context.Context, sid string, client sppb.SpannerClient) (transactionID, error) { + var tx transactionID + err := runRetryable(ctx, func(ctx context.Context) error { + res, e := client.BeginTransaction(ctx, &sppb.BeginTransactionRequest{ + Session: sid, + Options: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadWrite_{ + ReadWrite: &sppb.TransactionOptions_ReadWrite{}, + }, + }, + }) + if e != nil { + return e + } + tx = res.Id + return nil + }) + if err != nil { + return nil, err + } + return tx, nil +} + +// begin starts a read-write transacton on Cloud Spanner, it is always called before any of the public APIs. +func (t *ReadWriteTransaction) begin(ctx context.Context) error { + if t.tx != nil { + t.state = txActive + return nil + } + tx, err := beginTransaction(contextWithMetadata(ctx, t.sh.getMetadata()), t.sh.getID(), t.sh.getClient()) + if err == nil { + t.tx = tx + t.state = txActive + return nil + } + if shouldDropSession(err) { + t.sh.destroy() + } + return err +} + +// commit tries to commit a readwrite transaction to Cloud Spanner. It also returns the commit timestamp for the transactions. +func (t *ReadWriteTransaction) commit(ctx context.Context) (time.Time, error) { + var ts time.Time + t.mu.Lock() + t.state = txClosed // No futher operations after commit. + mPb, err := mutationsProto(t.wb) + t.mu.Unlock() + if err != nil { + return ts, err + } + // In case that sessionHandle was destroyed but transaction body fails to report it. + sid, client := t.sh.getID(), t.sh.getClient() + if sid == "" || client == nil { + return ts, errSessionClosed(t.sh) + } + err = runRetryable(contextWithMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error { + var trailer metadata.MD + res, e := client.Commit(ctx, &sppb.CommitRequest{ + Session: sid, + Transaction: &sppb.CommitRequest_TransactionId{ + TransactionId: t.tx, + }, + Mutations: mPb, + }, grpc.Trailer(&trailer)) + if e != nil { + return toSpannerErrorWithMetadata(e, trailer) + } + if tstamp := res.GetCommitTimestamp(); tstamp != nil { + ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos)) + } + return nil + }) + if shouldDropSession(err) { + t.sh.destroy() + } + return ts, err +} + +// rollback is called when a commit is aborted or the transaction body runs into error. +func (t *ReadWriteTransaction) rollback(ctx context.Context) { + t.mu.Lock() + // Forbid further operations on rollbacked transaction. + t.state = txClosed + t.mu.Unlock() + // In case that sessionHandle was destroyed but transaction body fails to report it. 
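+	// Checking the cached session ID and client first avoids sending a Rollback
+	// RPC on a session that is already gone.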
+ sid, client := t.sh.getID(), t.sh.getClient() + if sid == "" || client == nil { + return + } + err := runRetryable(contextWithMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error { + _, e := client.Rollback(ctx, &sppb.RollbackRequest{ + Session: sid, + TransactionId: t.tx, + }) + return e + }) + if shouldDropSession(err) { + t.sh.destroy() + } + return +} + +// runInTransaction executes f under a read-write transaction context. +func (t *ReadWriteTransaction) runInTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error) { + var ( + ts time.Time + err error + ) + if err = f(t); err == nil { + // Try to commit if transaction body returns no error. + ts, err = t.commit(ctx) + } + if err != nil { + if isAbortErr(err) { + // Retry the transaction using the same session on ABORT error. + // Cloud Spanner will create the new transaction with the previous one's wound-wait priority. + err = errRetry(err) + return ts, err + } + // Not going to commit, according to API spec, should rollback the transaction. + t.rollback(ctx) + return ts, err + } + // err == nil, return commit timestamp. + return ts, err +} + +// writeOnlyTransaction provides the most efficient way of doing write-only transactions. It essentially does blind writes to Cloud Spanner. +type writeOnlyTransaction struct { + // sp is the session pool which writeOnlyTransaction uses to get Cloud Spanner sessions for blind writes. + sp *sessionPool +} + +// applyAtLeastOnce commits a list of mutations to Cloud Spanner for at least once, unless one of the following happends: +// 1) Context is timeout. +// 2) An unretryable error(e.g. database not found) occurs. +// 3) There is a malformed Mutation object. +func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) { + var ( + ts time.Time + sh *sessionHandle + ) + mPb, err := mutationsProto(ms) + if err != nil { + // Malformed mutation found, just return the error. + return ts, err + } + err = runRetryable(ctx, func(ct context.Context) error { + var e error + var trailers metadata.MD + if sh == nil || sh.getID() == "" || sh.getClient() == nil { + // No usable session for doing the commit, take one from pool. + sh, e = t.sp.take(ctx) + if e != nil { + // sessionPool.Take already retries for session creations/retrivals. + return e + } + } + res, e := sh.getClient().Commit(contextWithMetadata(ctx, sh.getMetadata()), &sppb.CommitRequest{ + Session: sh.getID(), + Transaction: &sppb.CommitRequest_SingleUseTransaction{ + SingleUseTransaction: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadWrite_{ + ReadWrite: &sppb.TransactionOptions_ReadWrite{}, + }, + }, + }, + Mutations: mPb, + }, grpc.Trailer(&trailers)) + if e != nil { + if isAbortErr(e) { + // Mask ABORT error as retryable, because aborted transactions are allowed to be retried. + return errRetry(toSpannerErrorWithMetadata(e, trailers)) + } + if shouldDropSession(e) { + // Discard the bad session. + sh.destroy() + } + return e + } + if tstamp := res.GetCommitTimestamp(); tstamp != nil { + ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos)) + } + return nil + }) + if sh != nil { + sh.recycle() + } + return ts, err +} + +// isAbortedErr returns true if the error indicates that an gRPC call is aborted on the server side. 
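+// runInTransaction and applyAtLeastOnce above rely on it to decide whether to
+// wrap an error with errRetry and attempt the transaction again.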
+func isAbortErr(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Aborted { + return true + } + return false +} diff --git a/vendor/cloud.google.com/go/spanner/value.go b/vendor/cloud.google.com/go/spanner/value.go new file mode 100644 index 000000000..ed13582c7 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value.go @@ -0,0 +1,1244 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/fields" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// NullInt64 represents a Cloud Spanner INT64 that may be NULL. +type NullInt64 struct { + Int64 int64 + Valid bool // Valid is true if Int64 is not NULL. +} + +// String implements Stringer.String for NullInt64 +func (n NullInt64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Int64) +} + +// NullString represents a Cloud Spanner STRING that may be NULL. +type NullString struct { + StringVal string + Valid bool // Valid is true if StringVal is not NULL. +} + +// String implements Stringer.String for NullString +func (n NullString) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%q", n.StringVal) +} + +// NullFloat64 represents a Cloud Spanner FLOAT64 that may be NULL. +type NullFloat64 struct { + Float64 float64 + Valid bool // Valid is true if Float64 is not NULL. +} + +// String implements Stringer.String for NullFloat64 +func (n NullFloat64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Float64) +} + +// NullBool represents a Cloud Spanner BOOL that may be NULL. +type NullBool struct { + Bool bool + Valid bool // Valid is true if Bool is not NULL. +} + +// String implements Stringer.String for NullBool +func (n NullBool) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Bool) +} + +// NullTime represents a Cloud Spanner TIMESTAMP that may be null. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL. +} + +// String implements Stringer.String for NullTime +func (n NullTime) String() string { + if !n.Valid { + return fmt.Sprintf("%s", "") + } + return fmt.Sprintf("%q", n.Time.Format(time.RFC3339Nano)) +} + +// NullDate represents a Cloud Spanner DATE that may be null. +type NullDate struct { + Date civil.Date + Valid bool // Valid is true if Date is not NULL. +} + +// String implements Stringer.String for NullDate +func (n NullDate) String() string { + if !n.Valid { + return fmt.Sprintf("%s", "") + } + return fmt.Sprintf("%q", n.Date) +} + +// NullRow represents a Cloud Spanner STRUCT that may be NULL. +// See also the document for Row. 
+// Note that NullRow is not a valid Cloud Spanner column Type. +type NullRow struct { + Row Row + Valid bool // Valid is true if Row is not NULL. +} + +// GenericColumnValue represents the generic encoded value and type of the +// column. See google.spanner.v1.ResultSet proto for details. This can be +// useful for proxying query results when the result types are not known in +// advance. +type GenericColumnValue struct { + Type *sppb.Type + Value *proto3.Value +} + +// Decode decodes a GenericColumnValue. The ptr argument should be a pointer +// to a Go value that can accept v. +func (v GenericColumnValue) Decode(ptr interface{}) error { + return decodeValue(v.Value, v.Type, ptr) +} + +// NewGenericColumnValue creates a GenericColumnValue from Go value that is +// valid for Cloud Spanner. +func NewGenericColumnValue(v interface{}) (*GenericColumnValue, error) { + value, typ, err := encodeValue(v) + if err != nil { + return nil, err + } + return &GenericColumnValue{Value: value, Type: typ}, nil +} + +// errTypeMismatch returns error for destination not having a compatible type +// with source Cloud Spanner type. +func errTypeMismatch(srcType sppb.TypeCode, isArray bool, dst interface{}) error { + usage := srcType.String() + if isArray { + usage = fmt.Sprintf("%v[%v]", sppb.TypeCode_ARRAY, srcType) + } + return spannerErrorf(codes.InvalidArgument, "type %T cannot be used for decoding %v", dst, usage) +} + +// errNilSpannerType returns error for nil Cloud Spanner type in decoding. +func errNilSpannerType() error { + return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner data type in decoding") +} + +// errNilSrc returns error for decoding from nil proto value. +func errNilSrc() error { + return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner value in decoding") +} + +// errNilDst returns error for decoding into nil interface{}. +func errNilDst(dst interface{}) error { + return spannerErrorf(codes.InvalidArgument, "cannot decode into nil type %T", dst) +} + +// errNilArrElemType returns error for input Cloud Spanner data type being a array but without a +// non-nil array element type. +func errNilArrElemType(t *sppb.Type) error { + return spannerErrorf(codes.FailedPrecondition, "array type %v is with nil array element type", t) +} + +// errDstNotForNull returns error for decoding a SQL NULL value into a destination which doesn't +// support NULL values. +func errDstNotForNull(dst interface{}) error { + return spannerErrorf(codes.InvalidArgument, "destination %T cannot support NULL SQL values", dst) +} + +// errBadEncoding returns error for decoding wrongly encoded BYTES/INT64. +func errBadEncoding(v *proto3.Value, err error) error { + return spannerErrorf(codes.FailedPrecondition, "%v wasn't correctly encoded: <%v>", v, err) +} + +func parseNullTime(v *proto3.Value, p *NullTime, code sppb.TypeCode, isNull bool) error { + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_TIMESTAMP { + return errTypeMismatch(code, false, p) + } + if isNull { + *p = NullTime{} + return nil + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := time.Parse(time.RFC3339Nano, x) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Time = y + return nil +} + +// decodeValue decodes a protobuf Value into a pointer to a Go value, as +// specified by sppb.Type. 
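+//
+// For example (a sketch, assuming the stringProto/stringType helpers used
+// elsewhere in this package):
+//
+//	var s string
+//	err := decodeValue(stringProto("abc"), stringType(), &s) // s == "abc" on success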
+func decodeValue(v *proto3.Value, t *sppb.Type, ptr interface{}) error { + if v == nil { + return errNilSrc() + } + if t == nil { + return errNilSpannerType() + } + code := t.Code + acode := sppb.TypeCode_TYPE_CODE_UNSPECIFIED + if code == sppb.TypeCode_ARRAY { + if t.ArrayElementType == nil { + return errNilArrElemType(t) + } + acode = t.ArrayElementType.Code + } + typeErr := errTypeMismatch(code, false, ptr) + if code == sppb.TypeCode_ARRAY { + typeErr = errTypeMismatch(acode, true, ptr) + } + nullErr := errDstNotForNull(ptr) + _, isNull := v.Kind.(*proto3.Value_NullValue) + + // Do the decoding based on the type of ptr. + switch p := ptr.(type) { + case nil: + return errNilDst(nil) + case *string: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return typeErr + } + if isNull { + return nullErr + } + x, err := getStringValue(v) + if err != nil { + return err + } + *p = x + case *NullString: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return typeErr + } + if isNull { + *p = NullString{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + p.Valid = true + p.StringVal = x + case *[]NullString: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRING { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeStringArray(x) + if err != nil { + return err + } + *p = y + case *[]byte: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BYTES { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := base64.StdEncoding.DecodeString(x) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *[][]byte: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_BYTES { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeByteArray(x) + if err != nil { + return err + } + *p = y + case *int64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_INT64 { + return typeErr + } + if isNull { + return nullErr + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := strconv.ParseInt(x, 10, 64) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *NullInt64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_INT64 { + return typeErr + } + if isNull { + *p = NullInt64{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := strconv.ParseInt(x, 10, 64) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Int64 = y + case *[]NullInt64: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_INT64 { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeIntArray(x) + if err != nil { + return err + } + *p = y + case *bool: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BOOL { + return typeErr + } + if isNull { + return nullErr + } + x, err := getBoolValue(v) + if err != nil { + return err + } + *p = x + case *NullBool: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BOOL { + return typeErr + } + if isNull { + *p = NullBool{} + break + } + x, err := getBoolValue(v) + if err != nil { + return err + } + p.Valid = true + p.Bool = x + case *[]NullBool: + 
if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_BOOL { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeBoolArray(x) + if err != nil { + return err + } + *p = y + case *float64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_FLOAT64 { + return typeErr + } + if isNull { + return nullErr + } + x, err := getFloat64Value(v) + if err != nil { + return err + } + *p = x + case *NullFloat64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_FLOAT64 { + return typeErr + } + if isNull { + *p = NullFloat64{} + break + } + x, err := getFloat64Value(v) + if err != nil { + return err + } + p.Valid = true + p.Float64 = x + case *[]NullFloat64: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_FLOAT64 { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeFloat64Array(x) + if err != nil { + return err + } + *p = y + case *time.Time: + var nt NullTime + if isNull { + return nullErr + } + err := parseNullTime(v, &nt, code, isNull) + if err != nil { + return nil + } + *p = nt.Time + case *NullTime: + err := parseNullTime(v, p, code, isNull) + if err != nil { + return err + } + case *[]NullTime: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_TIMESTAMP { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeTimeArray(x) + if err != nil { + return err + } + *p = y + case *civil.Date: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_DATE { + return typeErr + } + if isNull { + return nullErr + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := civil.ParseDate(x) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *NullDate: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_DATE { + return typeErr + } + if isNull { + *p = NullDate{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := civil.ParseDate(x) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Date = y + case *[]NullDate: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_DATE { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeDateArray(x) + if err != nil { + return err + } + *p = y + case *[]NullRow: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRUCT { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeRowArray(t.ArrayElementType.StructType, x) + if err != nil { + return err + } + *p = y + case *GenericColumnValue: + *p = GenericColumnValue{ + // Deep clone to ensure subsequent changes to t or v + // don't affect our decoded value. + Type: proto.Clone(t).(*sppb.Type), + Value: proto.Clone(v).(*proto3.Value), + } + default: + // Check if the proto encoding is for an array of structs. + if !(code == sppb.TypeCode_ARRAY && acode == sppb.TypeCode_STRUCT) { + return typeErr + } + vp := reflect.ValueOf(p) + if !vp.IsValid() { + return errNilDst(p) + } + if !isPtrStructPtrSlice(vp.Type()) { + // The container is not a pointer to a struct pointer slice. + return typeErr + } + // Only use reflection for nil detection on slow path. 
+ // Also, IsNil panics on many types, so check it after the type check. + if vp.IsNil() { + return errNilDst(p) + } + if isNull { + // The proto Value is encoding NULL, set the pointer to struct + // slice to nil as well. + vp.Elem().Set(reflect.Zero(vp.Elem().Type())) + break + } + x, err := getListValue(v) + if err != nil { + return err + } + if err = decodeStructArray(t.ArrayElementType.StructType, x, p); err != nil { + return err + } + } + return nil +} + +// errSrvVal returns an error for getting a wrong source protobuf value in decoding. +func errSrcVal(v *proto3.Value, want string) error { + return spannerErrorf(codes.FailedPrecondition, "cannot use %v(Kind: %T) as Value_%sValue in decoding", + v, v.GetKind(), want) +} + +// getStringValue returns the string value encoded in proto3.Value v whose +// kind is proto3.Value_StringValue. +func getStringValue(v *proto3.Value) (string, error) { + if x, ok := v.GetKind().(*proto3.Value_StringValue); ok && x != nil { + return x.StringValue, nil + } + return "", errSrcVal(v, "String") +} + +// getBoolValue returns the bool value encoded in proto3.Value v whose +// kind is proto3.Value_BoolValue. +func getBoolValue(v *proto3.Value) (bool, error) { + if x, ok := v.GetKind().(*proto3.Value_BoolValue); ok && x != nil { + return x.BoolValue, nil + } + return false, errSrcVal(v, "Bool") +} + +// getListValue returns the proto3.ListValue contained in proto3.Value v whose +// kind is proto3.Value_ListValue. +func getListValue(v *proto3.Value) (*proto3.ListValue, error) { + if x, ok := v.GetKind().(*proto3.Value_ListValue); ok && x != nil { + return x.ListValue, nil + } + return nil, errSrcVal(v, "List") +} + +// errUnexpectedNumStr returns error for decoder getting a unexpected string for +// representing special float values. +func errUnexpectedNumStr(s string) error { + return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for number", s) +} + +// getFloat64Value returns the float64 value encoded in proto3.Value v whose +// kind is proto3.Value_NumberValue / proto3.Value_StringValue. +// Cloud Spanner uses string to encode NaN, Infinity and -Infinity. +func getFloat64Value(v *proto3.Value) (float64, error) { + switch x := v.GetKind().(type) { + case *proto3.Value_NumberValue: + if x == nil { + break + } + return x.NumberValue, nil + case *proto3.Value_StringValue: + if x == nil { + break + } + switch x.StringValue { + case "NaN": + return math.NaN(), nil + case "Infinity": + return math.Inf(1), nil + case "-Infinity": + return math.Inf(-1), nil + default: + return 0, errUnexpectedNumStr(x.StringValue) + } + } + return 0, errSrcVal(v, "Number") +} + +// errNilListValue returns error for unexpected nil ListValue in decoding Cloud Spanner ARRAYs. +func errNilListValue(sqlType string) error { + return spannerErrorf(codes.FailedPrecondition, "unexpected nil ListValue in decoding %v array", sqlType) +} + +// errDecodeArrayElement returns error for failure in decoding single array element. +func errDecodeArrayElement(i int, v proto.Message, sqlType string, err error) error { + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.Unknown, + "cannot decode %v(array element %v) as %v, error = <%v>", v, i, sqlType, err) + } + se.decorate(fmt.Sprintf("cannot decode %v(array element %v) as %v", v, i, sqlType)) + return se +} + +// decodeStringArray decodes proto3.ListValue pb into a NullString slice. 
+func decodeStringArray(pb *proto3.ListValue) ([]NullString, error) { + if pb == nil { + return nil, errNilListValue("STRING") + } + a := make([]NullString, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, stringType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "STRING", err) + } + } + return a, nil +} + +// decodeIntArray decodes proto3.ListValue pb into a NullInt64 slice. +func decodeIntArray(pb *proto3.ListValue) ([]NullInt64, error) { + if pb == nil { + return nil, errNilListValue("INT64") + } + a := make([]NullInt64, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, intType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "INT64", err) + } + } + return a, nil +} + +// decodeBoolArray decodes proto3.ListValue pb into a NullBool slice. +func decodeBoolArray(pb *proto3.ListValue) ([]NullBool, error) { + if pb == nil { + return nil, errNilListValue("BOOL") + } + a := make([]NullBool, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, boolType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "BOOL", err) + } + } + return a, nil +} + +// decodeFloat64Array decodes proto3.ListValue pb into a NullFloat64 slice. +func decodeFloat64Array(pb *proto3.ListValue) ([]NullFloat64, error) { + if pb == nil { + return nil, errNilListValue("FLOAT64") + } + a := make([]NullFloat64, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, floatType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "FLOAT64", err) + } + } + return a, nil +} + +// decodeByteArray decodes proto3.ListValue pb into a slice of byte slice. +func decodeByteArray(pb *proto3.ListValue) ([][]byte, error) { + if pb == nil { + return nil, errNilListValue("BYTES") + } + a := make([][]byte, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, bytesType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "BYTES", err) + } + } + return a, nil +} + +// decodeTimeArray decodes proto3.ListValue pb into a NullTime slice. +func decodeTimeArray(pb *proto3.ListValue) ([]NullTime, error) { + if pb == nil { + return nil, errNilListValue("TIMESTAMP") + } + a := make([]NullTime, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, timeType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err) + } + } + return a, nil +} + +// decodeDateArray decodes proto3.ListValue pb into a NullDate slice. +func decodeDateArray(pb *proto3.ListValue) ([]NullDate, error) { + if pb == nil { + return nil, errNilListValue("DATE") + } + a := make([]NullDate, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, dateType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "DATE", err) + } + } + return a, nil +} + +func errNotStructElement(i int, v *proto3.Value) error { + return errDecodeArrayElement(i, v, "STRUCT", + spannerErrorf(codes.FailedPrecondition, "%v(type: %T) doesn't encode Cloud Spanner STRUCT", v, v)) +} + +// decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to +// the structual information given in sppb.StructType ty. 
+func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue) ([]NullRow, error) { + if pb == nil { + return nil, errNilListValue("STRUCT") + } + a := make([]NullRow, len(pb.Values)) + for i := range pb.Values { + switch v := pb.Values[i].GetKind().(type) { + case *proto3.Value_ListValue: + a[i] = NullRow{ + Row: Row{ + fields: ty.Fields, + vals: v.ListValue.Values, + }, + Valid: true, + } + // Null elements not currently supported by the server, see + // https://cloud.google.com/spanner/docs/query-syntax#using-structs-with-select + case *proto3.Value_NullValue: + // no-op, a[i] is NullRow{} already + default: + return nil, errNotStructElement(i, pb.Values[i]) + } + } + return a, nil +} + +// structFieldColumn returns the name of i-th field of struct type typ if the field +// is untagged; otherwise, it returns the tagged name of the field. +func structFieldColumn(typ reflect.Type, i int) (col string, ok bool) { + desc := typ.Field(i) + if desc.PkgPath != "" || desc.Anonymous { + // Skip unexported or anonymous fields. + return "", false + } + col = desc.Name + if tag := desc.Tag.Get("spanner"); tag != "" { + if tag == "-" { + // Skip fields tagged "-" to match encoding/json and others. + return "", false + } + col = tag + if idx := strings.Index(tag, ","); idx != -1 { + col = tag[:idx] + } + } + return col, true +} + +// errNilSpannerStructType returns error for unexpected nil Cloud Spanner STRUCT schema type in decoding. +func errNilSpannerStructType() error { + return spannerErrorf(codes.FailedPrecondition, "unexpected nil StructType in decoding Cloud Spanner STRUCT") +} + +// errUnnamedField returns error for decoding a Cloud Spanner STRUCT with unnamed field into a Go struct. +func errUnnamedField(ty *sppb.StructType, i int) error { + return spannerErrorf(codes.InvalidArgument, "unnamed field %v in Cloud Spanner STRUCT %+v", i, ty) +} + +// errNoOrDupGoField returns error for decoding a Cloud Spanner +// STRUCT into a Go struct which is either missing a field, or has duplicate fields. +func errNoOrDupGoField(s interface{}, f string) error { + return spannerErrorf(codes.InvalidArgument, "Go struct %+v(type %T) has no or duplicate fields for Cloud Spanner STRUCT field %v", s, s, f) +} + +// errDupColNames returns error for duplicated Cloud Spanner STRUCT field names found in decoding a Cloud Spanner STRUCT into a Go struct. +func errDupSpannerField(f string, ty *sppb.StructType) error { + return spannerErrorf(codes.InvalidArgument, "duplicated field name %q in Cloud Spanner STRUCT %+v", f, ty) +} + +// errDecodeStructField returns error for failure in decoding a single field of a Cloud Spanner STRUCT. +func errDecodeStructField(ty *sppb.StructType, f string, err error) error { + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.Unknown, + "cannot decode field %v of Cloud Spanner STRUCT %+v, error = <%v>", f, ty, err) + } + se.decorate(fmt.Sprintf("cannot decode field %v of Cloud Spanner STRUCT %+v", f, ty)) + return se +} + +// decodeStruct decodes proto3.ListValue pb into struct referenced by pointer ptr, according to +// the structual information given in sppb.StructType ty. +func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error { + if reflect.ValueOf(ptr).IsNil() { + return errNilDst(ptr) + } + if ty == nil { + return errNilSpannerStructType() + } + // t holds the structual information of ptr. + t := reflect.TypeOf(ptr).Elem() + // v is the actual value that ptr points to. 
+ v := reflect.ValueOf(ptr).Elem() + + fields, err := fieldCache.Fields(t) + if err != nil { + return toSpannerError(err) + } + seen := map[string]bool{} + for i, f := range ty.Fields { + if f.Name == "" { + return errUnnamedField(ty, i) + } + sf := fields.Match(f.Name) + if sf == nil { + return errNoOrDupGoField(ptr, f.Name) + } + if seen[f.Name] { + // We don't allow duplicated field name. + return errDupSpannerField(f.Name, ty) + } + // Try to decode a single field. + if err := decodeValue(pb.Values[i], f.Type, v.FieldByIndex(sf.Index).Addr().Interface()); err != nil { + return errDecodeStructField(ty, f.Name, err) + } + // Mark field f.Name as processed. + seen[f.Name] = true + } + return nil +} + +// isPtrStructPtrSlice returns true if ptr is a pointer to a slice of struct pointers. +func isPtrStructPtrSlice(t reflect.Type) bool { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice { + // t is not a pointer to a slice. + return false + } + if t = t.Elem(); t.Elem().Kind() != reflect.Ptr || t.Elem().Elem().Kind() != reflect.Struct { + // the slice that t points to is not a slice of struct pointers. + return false + } + return true +} + +// decodeStructArray decodes proto3.ListValue pb into struct slice referenced by pointer ptr, according to the +// structual information given in a sppb.StructType. +func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error { + if pb == nil { + return errNilListValue("STRUCT") + } + // Type of the struct pointers stored in the slice that ptr points to. + ts := reflect.TypeOf(ptr).Elem().Elem() + // The slice that ptr points to, might be nil at this point. + v := reflect.ValueOf(ptr).Elem() + // Allocate empty slice. + v.Set(reflect.MakeSlice(v.Type(), 0, len(pb.Values))) + // Decode every struct in pb.Values. + for i, pv := range pb.Values { + // Check if pv is a NULL value. + if _, isNull := pv.Kind.(*proto3.Value_NullValue); isNull { + // Append a nil pointer to the slice. + v.Set(reflect.Append(v, reflect.New(ts).Elem())) + continue + } + // Allocate empty struct. + s := reflect.New(ts.Elem()) + // Get proto3.ListValue l from proto3.Value pv. + l, err := getListValue(pv) + if err != nil { + return errDecodeArrayElement(i, pv, "STRUCT", err) + } + // Decode proto3.ListValue l into struct referenced by s.Interface(). + if err = decodeStruct(ty, l, s.Interface()); err != nil { + return errDecodeArrayElement(i, pv, "STRUCT", err) + } + // Append the decoded struct back into the slice. + v.Set(reflect.Append(v, s)) + } + return nil +} + +// errEncoderUnsupportedType returns error for not being able to encode a value of +// certain type. +func errEncoderUnsupportedType(v interface{}) error { + return spannerErrorf(codes.InvalidArgument, "encoder doesn't support type %T", v) +} + +// encodeValue encodes a Go native type into a proto3.Value. 
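+// It also reports the matching sppb.Type; for inputs that encode to NULL the
+// returned type may be nil (see TestEncodeValue in value_test.go). A sketch,
+// assuming the intProto/intType helpers used elsewhere in this package:
+//
+//	pb, pt, err := encodeValue(int64(81)) // pb == intProto(81), pt == intType()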
+func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) { + pb := &proto3.Value{ + Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}, + } + var pt *sppb.Type + var err error + switch v := v.(type) { + case nil: + case string: + pb.Kind = stringKind(v) + pt = stringType() + case NullString: + if v.Valid { + return encodeValue(v.StringVal) + } + case []string: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(stringType()) + } + case []NullString: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(stringType()) + } + case []byte: + if v != nil { + pb.Kind = stringKind(base64.StdEncoding.EncodeToString(v)) + pt = bytesType() + } + case [][]byte: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(bytesType()) + } + case int: + pb.Kind = stringKind(strconv.FormatInt(int64(v), 10)) + pt = intType() + case []int: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(intType()) + } + case int64: + pb.Kind = stringKind(strconv.FormatInt(v, 10)) + pt = intType() + case []int64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(intType()) + } + case NullInt64: + if v.Valid { + return encodeValue(v.Int64) + } + case []NullInt64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(intType()) + } + case bool: + pb.Kind = &proto3.Value_BoolValue{BoolValue: v} + pt = boolType() + case []bool: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(boolType()) + } + case NullBool: + if v.Valid { + return encodeValue(v.Bool) + } + case []NullBool: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(boolType()) + } + case float64: + pb.Kind = &proto3.Value_NumberValue{NumberValue: v} + pt = floatType() + case []float64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(floatType()) + } + case NullFloat64: + if v.Valid { + return encodeValue(v.Float64) + } + case []NullFloat64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(floatType()) + } + case time.Time: + pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano)) + pt = timeType() + case []time.Time: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(timeType()) + } + case NullTime: + if v.Valid { + return encodeValue(v.Time) + } + case []NullTime: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(timeType()) + } + case civil.Date: + pb.Kind = stringKind(v.String()) + pt = dateType() + case []civil.Date: + if v != nil { + pb, err = encodeArray(len(v), func(i int) 
interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(dateType()) + } + case NullDate: + if v.Valid { + return encodeValue(v.Date) + } + case []NullDate: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + pt = listType(dateType()) + } + case GenericColumnValue: + // Deep clone to ensure subsequent changes to v before + // transmission don't affect our encoded value. + pb = proto.Clone(v.Value).(*proto3.Value) + pt = proto.Clone(v.Type).(*sppb.Type) + default: + return nil, nil, errEncoderUnsupportedType(v) + } + return pb, pt, nil +} + +// encodeValueArray encodes a Value array into a proto3.ListValue. +func encodeValueArray(vs []interface{}) (*proto3.ListValue, error) { + lv := &proto3.ListValue{} + lv.Values = make([]*proto3.Value, 0, len(vs)) + for _, v := range vs { + pb, _, err := encodeValue(v) + if err != nil { + return nil, err + } + lv.Values = append(lv.Values, pb) + } + return lv, nil +} + +// encodeArray assumes that all values of the array element type encode without error. +func encodeArray(len int, at func(int) interface{}) (*proto3.Value, error) { + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(at(i)) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} + +func spannerTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + if s := t.Get("spanner"); s != "" { + if s == "-" { + return "", false, nil, nil + } + return s, true, nil, nil + } + return "", true, nil, nil +} + +var fieldCache = fields.NewCache(spannerTagParser, nil, nil) diff --git a/vendor/cloud.google.com/go/spanner/value_test.go b/vendor/cloud.google.com/go/spanner/value_test.go new file mode 100644 index 000000000..992e8508c --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value_test.go @@ -0,0 +1,613 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + t1 = mustParseTime("2016-11-15T15:04:05.999999999Z") + // Boundaries + t2 = mustParseTime("0000-01-01T00:00:00.000000000Z") + t3 = mustParseTime("9999-12-31T23:59:59.999999999Z") + // Local timezone + t4 = time.Now() + d1 = mustParseDate("2016-11-15") + d2 = mustParseDate("1678-01-01") +) + +func mustParseTime(s string) time.Time { + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + panic(err) + } + return t +} + +func mustParseDate(s string) civil.Date { + d, err := civil.ParseDate(s) + if err != nil { + panic(err) + } + return d +} + +// Test encoding Values. 
+func TestEncodeValue(t *testing.T) { + var ( + tString = stringType() + tInt = intType() + tBool = boolType() + tFloat = floatType() + tBytes = bytesType() + tTime = timeType() + tDate = dateType() + ) + for i, test := range []struct { + in interface{} + want *proto3.Value + wantType *sppb.Type + }{ + // STRING / STRING ARRAY + {"abc", stringProto("abc"), tString}, + {NullString{"abc", true}, stringProto("abc"), tString}, + {NullString{"abc", false}, nullProto(), nil}, + {[]string{"abc", "bcd"}, listProto(stringProto("abc"), stringProto("bcd")), listType(tString)}, + {[]NullString{{"abcd", true}, {"xyz", false}}, listProto(stringProto("abcd"), nullProto()), listType(tString)}, + // BYTES / BYTES ARRAY + {[]byte("foo"), bytesProto([]byte("foo")), tBytes}, + {[]byte(nil), nullProto(), nil}, + {[][]byte{nil, []byte("ab")}, listProto(nullProto(), bytesProto([]byte("ab"))), listType(tBytes)}, + {[][]byte(nil), nullProto(), nil}, + // INT64 / INT64 ARRAY + {7, intProto(7), tInt}, + {[]int{31, 127}, listProto(intProto(31), intProto(127)), listType(tInt)}, + {int64(81), intProto(81), tInt}, + {[]int64{33, 129}, listProto(intProto(33), intProto(129)), listType(tInt)}, + {NullInt64{11, true}, intProto(11), tInt}, + {NullInt64{11, false}, nullProto(), nil}, + {[]NullInt64{{35, true}, {131, false}}, listProto(intProto(35), nullProto()), listType(tInt)}, + // BOOL / BOOL ARRAY + {true, boolProto(true), tBool}, + {NullBool{true, true}, boolProto(true), tBool}, + {NullBool{true, false}, nullProto(), nil}, + {[]bool{true, false}, listProto(boolProto(true), boolProto(false)), listType(tBool)}, + {[]NullBool{{true, true}, {true, false}}, listProto(boolProto(true), nullProto()), listType(tBool)}, + // FLOAT64 / FLOAT64 ARRAY + {3.14, floatProto(3.14), tFloat}, + {NullFloat64{3.1415, true}, floatProto(3.1415), tFloat}, + {NullFloat64{math.Inf(1), true}, floatProto(math.Inf(1)), tFloat}, + {NullFloat64{3.14159, false}, nullProto(), nil}, + {[]float64{3.141, 0.618, math.Inf(-1)}, listProto(floatProto(3.141), floatProto(0.618), floatProto(math.Inf(-1))), listType(tFloat)}, + {[]NullFloat64{{3.141, true}, {0.618, false}}, listProto(floatProto(3.141), nullProto()), listType(tFloat)}, + // TIMESTAMP / TIMESTAMP ARRAY + {t1, timeProto(t1), tTime}, + {NullTime{t1, true}, timeProto(t1), tTime}, + {NullTime{t1, false}, nullProto(), nil}, + {[]time.Time{t1, t2, t3, t4}, listProto(timeProto(t1), timeProto(t2), timeProto(t3), timeProto(t4)), listType(tTime)}, + {[]NullTime{{t1, true}, {t1, false}}, listProto(timeProto(t1), nullProto()), listType(tTime)}, + // DATE / DATE ARRAY + {d1, dateProto(d1), tDate}, + {NullDate{d1, true}, dateProto(d1), tDate}, + {NullDate{civil.Date{}, false}, nullProto(), nil}, + {[]civil.Date{d1, d2}, listProto(dateProto(d1), dateProto(d2)), listType(tDate)}, + {[]NullDate{{d1, true}, {civil.Date{}, false}}, listProto(dateProto(d1), nullProto()), listType(tDate)}, + // GenericColumnValue + {GenericColumnValue{tString, stringProto("abc")}, stringProto("abc"), tString}, + {GenericColumnValue{tString, nullProto()}, nullProto(), tString}, + // not actually valid (stringProto inside int list), but demonstrates pass-through. 
+ { + GenericColumnValue{ + Type: listType(tInt), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + listProto(intProto(5), nullProto(), stringProto("bcd")), + listType(tInt), + }, + } { + got, gotType, err := encodeValue(test.in) + if err != nil { + t.Fatalf("#%d: got error during encoding: %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("#%d: got encode result: %v, want %v", i, got, test.want) + } + if !reflect.DeepEqual(gotType, test.wantType) { + t.Errorf("#%d: got encode type: %v, want %v", i, gotType, test.wantType) + } + } +} + +// Test decoding Values. +func TestDecodeValue(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + want interface{} + fail bool + }{ + // STRING + {stringProto("abc"), stringType(), "abc", false}, + {nullProto(), stringType(), "abc", true}, + {stringProto("abc"), stringType(), NullString{"abc", true}, false}, + {nullProto(), stringType(), NullString{}, false}, + // STRING ARRAY + { + listProto(stringProto("abc"), nullProto(), stringProto("bcd")), + listType(stringType()), + []NullString{{"abc", true}, {}, {"bcd", true}}, + false, + }, + {nullProto(), listType(stringType()), []NullString(nil), false}, + // BYTES + {bytesProto([]byte("ab")), bytesType(), []byte("ab"), false}, + {nullProto(), bytesType(), []byte(nil), false}, + // BYTES ARRAY + {listProto(bytesProto([]byte("ab")), nullProto()), listType(bytesType()), [][]byte{[]byte("ab"), nil}, false}, + {nullProto(), listType(bytesType()), [][]byte(nil), false}, + //INT64 + {intProto(15), intType(), int64(15), false}, + {nullProto(), intType(), int64(0), true}, + {intProto(15), intType(), NullInt64{15, true}, false}, + {nullProto(), intType(), NullInt64{}, false}, + // INT64 ARRAY + {listProto(intProto(91), nullProto(), intProto(87)), listType(intType()), []NullInt64{{91, true}, {}, {87, true}}, false}, + {nullProto(), listType(intType()), []NullInt64(nil), false}, + // BOOL + {boolProto(true), boolType(), true, false}, + {nullProto(), boolType(), true, true}, + {boolProto(true), boolType(), NullBool{true, true}, false}, + {nullProto(), boolType(), NullBool{}, false}, + // BOOL ARRAY + {listProto(boolProto(true), boolProto(false), nullProto()), listType(boolType()), []NullBool{{true, true}, {false, true}, {}}, false}, + {nullProto(), listType(boolType()), []NullBool(nil), false}, + // FLOAT64 + {floatProto(3.14), floatType(), 3.14, false}, + {nullProto(), floatType(), 0.00, true}, + {floatProto(3.14), floatType(), NullFloat64{3.14, true}, false}, + {nullProto(), floatType(), NullFloat64{}, false}, + // FLOAT64 ARRAY + { + listProto(floatProto(math.Inf(1)), floatProto(math.Inf(-1)), nullProto(), floatProto(3.1)), + listType(floatType()), + []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}, {}, {3.1, true}}, + false, + }, + {nullProto(), listType(floatType()), []NullFloat64(nil), false}, + // TIMESTAMP + {timeProto(t1), timeType(), t1, false}, + {timeProto(t1), timeType(), NullTime{t1, true}, false}, + {nullProto(), timeType(), NullTime{}, false}, + // TIMESTAMP ARRAY + {listProto(timeProto(t1), timeProto(t2), timeProto(t3), nullProto()), listType(timeType()), []NullTime{{t1, true}, {t2, true}, {t3, true}, {}}, false}, + {nullProto(), listType(timeType()), []NullTime(nil), false}, + // DATE + {dateProto(d1), dateType(), d1, false}, + {dateProto(d1), dateType(), NullDate{d1, true}, false}, + {nullProto(), dateType(), NullDate{}, false}, + // DATE ARRAY + {listProto(dateProto(d1), dateProto(d2), nullProto()), 
listType(dateType()), []NullDate{{d1, true}, {d2, true}, {}}, false}, + {nullProto(), listType(dateType()), []NullDate(nil), false}, + // STRUCT ARRAY + // STRUCT schema is equal to the following Go struct: + // type s struct { + // Col1 NullInt64 + // Col2 []struct { + // SubCol1 float64 + // SubCol2 string + // } + // } + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []NullRow{ + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + }, + }, + Valid: true, + }, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + nullProto(), + nullProto(), + }, + }, + Valid: true, + }, + {}, + }, + fail: false, + }, + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []*struct { + Col1 NullInt64 + StructCol []*struct { + SubCol1 NullFloat64 + SubCol2 string + } `spanner:"Col2"` + }{ + { + Col1: NullInt64{3, true}, + StructCol: []*struct { + SubCol1 NullFloat64 + SubCol2 string + }{ + { + SubCol1: NullFloat64{3.14, true}, + SubCol2: "this", + }, + { + SubCol1: NullFloat64{0.57, true}, + SubCol2: "siht", + }, + }, + }, + { + Col1: NullInt64{}, + StructCol: []*struct { + SubCol1 NullFloat64 + SubCol2 string + }(nil), + }, + nil, + }, + fail: false, + }, + // GenericColumnValue + {stringProto("abc"), stringType(), GenericColumnValue{stringType(), stringProto("abc")}, false}, + {nullProto(), stringType(), GenericColumnValue{stringType(), nullProto()}, false}, + // not actually valid (stringProto inside int list), but demonstrates pass-through. 
+ { + in: listProto(intProto(5), nullProto(), stringProto("bcd")), + t: listType(intType()), + want: GenericColumnValue{ + Type: listType(intType()), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + fail: false, + }, + } { + t.Logf("(%v) Testing decoding %v(%v)", i, test.in, test.t) + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := decodeValue(test.in, test.t, gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("%d: cannot decode %v(%v): %v", i, test.in, test.t, err) + } + t.Logf("%d: got error message %v", i, err) + continue + } + if test.fail { + t.Errorf("%d: decoding %v(%v) succeeds unexpectedly, want error", i, test.in, test.t) + continue + } + got := reflect.Indirect(gotp).Interface() + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: unexpected decoding result - got %v, want %v", i, got, test.want) + continue + } + } +} + +// Test error cases for decodeValue. +func TestDecodeValueErrors(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + v interface{} + }{ + {nullProto(), stringType(), nil}, + {nullProto(), stringType(), 1}, + } { + err := decodeValue(test.in, test.t, test.v) + if err == nil { + t.Errorf("#%d: want error, got nil", i) + } + } +} + +// Test NaN encoding/decoding. +func TestNaN(t *testing.T) { + // Decode NaN value. + f := 0.0 + nf := NullFloat64{} + // To float64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &f); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(f) { + t.Errorf("f = %v, want %v", f, math.NaN()) + } + // To NullFloat64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &nf); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(nf.Float64) || !nf.Valid { + t.Errorf("f = %v, want %v", f, NullFloat64{math.NaN(), true}) + } + // Encode NaN value + // From float64 + v, _, err := encodeValue(math.NaN()) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok := v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } + // From NullFloat64 + v, _, err = encodeValue(NullFloat64{math.NaN(), true}) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok = v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } +} + +func TestGenericColumnValue(t *testing.T) { + for _, test := range []struct { + in GenericColumnValue + want interface{} + fail bool + }{ + {GenericColumnValue{stringType(), stringProto("abc")}, "abc", false}, + {GenericColumnValue{stringType(), stringProto("abc")}, 5, true}, + {GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}, []NullInt64{{91, true}, {}, {87, true}}, false}, + {GenericColumnValue{intType(), intProto(42)}, GenericColumnValue{intType(), intProto(42)}, false}, // trippy! :-) + } { + // We take a copy and mutate because we're paranoid about immutability. 
+ inCopy := GenericColumnValue{ + Type: proto.Clone(test.in.Type).(*sppb.Type), + Value: proto.Clone(test.in.Value).(*proto3.Value), + } + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := inCopy.Decode(gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("cannot decode %v to %v: %v", test.in, test.want, err) + } + continue + } + if test.fail { + t.Errorf("decoding %v to %v succeeds unexpectedly", test.in, test.want) + } + // mutations to inCopy should be invisible to gotp. + inCopy.Type.Code = sppb.TypeCode_TIMESTAMP + inCopy.Value.Kind = &proto3.Value_NumberValue{NumberValue: 999} + got := reflect.Indirect(gotp).Interface() + if !reflect.DeepEqual(got, test.want) { + t.Errorf("unexpected decode result - got %v, want %v", got, test.want) + } + + // Test we can go backwards as well. + v, err := NewGenericColumnValue(test.want) + if err != nil { + t.Errorf("NewGenericColumnValue failed: %v", err) + continue + } + if !reflect.DeepEqual(*v, test.in) { + t.Errorf("unexpected encode result - got %v, want %v", v, test.in) + } + // If want is a GenericColumnValue, mutate its underlying value to validate + // we have taken a deep copy. + if gcv, ok := test.want.(GenericColumnValue); ok { + gcv.Type.Code = sppb.TypeCode_TIMESTAMP + gcv.Value.Kind = &proto3.Value_NumberValue{NumberValue: 999} + if !reflect.DeepEqual(*v, test.in) { + t.Errorf("expected deep copy - got %v, want %v", v, test.in) + } + } + } +} + +func runBench(b *testing.B, size int, f func(a []int) (*proto3.Value, *sppb.Type, error)) { + a := make([]int, size) + for i := 0; i < b.N; i++ { + f(a) + } +} + +func BenchmarkEncodeIntArrayOrig1(b *testing.B) { + runBench(b, 1, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayOrig10(b *testing.B) { + runBench(b, 10, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayOrig100(b *testing.B) { + runBench(b, 100, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayOrig1000(b *testing.B) { + runBench(b, 1000, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayFunc1(b *testing.B) { + runBench(b, 1, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayFunc10(b *testing.B) { + runBench(b, 10, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayFunc100(b *testing.B) { + runBench(b, 100, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayFunc1000(b *testing.B) { + runBench(b, 1000, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayReflect1(b *testing.B) { + runBench(b, 1, encodeIntArrayReflect) +} + +func BenchmarkEncodeIntArrayReflect10(b *testing.B) { + runBench(b, 10, encodeIntArrayReflect) +} + +func BenchmarkEncodeIntArrayReflect100(b *testing.B) { + runBench(b, 100, encodeIntArrayReflect) +} + +func BenchmarkEncodeIntArrayReflect1000(b *testing.B) { + runBench(b, 1000, encodeIntArrayReflect) +} + +func encodeIntArrayOrig(a []int) (*proto3.Value, *sppb.Type, error) { + vs := make([]*proto3.Value, len(a)) + var err error + for i := range a { + vs[i], _, err = encodeValue(a[i]) + if err != nil { + return nil, nil, err + } + } + return listProto(vs...), listType(intType()), nil +} + +func encodeIntArrayFunc(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := encodeArray(len(a), func(i int) interface{} { return a[i] }) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeIntArrayReflect(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := encodeArrayReflect(a) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeArrayReflect(a interface{}) (*proto3.Value, 
error) { + va := reflect.ValueOf(a) + len := va.Len() + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(va.Index(i).Interface()) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go index 26110f176..1bfa6c843 100644 --- a/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,4 +20,14 @@ // Google Cloud Speech API. package speech // import "cloud.google.com/go/speech/apiv1beta1" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go index a1b04cf54..7fe8d492b 100644 --- a/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
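Looking back at the spanner value.go hunk earlier in this diff: encodeValue normalizes every supported Go type onto a small set of proto3 Value kinds. INT64, BYTES and TIMESTAMP columns all travel as strings (decimal text, base64, and UTC RFC3339Nano respectively), BOOL and FLOAT64 use the dedicated bool and number kinds, and invalid Null* wrappers collapse to the proto3 null. A minimal standalone sketch of those wire shapes follows; the stringValue helper and the example values here are illustrative only and are not part of the vendored package.

package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"time"

	proto3 "github.com/golang/protobuf/ptypes/struct"
)

// stringValue wraps s in a proto3 Value; STRING, INT64, BYTES and TIMESTAMP
// columns all use this string carrier on the wire.
func stringValue(s string) *proto3.Value {
	return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: s}}
}

func main() {
	// INT64: decimal text, as produced by strconv.FormatInt in encodeValue.
	fmt.Println(stringValue(strconv.FormatInt(42, 10)))

	// BYTES: base64 text.
	fmt.Println(stringValue(base64.StdEncoding.EncodeToString([]byte("foo"))))

	// TIMESTAMP: UTC RFC3339 with nanoseconds.
	fmt.Println(stringValue(time.Now().UTC().Format(time.RFC3339Nano)))

	// BOOL and FLOAT64: dedicated proto3 kinds.
	fmt.Println(&proto3.Value{Kind: &proto3.Value_BoolValue{BoolValue: true}})
	fmt.Println(&proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: 3.14}})

	// NULL (for example an invalid NullString): the proto3 null value.
	fmt.Println(&proto3.Value{
		Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE},
	})
}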
@@ -131,8 +131,18 @@ func TestSpeechSyncRecognize(t *testing.T) { mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) - var config *speechpb.RecognitionConfig = &speechpb.RecognitionConfig{} - var audio *speechpb.RecognitionAudio = &speechpb.RecognitionAudio{} + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } var request = &speechpb.SyncRecognizeRequest{ Config: config, Audio: audio, @@ -162,8 +172,18 @@ func TestSpeechSyncRecognizeError(t *testing.T) { errCode := codes.Internal mockSpeech.err = grpc.Errorf(errCode, "test error") - var config *speechpb.RecognitionConfig = &speechpb.RecognitionConfig{} - var audio *speechpb.RecognitionAudio = &speechpb.RecognitionAudio{} + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } var request = &speechpb.SyncRecognizeRequest{ Config: config, Audio: audio, @@ -197,8 +217,18 @@ func TestSpeechAsyncRecognize(t *testing.T) { Result: &longrunningpb.Operation_Response{Response: any}, }) - var config *speechpb.RecognitionConfig = &speechpb.RecognitionConfig{} - var audio *speechpb.RecognitionAudio = &speechpb.RecognitionAudio{} + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } var request = &speechpb.AsyncRecognizeRequest{ Config: config, Audio: audio, @@ -242,8 +272,18 @@ func TestSpeechAsyncRecognizeError(t *testing.T) { }, }) - var config *speechpb.RecognitionConfig = &speechpb.RecognitionConfig{} - var audio *speechpb.RecognitionAudio = &speechpb.RecognitionAudio{} + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } var request = &speechpb.AsyncRecognizeRequest{ Config: config, Audio: audio, diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go index 9480359cd..ea9b90a07 100644 --- a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
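The speech mock-test fixtures above now spell out a complete SyncRecognizeRequest (FLAC encoding, 44100 Hz sample rate, a gs:// source URI) instead of empty config and audio structs. For context, here is a hedged sketch of the same request issued through the real client; the genproto import path for speechpb is assumed rather than taken from this diff, and the bucket and object names are the placeholder values used by the tests.

package main

import (
	"fmt"
	"log"

	speech "cloud.google.com/go/speech/apiv1beta1"
	"golang.org/x/net/context"
	// Assumed import path for the generated speechpb package.
	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1"
)

func main() {
	ctx := context.Background()
	client, err := speech.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Mirror the fixture used in the mock tests: FLAC audio at 44.1 kHz,
	// read from a Cloud Storage object (placeholder bucket/object).
	req := &speechpb.SyncRecognizeRequest{
		Config: &speechpb.RecognitionConfig{
			Encoding:   speechpb.RecognitionConfig_FLAC,
			SampleRate: 44100,
		},
		Audio: &speechpb.RecognitionAudio{
			AudioSource: &speechpb.RecognitionAudio_Uri{
				Uri: "gs://bucket_name/file_name.flac",
			},
		},
	}

	resp, err := client.SyncRecognize(ctx, req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}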
@@ -18,10 +18,9 @@ package speech import ( "fmt" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" "cloud.google.com/go/longrunning" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" @@ -31,7 +30,6 @@ import ( longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) // CallOptions contains the retry settings for each method of Client. @@ -84,7 +82,7 @@ type Client struct { CallOptions *CallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewClient creates a new speech client. @@ -101,7 +99,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error client: speechpb.NewSpeechClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -119,17 +117,14 @@ func (c *Client) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *Client) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *Client) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // SyncRecognize perform synchronous speech-recognition: receive results after all audio // has been sent and processed. func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest) (*speechpb.SyncRecognizeResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *speechpb.SyncRecognizeResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -147,8 +142,7 @@ func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeR // `Operation.error` or an `Operation.response` which contains // an `AsyncRecognizeResponse` message. func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest) (*AsyncRecognizeResponseOperation, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -166,8 +160,7 @@ func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecogniz // StreamingRecognize perform bidirectional streaming speech-recognition: receive results while // sending audio. This method is only available via the gRPC API (not REST). 
func (c *Client) StreamingRecognize(ctx context.Context) (speechpb.Speech_StreamingRecognizeClient, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp speechpb.Speech_StreamingRecognizeClient err := gax.Invoke(ctx, func(ctx context.Context) error { var err error diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go index 4585cad2a..6b9db0a0e 100644 --- a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 714d280e2..6c2439425 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -27,6 +27,7 @@ type ACLRole string const ( RoleOwner ACLRole = "OWNER" RoleReader ACLRole = "READER" + RoleWriter ACLRole = "WRITER" ) // ACLEntity refers to a user or group. diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index cf6496b9b..951391f54 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -152,5 +152,10 @@ SignedURL for details. // TODO: Handle error. } fmt.Println(url) + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index bafb136a5..53fc02923 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -82,7 +82,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error option.WithUserAgent(userAgent), } opts = append(o, opts...) - hc, _, err := transport.NewHTTPClient(ctx, opts...) + hc, ep, err := transport.NewHTTPClient(ctx, opts...) 
if err != nil { return nil, fmt.Errorf("dialing: %v", err) } @@ -90,6 +90,9 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error if err != nil { return nil, fmt.Errorf("storage client: %v", err) } + if ep != "" { + rawService.BasePath = ep + } return &Client{ hc: hc, raw: rawService, @@ -508,27 +511,33 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return nil, err } var res *http.Response - err = runWithRetry(ctx, func() error { res, err = o.c.hc.Do(req); return err }) + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + return nil + }) if err != nil { return nil, err } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { - res.Body.Close() - return nil, errors.New("storage: partial request not satisfied") - } var size int64 // total size of object, even if a range was requested. if res.StatusCode == http.StatusPartialContent { diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index a7a932990..61bd0be63 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -120,7 +120,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { // Close completes the write operation and flushes any buffered data. // If Close doesn't return an error, metadata about the written object -// can be retrieved by calling Object. +// can be retrieved by calling Attrs. func (w *Writer) Close() error { if !w.opened { if err := w.open(); err != nil { diff --git a/vendor/cloud.google.com/go/trace/apiv1/doc.go b/vendor/cloud.google.com/go/trace/apiv1/doc.go index 4eb012b0d..6d956bddb 100644 --- a/vendor/cloud.google.com/go/trace/apiv1/doc.go +++ b/vendor/cloud.google.com/go/trace/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -25,4 +25,14 @@ // Use the client at cloud.google.com/go/trace in preference to this. 
package trace // import "cloud.google.com/go/trace/apiv1" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/mock_test.go b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go index ed278ab52..e8a1ffecb 100644 --- a/vendor/cloud.google.com/go/trace/apiv1/mock_test.go +++ b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go index 75ef94d7a..057c8faf3 100644 --- a/vendor/cloud.google.com/go/trace/apiv1/trace_client.go +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ package trace import ( "fmt" "math" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -31,7 +30,6 @@ import ( cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) // CallOptions contains the retry settings for each method of Client. @@ -86,7 +84,7 @@ type Client struct { CallOptions *CallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewClient creates a new trace service client. @@ -107,7 +105,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error client: cloudtracepb.NewTraceServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -125,10 +123,8 @@ func (c *Client) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *Client) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *Client) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // PatchTraces sends new traces to Stackdriver Trace or updates existing traces. If the ID @@ -137,8 +133,7 @@ func (c *Client) SetGoogleClientInfo(name, version string) { // and any new fields provided are merged with the existing trace data. If the // ID does not match, a new trace is created. 
func (c *Client) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest) error { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) err := gax.Invoke(ctx, func(ctx context.Context) error { var err error _, err = c.client.PatchTraces(ctx, req) @@ -149,8 +144,7 @@ func (c *Client) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesR // GetTrace gets a single trace by its ID. func (c *Client) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest) (*cloudtracepb.Trace, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *cloudtracepb.Trace err := gax.Invoke(ctx, func(ctx context.Context) error { var err error @@ -165,8 +159,7 @@ func (c *Client) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest // ListTraces returns of a list of traces that match the specified filter conditions. func (c *Client) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest) *TraceIterator { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) it := &TraceIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtracepb.Trace, string, error) { var resp *cloudtracepb.ListTracesResponse diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go index e5c432c88..733c5918c 100644 --- a/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/trace/trace.go b/vendor/cloud.google.com/go/trace/trace.go index bf761f9e6..59892902e 100644 --- a/vendor/cloud.google.com/go/trace/trace.go +++ b/vendor/cloud.google.com/go/trace/trace.go @@ -37,10 +37,16 @@ // ... // } // -// SpanFromRequest returns nil if the *Client is nil, so you can disable +// SpanFromRequest and NewSpan returns nil if the *Client is nil, so you can disable // tracing by not initializing your *Client variable. All of the exported // functions on *Span do nothing when the *Span is nil. // +// If you need to start traces that don't correspond to an incoming HTTP request, +// you can use NewSpan to create a root-level span. +// +// span := traceClient.NewSpan("span name") +// defer span.Finish() +// // Although a trace span object is created for every request, only a subset of // traces are uploaded to the server, for efficiency. By default, the requests // that are traced are those with the tracing bit set in the options field of @@ -69,14 +75,31 @@ // ... // childSpan.Finish() // +// Alternatively, if you have access to the X-Cloud-Trace-Context header value +// but not the underlying HTTP request (this can happen if you are using a +// different transport or messaging protocol, such as gRPC), you can use +// SpanFromHeader instead of SpanFromRequest. In that case, you will need to +// specify the span name explicility, since it cannot be constructed from the +// HTTP request's URL and method. 
+// +// func handler(r *somepkg.Request) { +// span := traceClient.SpanFromHeader("span name", r.TraceContext()) +// defer span.Finish() +// ... +// } +// // Spans can contain a map from keys to values that have useful information // about the span. The elements of this map are called labels. Some labels, // whose keys all begin with the string "trace.cloud.google.com/", are set // automatically in the following ways: +// // - SpanFromRequest sets some labels to data about the incoming request. +// // - NewRemoteChild sets some labels to data about the outgoing request. +// // - Finish sets a label to a stack trace, if the stack trace option is enabled -// in the incoming trace header. +// in the incoming trace header. +// // - The WithResponse option sets some labels to data about a response. // You can also set labels using SetLabel. If a label is given a value // automatically and by SetLabel, the automatically-set value is used. @@ -90,11 +113,12 @@ // ... // childSpan.Finish(trace.WithResponse(resp)) // -// When a span created by SpanFromRequest is finished, the finished spans in the -// corresponding trace -- the span itself and its descendants -- are uploaded -// to the Stackdriver Trace server using the *Client that created the span. -// Finish returns immediately, and uploading occurs asynchronously. You can use -// the FinishWait function instead to wait until uploading has finished. +// When a span created by SpanFromRequest or SpamFromHeader is finished, the +// finished spans in the corresponding trace -- the span itself and its +// descendants -- are uploaded to the Stackdriver Trace server using the +// *Client that created the span. Finish returns immediately, and uploading +// occurs asynchronously. You can use the FinishWait function instead to wait +// until uploading has finished. // // err := span.FinishWait() // @@ -114,8 +138,8 @@ // FromContext. // // func foo(ctx context.Context) { -// newSpan := trace.FromContext(ctx).NewChild("in foo") -// defer newSpan.Finish() +// span := trace.FromContext(ctx).NewChild("in foo") +// defer span.Finish() // ... // } // @@ -300,7 +324,49 @@ func (c *Client) SetSamplingPolicy(p SamplingPolicy) { } } -// SpanFromRequest returns a new trace span. +// SpanFromHeader returns a new trace span, based on a provided request header +// value. See https://cloud.google.com/trace/docs/faq. +// +// It returns nil iff the client is nil. +// +// The trace information and identifiers will be read from the header value. +// Otherwise, a new trace ID is made and the parent span ID is zero. +// +// The name of the new span is provided as an argument. +// +// If a non-nil sampling policy has been set in the client, it can override +// the options set in the header and choose whether to trace the request. +// +// If the header doesn't have existing tracing information, then a *Span is +// returned anyway, but it will not be uploaded to the server, just as when +// calling SpanFromRequest on an untraced request. +// +// Most users using HTTP should use SpanFromRequest, rather than +// SpanFromHeader, since it provides additional functionality for HTTP +// requests. In particular, it will set various pieces of request information +// as labels on the *Span, which is not available from the header alone. 
+func (c *Client) SpanFromHeader(name string, header string) *Span { + if c == nil { + return nil + } + traceID, parentSpanID, options, ok := traceInfoFromHeader(header) + if !ok { + traceID = nextTraceID() + } + t := &trace{ + traceID: traceID, + client: c, + globalOptions: options, + localOptions: options, + } + span := startNewChild(name, t, parentSpanID) + span.span.Kind = spanKindServer + span.rootSpan = true + configureSpanFromPolicy(span, c.policy, ok) + return span +} + +// SpanFromRequest returns a new trace span for an HTTP request. // // It returns nil iff the client is nil. // @@ -317,44 +383,69 @@ func (c *Client) SetSamplingPolicy(p SamplingPolicy) { // methods can still be called -- the Finish, FinishWait, and SetLabel methods // do nothing. NewChild does nothing, and returns the same *Span. TraceID // works as usual. -func (client *Client) SpanFromRequest(r *http.Request) *Span { - if client == nil { +func (c *Client) SpanFromRequest(r *http.Request) *Span { + if c == nil { return nil } - traceID, parentSpanID, options, hasTraceHeader := traceInfoFromRequest(r) - if !hasTraceHeader { + traceID, parentSpanID, options, ok := traceInfoFromHeader(r.Header.Get(httpHeader)) + if !ok { traceID = nextTraceID() } t := &trace{ traceID: traceID, - client: client, + client: c, globalOptions: options, localOptions: options, } span := startNewChildWithRequest(r, t, parentSpanID) span.span.Kind = spanKindServer span.rootSpan = true - if client.policy != nil { - d := client.policy.Sample(Parameters{HasTraceHeader: hasTraceHeader}) - if d.Trace { - // Turn on tracing locally, and in child requests. - span.trace.localOptions |= optionTrace - span.trace.globalOptions |= optionTrace - } else { - // Turn off tracing locally. - span.trace.localOptions = 0 - return span - } - if d.Sample { - // This trace is in the random sample, so set the labels. - span.SetLabel(labelSamplingPolicy, d.Policy) - span.SetLabel(labelSamplingWeight, fmt.Sprint(d.Weight)) - } - } + configureSpanFromPolicy(span, c.policy, ok) + return span +} +// NewSpan returns a new trace span with the given name. +// +// A new trace and span ID is generated to trace the span. +// Returned span need to be finished by calling Finish or FinishWait. +func (c *Client) NewSpan(name string) *Span { + if c == nil { + return nil + } + t := &trace{ + traceID: nextTraceID(), + client: c, + localOptions: optionTrace, + globalOptions: optionTrace, + } + span := startNewChild(name, t, 0) + span.span.Kind = spanKindServer + span.rootSpan = true + configureSpanFromPolicy(span, c.policy, false) return span } +func configureSpanFromPolicy(s *Span, p SamplingPolicy, ok bool) { + if p == nil { + return + } + d := p.Sample(Parameters{HasTraceHeader: ok}) + if d.Trace { + // Turn on tracing locally, and in child requests. + s.trace.localOptions |= optionTrace + s.trace.globalOptions |= optionTrace + } else { + // Turn off tracing locally. + s.trace.localOptions = 0 + return + } + if d.Sample { + // This trace is in the random sample, so set the labels. + s.SetLabel(labelSamplingPolicy, d.Policy) + s.SetLabel(labelSamplingWeight, fmt.Sprint(d.Weight)) + } +} + // NewContext returns a derived context containing the span. 
func NewContext(ctx context.Context, s *Span) context.Context { if s == nil { @@ -369,9 +460,8 @@ func FromContext(ctx context.Context) *Span { return s } -func traceInfoFromRequest(r *http.Request) (string, uint64, optionFlags, bool) { +func traceInfoFromHeader(h string) (string, uint64, optionFlags, bool) { // See https://cloud.google.com/trace/docs/faq for the header format. - h := r.Header.Get(httpHeader) // Return if the header is empty or missing, or if the header is unreasonably // large, to avoid making unnecessary copies of a large string. if h == "" || len(h) > 200 { @@ -540,8 +630,9 @@ func (s *Span) NewRemoteChild(r *http.Request) *Span { return newSpan } -func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanId uint64) *Span { - newSpan := startNewChild(r.URL.Path, trace, parentSpanId) +func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanID uint64) *Span { + name := r.URL.Host + r.URL.Path // drop scheme and query params + newSpan := startNewChild(name, trace, parentSpanID) if r.Host == "" { newSpan.host = r.URL.Host } else { @@ -552,9 +643,9 @@ func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanId uint64 return newSpan } -func startNewChild(name string, trace *trace, parentSpanId uint64) *Span { +func startNewChild(name string, trace *trace, parentSpanID uint64) *Span { spanID := nextSpanID() - for spanID == parentSpanId { + for spanID == parentSpanID { spanID = nextSpanID() } newSpan := &Span{ @@ -562,7 +653,7 @@ func startNewChild(name string, trace *trace, parentSpanId uint64) *Span { span: api.TraceSpan{ Kind: spanKindClient, Name: name, - ParentSpanId: parentSpanId, + ParentSpanId: parentSpanID, SpanId: spanID, }, start: time.Now(), diff --git a/vendor/cloud.google.com/go/trace/trace_test.go b/vendor/cloud.google.com/go/trace/trace_test.go index da9363e94..556947d03 100644 --- a/vendor/cloud.google.com/go/trace/trace_test.go +++ b/vendor/cloud.google.com/go/trace/trace_test.go @@ -74,20 +74,18 @@ type fakeDatastoreServer struct { func (f *fakeDatastoreServer) Lookup(ctx context.Context, req *dspb.LookupRequest) (*dspb.LookupResponse, error) { if f.fail { - return nil, errors.New("failed!") + return nil, errors.New("lookup failed") } return &dspb.LookupResponse{}, nil } // makeRequests makes some requests. -// req is an incoming request used to construct the trace. traceClient is the -// client used to upload the trace. rt is the trace client's http client's -// transport. This is used to retrieve the trace uploaded by the client, if -// any. If expectTrace is true, we expect a trace will be uploaded. If -// synchronous is true, the call to Finish is expected not to return before the -// client has uploaded any traces. -func makeRequests(t *testing.T, req *http.Request, traceClient *Client, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request { - span := traceClient.SpanFromRequest(req) +// span is the root span. rt is the trace client's http client's transport. +// This is used to retrieve the trace uploaded by the client, if any. If +// expectTrace is true, we expect a trace will be uploaded. If synchronous is +// true, the call to Finish is expected not to return before the client has +// uploaded any traces. +func makeRequests(t *testing.T, span *Span, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request { ctx := NewContext(context.Background(), span) // An HTTP request. 
@@ -196,25 +194,195 @@ func makeRequests(t *testing.T, req *http.Request, traceClient *Client, rt *fake func TestTrace(t *testing.T) { t.Parallel() - testTrace(t, false) + testTrace(t, false, true) } func TestTraceWithWait(t *testing.T) { - testTrace(t, true) + testTrace(t, true, true) } -func testTrace(t *testing.T, synchronous bool) { - req, err := http.NewRequest("GET", "http://example.com/foo", nil) +func TestTraceFromHeader(t *testing.T) { + t.Parallel() + testTrace(t, false, false) +} + +func TestTraceFromHeaderWithWait(t *testing.T) { + testTrace(t, false, true) +} + +func TestNewSpan(t *testing.T) { + const traceID = "0123456789ABCDEF0123456789ABCDEF" + + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + span := traceClient.NewSpan("/foo") + span.trace.traceID = traceID + + uploaded := makeRequests(t, span, rt, true, true) + + if uploaded == nil { + t.Fatalf("No trace uploaded, expected one.") + } + + expected := api.Traces{ + Traces: []*api.Trace{ + { + ProjectId: testProjectID, + Spans: []*api.TraceSpan{ + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "http://example.com/bar", + }, + Name: "example.com/bar", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "www.googleapis.com/storage/v1/b/testbucket/o", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: nil, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + { + Kind: "RPC_SERVER", + Labels: map[string]string{}, + Name: "/foo", + }, + }, + TraceId: traceID, + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) if err != nil { t.Fatal(err) } - req.Header["X-Cloud-Trace-Context"] = []string{`0123456789ABCDEF0123456789ABCDEF/42;o=3`} + if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Fatalf("PatchTraces request: got %s want %s", got, want) + } + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, 
b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 0 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 0) + } + for i, s := range patch.Traces[0].Spans { + if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !reflect.DeepEqual(patch, expected) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s want %s", got, want) + } +} + +func testTrace(t *testing.T, synchronous bool, fromRequest bool) { + const header = `0123456789ABCDEF0123456789ABCDEF/42;o=3` rt := newFakeRoundTripper() traceClient := newTestClient(rt) - uploaded := makeRequests(t, req, traceClient, rt, synchronous, true) + span := traceClient.SpanFromHeader("/foo", header) + headerOrReqLabels := map[string]string{} + headerOrReqName := "/foo" + + if fromRequest { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("X-Cloud-Trace-Context", header) + span = traceClient.SpanFromRequest(req) + headerOrReqLabels = map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/url": "http://example.com/foo", + } + headerOrReqName = "example.com/foo" + } + uploaded := makeRequests(t, span, rt, synchronous, true) if uploaded == nil { t.Fatalf("No trace uploaded, expected one.") } @@ -232,7 +400,7 @@ func testTrace(t *testing.T, synchronous bool) { "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "http://example.com/bar", }, - Name: "/bar", + Name: "example.com/bar", }, { Kind: "RPC_CLIENT", @@ -242,7 +410,7 @@ func testTrace(t *testing.T, synchronous bool) { "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", }, - Name: "/compute/v1/projects/testproject/zones", + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", }, { Kind: "RPC_CLIENT", @@ -252,7 +420,7 @@ func testTrace(t *testing.T, synchronous bool) { "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", }, - Name: "/storage/v1/b/testbucket/o", + Name: "www.googleapis.com/storage/v1/b/testbucket/o", }, &api.TraceSpan{ Kind: "RPC_CLIENT", @@ -261,17 +429,13 @@ func testTrace(t *testing.T, synchronous bool) { }, 
&api.TraceSpan{ Kind: "RPC_CLIENT", - Labels: map[string]string{"error": "rpc error: code = 2 desc = failed!"}, + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, Name: "/google.datastore.v1.Datastore/Lookup", }, { - Kind: "RPC_SERVER", - Labels: map[string]string{ - "trace.cloud.google.com/http/host": "example.com", - "trace.cloud.google.com/http/method": "GET", - "trace.cloud.google.com/http/url": "http://example.com/foo", - }, - Name: "/foo", + Kind: "RPC_SERVER", + Labels: headerOrReqLabels, + Name: headerOrReqName, }, }, TraceId: "0123456789ABCDEF0123456789ABCDEF", @@ -353,19 +517,27 @@ func testTrace(t *testing.T, synchronous bool) { if !reflect.DeepEqual(patch, expected) { got, _ := json.Marshal(patch) want, _ := json.Marshal(expected) - t.Errorf("PatchTraces request: got %s want %s", got, want) + t.Errorf("PatchTraces request: got %s \n\n want %s", got, want) } } func TestNoTrace(t *testing.T) { - testNoTrace(t, false) + testNoTrace(t, false, true) } func TestNoTraceWithWait(t *testing.T) { - testNoTrace(t, true) + testNoTrace(t, true, true) +} + +func TestNoTraceFromHeader(t *testing.T) { + testNoTrace(t, false, false) } -func testNoTrace(t *testing.T, synchronous bool) { +func TestNoTraceFromHeaderWithWait(t *testing.T) { + testNoTrace(t, true, false) +} + +func testNoTrace(t *testing.T, synchronous bool, fromRequest bool) { for _, header := range []string{ `0123456789ABCDEF0123456789ABCDEF/42;o=2`, `0123456789ABCDEF0123456789ABCDEF/42;o=0`, @@ -373,16 +545,22 @@ func testNoTrace(t *testing.T, synchronous bool) { `0123456789ABCDEF0123456789ABCDEF`, ``, } { - req, err := http.NewRequest("GET", "http://example.com/foo", nil) - if header != "" { - req.Header["X-Cloud-Trace-Context"] = []string{header} - } - if err != nil { - t.Fatal(err) - } rt := newFakeRoundTripper() traceClient := newTestClient(rt) - uploaded := makeRequests(t, req, traceClient, rt, synchronous, false) + var span *Span + if fromRequest { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if header != "" { + req.Header.Set("X-Cloud-Trace-Context", header) + } + if err != nil { + t.Fatal(err) + } + span = traceClient.SpanFromRequest(req) + } else { + span = traceClient.SpanFromHeader("/foo", header) + } + uploaded := makeRequests(t, span, rt, synchronous, false) if uploaded != nil { t.Errorf("Got a trace, expected none.") } @@ -609,7 +787,7 @@ func TestPropagation(t *testing.T) { t.Fatal(err) } if header != "" { - req.Header["X-Cloud-Trace-Context"] = []string{header} + req.Header.Set("X-Cloud-Trace-Context", header) } span := traceClient.SpanFromRequest(req) diff --git a/vendor/cloud.google.com/go/vision/annotations.go b/vendor/cloud.google.com/go/vision/annotations.go index 5de7de05b..716496cc4 100644 --- a/vendor/cloud.google.com/go/vision/annotations.go +++ b/vendor/cloud.google.com/go/vision/annotations.go @@ -17,6 +17,7 @@ package vision import ( "image" + "golang.org/x/text/language" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -36,10 +37,16 @@ type Annotations struct { Labels []*EntityAnnotation // Texts holds the results of text detection. Texts []*EntityAnnotation + // FullTexts holds the results of full text (OCR) detection. + FullText *TextAnnotation // SafeSearch holds the results of safe-search detection. SafeSearch *SafeSearchAnnotation // ImageProps contains properties of the annotated image. ImageProps *ImageProps + // Web contains web annotations for the image. 
+ Web *WebDetection + // CropHints contains crop hints for the image. + CropHints []*CropHint // If non-nil, then one or more of the attempted annotations failed. // Non-nil annotations are guaranteed to be correct, even if Error is @@ -64,8 +71,11 @@ func annotationsFromProto(res *pb.AnnotateImageResponse) *Annotations { for _, a := range res.TextAnnotations { as.Texts = append(as.Texts, entityAnnotationFromProto(a)) } + as.FullText = textAnnotationFromProto(res.FullTextAnnotation) as.SafeSearch = safeSearchAnnotationFromProto(res.SafeSearchAnnotation) as.ImageProps = imagePropertiesFromProto(res.ImagePropertiesAnnotation) + as.Web = webDetectionFromProto(res.WebDetection) + as.CropHints = cropHintsFromProto(res.CropHintsAnnotation) if res.Error != nil { // res.Error is a google.rpc.Status. Convert to a Go error. Use a gRPC // error because it preserves the code as a separate field. @@ -213,6 +223,298 @@ func entityAnnotationFromProto(e *pb.EntityAnnotation) *EntityAnnotation { } } +// TextAnnotation contains a structured representation of OCR extracted text. +// The hierarchy of an OCR extracted text structure looks like: +// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol +// Each structural component, starting from Page, may further have its own +// properties. Properties describe detected languages, breaks etc. +type TextAnnotation struct { + // List of pages detected by OCR. + Pages []*Page + // UTF-8 text detected on the pages. + Text string +} + +func textAnnotationFromProto(pta *pb.TextAnnotation) *TextAnnotation { + if pta == nil { + return nil + } + var pages []*Page + for _, p := range pta.Pages { + pages = append(pages, pageFromProto(p)) + } + return &TextAnnotation{ + Pages: pages, + Text: pta.Text, + } +} + +// A Page is a page of text detected from OCR. +type Page struct { + // Additional information detected on the page. + Properties *TextProperties + // Page width in pixels. + Width int32 + // Page height in pixels. + Height int32 + // List of blocks of text, images etc on this page. + Blocks []*Block +} + +func pageFromProto(p *pb.Page) *Page { + if p == nil { + return nil + } + var blocks []*Block + for _, b := range p.Blocks { + blocks = append(blocks, blockFromProto(b)) + } + return &Page{ + Properties: textPropertiesFromProto(p.Property), + Width: p.Width, + Height: p.Height, + Blocks: blocks, + } +} + +// A Block is a logical element on the page. +type Block struct { + // Additional information detected for the block. + Properties *TextProperties + // The bounding box for the block. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // List of paragraphs in this block (if this blocks is of type text). + Paragraphs []*Paragraph + // Detected block type (text, image etc) for this block. + BlockType BlockType +} + +// A BlockType represents the kind of Block (text, image, etc.) +type BlockType int + +const ( + // Unknown block type. + UnknownBlock BlockType = BlockType(pb.Block_UNKNOWN) + // Regular text block. 
+ TextBlock BlockType = BlockType(pb.Block_TEXT) + // Table block. + TableBlock BlockType = BlockType(pb.Block_TABLE) + // Image block. + PictureBlock BlockType = BlockType(pb.Block_PICTURE) + // Horizontal/vertical line box. + RulerBlock BlockType = BlockType(pb.Block_RULER) + // Barcode block. + BarcodeBlock BlockType = BlockType(pb.Block_BARCODE) +) + +func blockFromProto(p *pb.Block) *Block { + if p == nil { + return nil + } + var paras []*Paragraph + for _, pa := range p.Paragraphs { + paras = append(paras, paragraphFromProto(pa)) + } + return &Block{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Paragraphs: paras, + BlockType: BlockType(p.BlockType), + } +} + +// A Paragraph is a structural unit of text representing a number of words in +// certain order. +type Paragraph struct { + // Additional information detected for the paragraph. + Properties *TextProperties + // The bounding box for the paragraph. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // List of words in this paragraph. + Words []*Word +} + +func paragraphFromProto(p *pb.Paragraph) *Paragraph { + if p == nil { + return nil + } + var words []*Word + for _, w := range p.Words { + words = append(words, wordFromProto(w)) + } + return &Paragraph{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Words: words, + } +} + +// A Word is a word in a text document. +type Word struct { + // Additional information detected for the word. + Properties *TextProperties + // The bounding box for the word. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // List of symbols in the word. + // The order of the symbols follows the natural reading order. + Symbols []*Symbol +} + +func wordFromProto(p *pb.Word) *Word { + if p == nil { + return nil + } + var syms []*Symbol + for _, s := range p.Symbols { + syms = append(syms, symbolFromProto(s)) + } + return &Word{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Symbols: syms, + } +} + +// A Symbol is a symbol in a text document. +type Symbol struct { + // Additional information detected for the symbol. + Properties *TextProperties + // The bounding box for the symbol. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. 
When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // The actual UTF-8 representation of the symbol. + Text string +} + +func symbolFromProto(p *pb.Symbol) *Symbol { + if p == nil { + return nil + } + return &Symbol{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Text: p.Text, + } +} + +// TextProperties contains additional information about an OCR structural component. +type TextProperties struct { + // A list of detected languages together with confidence. + DetectedLanguages []*DetectedLanguage + // Detected start or end of a text segment. + DetectedBreak *DetectedBreak +} + +// Detected language for a structural component. +type DetectedLanguage struct { + // The BCP-47 language code, such as "en-US" or "sr-Latn". + Code language.Tag + // The confidence of the detected language, in the range [0, 1]. + Confidence float32 +} + +// DetectedBreak is the detected start or end of a structural component. +type DetectedBreak struct { + // The type of break. + Type DetectedBreakType + // True if break prepends the element. + IsPrefix bool +} + +type DetectedBreakType int + +const ( + // Unknown break label type. + UnknownBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_UNKNOWN) + // Regular space. + SpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_SPACE) + // Sure space (very wide). + SureSpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_SURE_SPACE) + // Line-wrapping break. + EOLSureSpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_EOL_SURE_SPACE) + // End-line hyphen that is not present in text; does not co-occur with SPACE, LEADER_SPACE, or LINE_BREAK. + HyphenBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_HYPHEN) + // Line break that ends a paragraph. + LineBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_LINE_BREAK) +) + +func textPropertiesFromProto(p *pb.TextAnnotation_TextProperty) *TextProperties { + var dls []*DetectedLanguage + for _, dl := range p.DetectedLanguages { + tag, _ := language.Parse(dl.LanguageCode) + // Ignore error. If err != nil the returned tag will not be garbage, + // but a best-effort attempt at a parse. At worst it will be + // language.Und, the documented "undefined" Tag. + dls = append(dls, &DetectedLanguage{Code: tag, Confidence: dl.Confidence}) + } + var db *DetectedBreak + if p.DetectedBreak != nil { + db = &DetectedBreak{ + Type: DetectedBreakType(p.DetectedBreak.Type), + IsPrefix: p.DetectedBreak.IsPrefix, + } + } + return &TextProperties{ + DetectedLanguages: dls, + DetectedBreak: db, + } +} + // SafeSearchAnnotation describes the results of a SafeSearch detection on an image. type SafeSearchAnnotation struct { // Adult is the likelihood that the image contains adult content. @@ -257,3 +559,131 @@ func imagePropertiesFromProto(ip *pb.ImageProperties) *ImageProps { } return &ImageProps{DominantColors: cinfos} } + +// WebDetection contains relevant information for the image from the Internet. +type WebDetection struct { + // Deduced entities from similar images on the Internet. 
+ WebEntities []*WebEntity + // Fully matching images from the Internet. + // They're definite neardups and most often a copy of the query image with + // merely a size change. + FullMatchingImages []*WebImage + // Partial matching images from the Internet. + // Those images are similar enough to share some key-point features. For + // example an original image will likely have partial matching for its crops. + PartialMatchingImages []*WebImage + // Web pages containing the matching images from the Internet. + PagesWithMatchingImages []*WebPage +} + +func webDetectionFromProto(p *pb.WebDetection) *WebDetection { + if p == nil { + return nil + } + var ( + wes []*WebEntity + fmis, pmis []*WebImage + wps []*WebPage + ) + for _, e := range p.WebEntities { + wes = append(wes, webEntityFromProto(e)) + } + for _, m := range p.FullMatchingImages { + fmis = append(fmis, webImageFromProto(m)) + } + for _, m := range p.PartialMatchingImages { + pmis = append(fmis, webImageFromProto(m)) + } + for _, g := range p.PagesWithMatchingImages { + wps = append(wps, webPageFromProto(g)) + } + return &WebDetection{ + WebEntities: wes, + FullMatchingImages: fmis, + PartialMatchingImages: pmis, + PagesWithMatchingImages: wps, + } +} + +// A WebEntity is an entity deduced from similar images on the Internet. +type WebEntity struct { + // Opaque entity ID. + ID string + // Overall relevancy score for the entity. + // Not normalized and not comparable across different image queries. + Score float32 + // Canonical description of the entity, in English. + Description string +} + +func webEntityFromProto(p *pb.WebDetection_WebEntity) *WebEntity { + return &WebEntity{ + ID: p.EntityId, + Score: p.Score, + Description: p.Description, + } +} + +// WebImage contains metadata for online images. +type WebImage struct { + // The result image URL. + URL string + // Overall relevancy score for the image. + // Not normalized and not comparable across different image queries. + Score float32 +} + +func webImageFromProto(p *pb.WebDetection_WebImage) *WebImage { + return &WebImage{ + URL: p.Url, + Score: p.Score, + } +} + +// A WebPage contains metadata for web pages. +type WebPage struct { + // The result web page URL. + URL string + // Overall relevancy score for the web page. + // Not normalized and not comparable across different image queries. + Score float32 +} + +func webPageFromProto(p *pb.WebDetection_WebPage) *WebPage { + return &WebPage{ + URL: p.Url, + Score: p.Score, + } +} + +// CropHint is a single crop hint that is used to generate a new crop when +// serving an image. +type CropHint struct { + // The bounding polygon for the crop region. The coordinates of the bounding + // box are in the original image's scale, as returned in `ImageParams`. + BoundingPoly []image.Point + // Confidence of this being a salient region. Range [0, 1]. + Confidence float32 + // Fraction of importance of this salient region with respect to the original + // image. 
+ ImportanceFraction float32 +} + +func cropHintsFromProto(p *pb.CropHintsAnnotation) []*CropHint { + if p == nil { + return nil + } + var chs []*CropHint + for _, pch := range p.CropHints { + chs = append(chs, cropHintFromProto(pch)) + } + return chs +} + +func cropHintFromProto(pch *pb.CropHint) *CropHint { + return &CropHint{ + BoundingPoly: boundingPolyFromProto(pch.BoundingPoly), + Confidence: pch.Confidence, + ImportanceFraction: pch.ImportanceFraction, + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/doc.go b/vendor/cloud.google.com/go/vision/apiv1/doc.go index 8ef4387d4..a1dfbd75e 100644 --- a/vendor/cloud.google.com/go/vision/apiv1/doc.go +++ b/vendor/cloud.google.com/go/vision/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,4 +24,14 @@ // Use the client at cloud.google.com/go/vision in preference to this. package vision // import "cloud.google.com/go/vision/apiv1" -const gapicNameVersion = "gapic/0.1.0" +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go index b4548cabd..950e7a00f 100644 --- a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,10 +18,9 @@ package vision import ( "fmt" - "runtime" - "strings" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" @@ -29,7 +28,6 @@ import ( visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) // ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient. @@ -78,14 +76,14 @@ type ImageAnnotatorClient struct { CallOptions *ImageAnnotatorCallOptions // The metadata to be sent with each request. - metadata metadata.MD + xGoogHeader string } // NewImageAnnotatorClient creates a new image annotator client. // -// Service that performs Google Cloud Vision API detection tasks, such as face, -// landmark, logo, label, and text detection, over client images, and returns -// detected entities from the images. +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) (*ImageAnnotatorClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultImageAnnotatorClientOptions(), opts...)...) 
if err != nil { @@ -97,7 +95,7 @@ func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) ( imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo("gapic", version.Repo) return c, nil } @@ -115,16 +113,13 @@ func (c *ImageAnnotatorClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *ImageAnnotatorClient) SetGoogleClientInfo(name, version string) { - goVersion := strings.Replace(runtime.Version(), " ", "_", -1) - v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) - c.metadata = metadata.Pairs("x-goog-api-client", v) +func (c *ImageAnnotatorClient) SetGoogleClientInfo(clientName, clientVersion string) { + c.xGoogHeader = fmt.Sprintf("gl-go/%s %s/%s gax/%s grpc/", version.Go(), clientName, clientVersion, gax.Version) } // BatchAnnotateImages run image detection and annotation for a batch of images. func (c *ImageAnnotatorClient) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { - md, _ := metadata.FromContext(ctx) - ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + ctx = insertXGoog(ctx, c.xGoogHeader) var resp *visionpb.BatchAnnotateImagesResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go index f32c6883c..3702eb314 100644 --- a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/vision/apiv1/mock_test.go b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go index 90c0a65f4..0cc6a61ef 100644 --- a/vendor/cloud.google.com/go/vision/apiv1/mock_test.go +++ b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go @@ -1,4 +1,4 @@ -// Copyright 2016, Google Inc. All rights reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/vision/doc.go b/vendor/cloud.google.com/go/vision/doc.go index 1ce5f1984..8f18e3e1a 100644 --- a/vendor/cloud.google.com/go/vision/doc.go +++ b/vendor/cloud.google.com/go/vision/doc.go @@ -43,9 +43,9 @@ Use NewImageFromReader to obtain an image from any io.Reader, such as an open fi img, err := vision.NewImageFromReader(f) if err != nil { ... } -Use NewImageFromGCS to refer to an image in Google Cloud Storage: +Use NewImageFromURI to refer to an image in Google Cloud Storage: - img := vision.NewImageFromGCS("gs://my-bucket/my-image.png") + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") Annotating Images @@ -92,5 +92,10 @@ provides easy access to the positions of facial features: fmt.Println(faces[0].Face.Eyes.Left.Pupil) This package is experimental and subject to API changes. 
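
Taken together, the renamed constructor and the DetectWeb/CropHints helpers added further down in vendor/cloud.google.com/go/vision/vision.go can be exercised roughly as below. This is a hedged sketch, not part of the vendored change: vision.NewClient, Close, and the example URL are assumed from the package's existing surface rather than shown in this hunk.

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/vision"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := vision.NewClient(ctx) // assumed from the package's existing API
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// NewImageFromURI (formerly NewImageFromGCS) now accepts a public URL
	// as well as a gs:// URI.
	img := vision.NewImageFromURI("https://www.example.com/my-image.png")

	// Web annotations, exposed by this change as Annotations.Web / Client.DetectWeb.
	web, err := client.DetectWeb(ctx, img)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range web.WebEntities {
		fmt.Println(e.Description, e.Score)
	}

	// Crop hints for a 4:3 aspect ratio (1.33333, per the CropHintsParams docs).
	hints, err := client.CropHints(ctx, img, &vision.CropHintsParams{
		AspectRatios: []float32{1.33333},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, h := range hints {
		fmt.Println(h.BoundingPoly, h.Confidence, h.ImportanceFraction)
	}
}
```
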
+ +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. */ package vision // import "cloud.google.com/go/vision" diff --git a/vendor/cloud.google.com/go/vision/examples_test.go b/vendor/cloud.google.com/go/vision/examples_test.go index cfdcb4f55..a63d6368f 100644 --- a/vendor/cloud.google.com/go/vision/examples_test.go +++ b/vendor/cloud.google.com/go/vision/examples_test.go @@ -48,8 +48,8 @@ func Example_NewImageFromReader() { fmt.Println(img) } -func Example_NewImageFromGCS() { - img := vision.NewImageFromGCS("gs://my-bucket/my-image.png") +func Example_NewImageFromURI() { + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") fmt.Println(img) } @@ -60,7 +60,7 @@ func ExampleClient_Annotate_oneImage() { // TODO: handle error. } annsSlice, err := client.Annotate(ctx, &vision.AnnotateRequest{ - Image: vision.NewImageFromGCS("gs://my-bucket/my-image.png"), + Image: vision.NewImageFromURI("gs://my-bucket/my-image.png"), MaxLogos: 100, MaxTexts: 100, SafeSearch: true, @@ -89,7 +89,7 @@ func ExampleClient_DetectFaces() { if err != nil { // TODO: handle error. } - img := vision.NewImageFromGCS("gs://my-bucket/my-image.png") + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") faces, err := client.DetectFaces(ctx, img, 10) if err != nil { // TODO: handle error. diff --git a/vendor/cloud.google.com/go/vision/image.go b/vendor/cloud.google.com/go/vision/image.go index 3bcb2362e..03bf579fc 100644 --- a/vendor/cloud.google.com/go/vision/image.go +++ b/vendor/cloud.google.com/go/vision/image.go @@ -27,7 +27,7 @@ import ( type Image struct { // Exactly one of content and gcsURI will be non-zero. content []byte // raw image bytes - gcsURI string // URI of the form "gs://BUCKET/OBJECT" + uri string // URI of the form "gs://BUCKET/OBJECT", or public URL // Rect is a rectangle on the Earth's surface represented by the // image. It is optional. @@ -60,13 +60,13 @@ func NewImageFromReader(r io.ReadCloser) (*Image, error) { return &Image{content: bytes}, nil } -// NewImageFromGCS returns an image that refers to an object in Google Cloud Storage. -// gcsPath must be a valid Google Cloud Storage URI of the form "gs://BUCKET/OBJECT". +// NewImageFromURI returns an image that refers to an object in Google Cloud Storage +// (when the uri is of the form "gs://BUCKET/OBJECT") or at a public URL. // // You may optionally set Rect and LanguageHints on the returned Image before // using it. -func NewImageFromGCS(gcsURI string) *Image { - return &Image{gcsURI: gcsURI} +func NewImageFromURI(uri string) *Image { + return &Image{uri: uri} } // toProtos converts the Image to the two underlying API protos it represents, @@ -76,8 +76,8 @@ func (img *Image) toProtos() (*pb.Image, *pb.ImageContext) { switch { case img.content != nil: pimg = &pb.Image{Content: img.content} - case img.gcsURI != "": - pimg = &pb.Image{Source: &pb.ImageSource{GcsImageUri: img.gcsURI}} + case img.uri != "": + pimg = &pb.Image{Source: &pb.ImageSource{ImageUri: img.uri}} } var pctx *pb.ImageContext @@ -87,6 +87,5 @@ func (img *Image) toProtos() (*pb.Image, *pb.ImageContext) { LanguageHints: img.LanguageHints, } } - return pimg, pctx } diff --git a/vendor/cloud.google.com/go/vision/image_test.go b/vendor/cloud.google.com/go/vision/image_test.go new file mode 100644 index 000000000..0aa554d12 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/image_test.go @@ -0,0 +1,41 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "reflect" + "testing" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func TestImageToProtos(t *testing.T) { + const url = "https://www.example.com/test.jpg" + langHints := []string{"en", "fr"} + img := NewImageFromURI("https://www.example.com/test.jpg") + img.LanguageHints = langHints + + goti, gotc := img.toProtos() + wanti := &pb.Image{Source: &pb.ImageSource{ImageUri: url}} + if !reflect.DeepEqual(goti, wanti) { + t.Errorf("got %+v, want %+v", goti, wanti) + } + wantc := &pb.ImageContext{ + LanguageHints: langHints, + } + if !reflect.DeepEqual(gotc, wantc) { + t.Errorf("got %+v, want %+v", gotc, wantc) + } +} diff --git a/vendor/cloud.google.com/go/vision/vision.go b/vendor/cloud.google.com/go/vision/vision.go index b21431a8c..925d012a6 100644 --- a/vendor/cloud.google.com/go/vision/vision.go +++ b/vendor/cloud.google.com/go/vision/vision.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +// TODO(jba): test crop hints, text annotation, web annotation +// TODO(jba): expose DOCUMENT_TEXT annotation + package vision import ( @@ -89,6 +92,10 @@ type AnnotateRequest struct { SafeSearch bool // ImageProps specifies whether image properties should be obtained for the image. ImageProps bool + // Web specifies whether web annotations should be obtained for the image. + Web bool + // CropHints specifies whether crop hints should be computed for the image. + CropHints *CropHintsParams } func (ar *AnnotateRequest) toProto() *pb.AnnotateImageRequest { @@ -124,6 +131,18 @@ func (ar *AnnotateRequest) toProto() *pb.AnnotateImageRequest { if ar.ImageProps { add(pb.Feature_IMAGE_PROPERTIES, 0) } + if ar.Web { + add(pb.Feature_WEB_DETECTION, 0) + } + if ar.CropHints != nil { + add(pb.Feature_CROP_HINTS, 0) + if ictx == nil { + ictx = &pb.ImageContext{} + } + ictx.CropHintsParams = &pb.CropHintsParams{ + AspectRatios: ar.CropHints.AspectRatios, + } + } return &pb.AnnotateImageRequest{ Image: img, Features: features, @@ -131,6 +150,17 @@ func (ar *AnnotateRequest) toProto() *pb.AnnotateImageRequest { } } +// CropHintsParams are parameters for a request for crop hints. +type CropHintsParams struct { + // Aspect ratios for desired crop hints, representing the ratio of the + // width to the height of the image. For example, if the desired aspect + // ratio is 4:3, the corresponding float value should be 1.33333. If not + // specified, the best possible crop is returned. The number of provided + // aspect ratios is limited to a maximum of 16; any aspect ratios provided + // after the 16th are ignored. + AspectRatios []float32 +} + // Called for a single image and a single feature. 
func (c *Client) annotateOne(ctx context.Context, req *AnnotateRequest) (*Annotations, error) { annsSlice, err := c.Annotate(ctx, req) @@ -216,6 +246,24 @@ func (c *Client) DetectImageProps(ctx context.Context, img *Image) (*ImageProps, return anns.ImageProps, nil } +// DetectWeb computes a web annotation on the image. +func (c *Client) DetectWeb(ctx context.Context, img *Image) (*WebDetection, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, Web: true}) + if err != nil { + return nil, err + } + return anns.Web, nil +} + +// CropHints computes crop hints for the image. +func (c *Client) CropHints(ctx context.Context, img *Image, params *CropHintsParams) ([]*CropHint, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, CropHints: params}) + if err != nil { + return nil, err + } + return anns.CropHints, nil +} + // A Likelihood is an approximate representation of a probability. type Likelihood int diff --git a/vendor/cloud.google.com/go/vision/vision_test.go b/vendor/cloud.google.com/go/vision/vision_test.go index f53134a1c..ac4786de1 100644 --- a/vendor/cloud.google.com/go/vision/vision_test.go +++ b/vendor/cloud.google.com/go/vision/vision_test.go @@ -20,6 +20,7 @@ import ( "testing" "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" "google.golang.org/api/option" ) @@ -33,7 +34,7 @@ func TestAnnotate(t *testing.T) { path string // path to image file, relative to testdata // If one of these is true, we expect that annotation to be non-nil. faces, landmarks, logos, labels, texts bool - // We always expect safe search and image properties to be present. + // We always expect safe search, image properties, web and crop hints to be present. }{ {path: "face.jpg", faces: true, labels: true}, {path: "cat.jpg", labels: true}, @@ -51,14 +52,20 @@ func TestAnnotate(t *testing.T) { MaxLogos: 1, MaxLabels: 1, MaxTexts: 1, + Web: true, SafeSearch: true, ImageProps: true, + CropHints: &CropHintsParams{}, }) if err != nil { t.Fatalf("annotating %s: %v", test.path, err) } anns := annsSlice[0] p := map[bool]string{true: "present", false: "absent"} + if anns.Error != nil { + t.Errorf("%s: got Error %v; want nil", test.path, anns.Error) + continue + } if got, want := (anns.Faces != nil), test.faces; got != want { t.Errorf("%s: faces %s, want %s", test.path, p[got], p[want]) } @@ -80,8 +87,11 @@ func TestAnnotate(t *testing.T) { if got, want := (anns.ImageProps != nil), true; got != want { t.Errorf("%s: image properties %s, want %s", test.path, p[got], p[want]) } - if anns.Error != nil { - t.Errorf("%s: got Error %v; want nil", test.path, anns.Error) + if got, want := (anns.Web != nil), true; got != want { + t.Errorf("%s: web %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.CropHints != nil), true; got != want { + t.Errorf("%s: crop hints %s, want %s", test.path, p[got], p[want]) } } } diff --git a/vendor/code.cloudfoundry.org/lager/LICENSE b/vendor/code.cloudfoundry.org/lager/LICENSE index 5c304d1a4..f49a4e16e 100644 --- a/vendor/code.cloudfoundry.org/lager/LICENSE +++ b/vendor/code.cloudfoundry.org/lager/LICENSE @@ -1,4 +1,4 @@ -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +178,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +186,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -198,4 +198,4 @@ Apache License distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/lager/NOTICE b/vendor/code.cloudfoundry.org/lager/NOTICE index ff96b880b..3c8dd5b60 100644 --- a/vendor/code.cloudfoundry.org/lager/NOTICE +++ b/vendor/code.cloudfoundry.org/lager/NOTICE @@ -1,6 +1,6 @@ -lager +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. -Copyright (c) 2014-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,3 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. 
diff --git a/vendor/code.cloudfoundry.org/lager/chug/package.go b/vendor/code.cloudfoundry.org/lager/chug/package.go new file mode 100644 index 000000000..a29db591b --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/chug/package.go @@ -0,0 +1 @@ +package chug // import "code.cloudfoundry.org/lager/chug" diff --git a/vendor/code.cloudfoundry.org/lager/ginkgoreporter/package.go b/vendor/code.cloudfoundry.org/lager/ginkgoreporter/package.go new file mode 100644 index 000000000..a9000a1b6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/ginkgoreporter/package.go @@ -0,0 +1 @@ +package ginkgoreporter // import "code.cloudfoundry.org/lager/ginkgoreporter" diff --git a/vendor/code.cloudfoundry.org/lager/lagerflags/integration/package.go b/vendor/code.cloudfoundry.org/lager/lagerflags/integration/package.go new file mode 100644 index 000000000..0a628ffdd --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/lagerflags/integration/package.go @@ -0,0 +1 @@ +package main // import "code.cloudfoundry.org/lager/lagerflags/integration" diff --git a/vendor/code.cloudfoundry.org/lager/lagerflags/package.go b/vendor/code.cloudfoundry.org/lager/lagerflags/package.go new file mode 100644 index 000000000..84ef39d41 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/lagerflags/package.go @@ -0,0 +1 @@ +package lagerflags // import "code.cloudfoundry.org/lager/lagerflags" diff --git a/vendor/code.cloudfoundry.org/lager/lagertest/package.go b/vendor/code.cloudfoundry.org/lager/lagertest/package.go new file mode 100644 index 000000000..ed8804820 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/lagertest/package.go @@ -0,0 +1 @@ +package lagertest // import "code.cloudfoundry.org/lager/lagertest" diff --git a/vendor/code.cloudfoundry.org/lager/package.go b/vendor/code.cloudfoundry.org/lager/package.go new file mode 100644 index 000000000..7e8b063de --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/package.go @@ -0,0 +1 @@ +package lager // import "code.cloudfoundry.org/lager" diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go index 83607ff6c..ac7e51bfb 100644 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go @@ -43,7 +43,7 @@ import ( "io/ioutil" "github.com/golang/protobuf/proto" - protobuf "google.golang.org/genproto/protobuf" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" ) // extractFile extracts a FileDescriptorProto from a gzip'd buffer. 
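
The descriptor.go hunk above swaps the source of the descriptor proto types from google.golang.org/genproto/protobuf to protoc-gen-go/descriptor. A rough, illustrative sketch of what that touches, assuming the package's ForMessage helper and an arbitrary generated message from proto/testdata (the same testdata package the adjacent test file imports); this is not part of the change itself.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/descriptor"
	tpb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	// ForMessage returns the FileDescriptorProto and DescriptorProto for a
	// generated message; after the import swap above, those returned types
	// come from github.com/golang/protobuf/protoc-gen-go/descriptor rather
	// than google.golang.org/genproto/protobuf.
	fd, md := descriptor.ForMessage(&tpb.MyMessage{})
	fmt.Println(fd.GetName(), md.GetName())
}
```
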
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go index 282a1e3a7..27b0729cb 100644 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go @@ -6,7 +6,7 @@ import ( "github.com/golang/protobuf/descriptor" tpb "github.com/golang/protobuf/proto/testdata" - protobuf "google.golang.org/genproto/protobuf" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" ) func TestMessage(t *testing.T) { diff --git a/vendor/github.com/mattn/go-sqlite3/.travis.yml b/vendor/github.com/mattn/go-sqlite3/.travis.yml index b1a28860a..2a58cd8c4 100644 --- a/vendor/github.com/mattn/go-sqlite3/.travis.yml +++ b/vendor/github.com/mattn/go-sqlite3/.travis.yml @@ -7,8 +7,8 @@ env: - GOTAGS=trace #- GOTAGS="libsqlite3 trace" # trusty is too old for this go: - - 1.5 - - 1.6 + - 1.7 + - 1.8 - tip before_install: - go get github.com/mattn/goveralls diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md index a5379c322..2d114619e 100644 --- a/vendor/github.com/mattn/go-sqlite3/README.md +++ b/vendor/github.com/mattn/go-sqlite3/README.md @@ -45,19 +45,21 @@ FAQ Use `go build --tags "icu"` + Available extensions: `json1`, `fts5`, `icu` + * Can't build go-sqlite3 on windows 64bit. > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit. - > See: https://github.com/mattn/go-sqlite3/issues/27 + > See: [#27](https://github.com/mattn/go-sqlite3/issues/27) * Getting insert error while query is opened. > You can pass some arguments into the connection string, for example, a URI. - > See: https://github.com/mattn/go-sqlite3/issues/39 + > See: [#39](https://github.com/mattn/go-sqlite3/issues/39) * Do you want to cross compile? mingw on Linux or Mac? - > See: https://github.com/mattn/go-sqlite3/issues/106 + > See: [#106](https://github.com/mattn/go-sqlite3/issues/106) > See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html * Want to get time.Time with current locale @@ -66,7 +68,7 @@ FAQ * Can use this in multiple routines concurrently? - Yes for readonly. But, No for writable. See #50, #51, #209. + Yes for readonly. But, No for writable. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209). * Why is it racy if I use a `sql.Open("sqlite", ":memory:")` database? @@ -75,7 +77,7 @@ FAQ specified ":memory:", that connection will see a brand new database. A workaround is to use "file::memory:?mode=memory&cache=shared". Every connection to this string will point to the same in-memory database. See - #204 for more info. + [#204](https://github.com/mattn/go-sqlite3/issues/204) for more info. 
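
The FAQ entry above on ":memory:" races can be illustrated with a short sketch (not part of the vendored README): every database/sql connection opened with the shared-cache DSN sees the same in-memory database, whereas a plain ":memory:" DSN gives each pooled connection its own empty database.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Shared-cache in-memory DSN from the FAQ: all connections in the pool
	// point at the same in-memory database.
	db, err := sql.Open("sqlite3", "file::memory:?mode=memory&cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("create table foo(id int, value text)"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("insert into foo values(1, 'foo')"); err != nil {
		log.Fatal(err)
	}

	// A second connection from the pool still sees the table, which would
	// not be the case with a bare ":memory:" DSN.
	var value string
	if err := db.QueryRow("select value from foo where id = 1").Scan(&value); err != nil {
		log.Fatal(err)
	}
	log.Println(value)
}
```
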
License ------- diff --git a/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go b/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go index 3059f9eab..17bddebde 100644 --- a/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go +++ b/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go @@ -2,9 +2,10 @@ package main import ( "database/sql" - "github.com/mattn/go-sqlite3" "log" "os" + + "github.com/mattn/go-sqlite3" ) func main() { @@ -19,35 +20,35 @@ func main() { os.Remove("./foo.db") os.Remove("./bar.db") - destDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db") + srcDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db") if err != nil { log.Fatal(err) } - defer destDb.Close() - destDb.Ping() + defer srcDb.Close() + srcDb.Ping() - _, err = destDb.Exec("create table foo(id int, value text)") + _, err = srcDb.Exec("create table foo(id int, value text)") if err != nil { log.Fatal(err) } - _, err = destDb.Exec("insert into foo values(1, 'foo')") + _, err = srcDb.Exec("insert into foo values(1, 'foo')") if err != nil { log.Fatal(err) } - _, err = destDb.Exec("insert into foo values(2, 'bar')") + _, err = srcDb.Exec("insert into foo values(2, 'bar')") if err != nil { log.Fatal(err) } - _, err = destDb.Query("select * from foo") + _, err = srcDb.Query("select * from foo") if err != nil { log.Fatal(err) } - srcDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db") + destDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db") if err != nil { log.Fatal(err) } - defer srcDb.Close() - srcDb.Ping() + defer destDb.Close() + destDb.Ping() bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main") if err != nil { diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 7a852e62d..825e7d889 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,8 @@ #ifndef USE_LIBSQLITE3 +#define SQLITE_DISABLE_INTRINSIC 1 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.15.1. By combining all the individual C code files into this +** version 3.17.0. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -205,12 +206,28 @@ # define _LARGEFILE_SOURCE 1 #endif -/* What version of GCC is being used. 0 means GCC is not being used */ -#ifdef __GNUC__ +/* The GCC_VERSION, CLANG_VERSION, and MSVC_VERSION macros are used to +** conditionally include optimizations for each of these compilers. A +** value of 0 means that compiler is not being used. 
The +** SQLITE_DISABLE_INTRINSIC macro means do not use any compiler-specific +** optimizations, and hence set all compiler macros to 0 +*/ +#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC) # define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__) #else # define GCC_VERSION 0 #endif +#if defined(__clang__) && !defined(_WIN32) && !defined(SQLITE_DISABLE_INTRINSIC) +# define CLANG_VERSION \ + (__clang_major__*1000000+__clang_minor__*1000+__clang_patchlevel__) +#else +# define CLANG_VERSION 0 +#endif +#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC) +# define MSVC_VERSION _MSC_VER +#else +# define MSVC_VERSION 0 +#endif /* Needed for various definitions... */ #if defined(__GNUC__) && !defined(_GNU_SOURCE) @@ -382,13 +399,13 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.15.1" -#define SQLITE_VERSION_NUMBER 3015001 -#define SQLITE_SOURCE_ID "2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36" +#define SQLITE_VERSION "3.17.0" +#define SQLITE_VERSION_NUMBER 3017000 +#define SQLITE_SOURCE_ID "2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c" /* ** CAPI3REF: Run-Time Library Version Numbers -** KEYWORDS: sqlite3_version, sqlite3_sourceid +** KEYWORDS: sqlite3_version sqlite3_sourceid ** ** These interfaces provide the same information as the [SQLITE_VERSION], ** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros @@ -520,7 +537,11 @@ typedef struct sqlite3 sqlite3; */ #ifdef SQLITE_INT64_TYPE typedef SQLITE_INT64_TYPE sqlite_int64; - typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; +# ifdef SQLITE_UINT64_TYPE + typedef SQLITE_UINT64_TYPE sqlite_uint64; +# else + typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; +# endif #elif defined(_MSC_VER) || defined(__BORLANDC__) typedef __int64 sqlite_int64; typedef unsigned __int64 sqlite_uint64; @@ -833,7 +854,7 @@ SQLITE_API int sqlite3_exec( ** file that were written at the application level might have changed ** and that adjacent bytes, even bytes within the same sector are ** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN -** flag indicate that a file cannot be deleted when open. The +** flag indicates that a file cannot be deleted when open. The ** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on ** read-only media and cannot be changed even by processes with ** elevated privileges. @@ -983,6 +1004,9 @@ struct sqlite3_file { **
  • [SQLITE_IOCAP_ATOMIC64K] **
  • [SQLITE_IOCAP_SAFE_APPEND] **
  • [SQLITE_IOCAP_SEQUENTIAL] +**
  • [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN] +**
  • [SQLITE_IOCAP_POWERSAFE_OVERWRITE] +**
  • [SQLITE_IOCAP_IMMUTABLE] ** ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of @@ -1296,6 +1320,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_VFS_POINTER 27 #define SQLITE_FCNTL_JOURNAL_POINTER 28 #define SQLITE_FCNTL_WIN32_GET_HANDLE 29 +#define SQLITE_FCNTL_PDB 30 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -2248,6 +2273,18 @@ struct sqlite3_mem_methods { ** until after the database connection closes. ** ** +**
    SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
    +**
    Usually, when a database in wal mode is closed or detached from a +** database handle, SQLite checks if this will mean that there are now no +** connections at all to the database. If so, it performs a checkpoint +** operation before closing the connection. This option may be used to +** override this behaviour. The first parameter passed to this operation +** is an integer - non-zero to disable checkpoints-on-close, or zero (the +** default) to enable them. The second parameter is a pointer to an integer +** into which is written 0 or 1 to indicate whether checkpoints-on-close +** have been disabled - 0 if they are not disabled, 1 if they are. +**
    +** ** */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ @@ -2256,6 +2293,7 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */ +#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */ /* @@ -3857,6 +3895,10 @@ SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt); ** sqlite3_stmt_readonly() to return true since, while those statements ** change the configuration of a database connection, they do not make ** changes to the content of the database files on disk. +** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since +** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and +** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so +** sqlite3_stmt_readonly() returns false for those commands. */ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); @@ -4139,8 +4181,12 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); ** METHOD: sqlite3_stmt ** ** ^Return the number of columns in the result set returned by the -** [prepared statement]. ^This routine returns 0 if pStmt is an SQL -** statement that does not return data (for example an [UPDATE]). +** [prepared statement]. ^If this routine returns 0, that means the +** [prepared statement] returns no data (for example an [UPDATE]). +** ^However, just because this routine returns a positive number does not +** mean that one or more rows of data will be returned. ^A SELECT statement +** will always have a positive sqlite3_column_count() but depending on the +** WHERE clause constraints and the table content, it might return no rows. ** ** See also: [sqlite3_data_count()] */ @@ -5649,7 +5695,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); ** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified. ** ** ^In the current implementation, the update hook -** is not invoked when duplication rows are deleted because of an +** is not invoked when conflicting rows are deleted because of an ** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook ** invoked when rows are deleted using the [truncate optimization]. ** The exceptions defined in this paragraph might change in a future @@ -6431,6 +6477,12 @@ typedef struct sqlite3_blob sqlite3_blob; ** [database connection] error code and message accessible via ** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. ** +** A BLOB referenced by sqlite3_blob_open() may be read using the +** [sqlite3_blob_read()] interface and modified by using +** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a +** different row of the same table using the [sqlite3_blob_reopen()] +** interface. However, the column, table, or database of a [BLOB handle] +** cannot be changed after the [BLOB handle] is opened. ** ** ^(If the row that a BLOB handle points to is modified by an ** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects @@ -6454,6 +6506,10 @@ typedef struct sqlite3_blob sqlite3_blob; ** ** To avoid a resource leak, every open [BLOB handle] should eventually ** be released by a call to [sqlite3_blob_close()]. +** +** See also: [sqlite3_blob_close()], +** [sqlite3_blob_reopen()], [sqlite3_blob_read()], +** [sqlite3_blob_bytes()], [sqlite3_blob_write()]. 
*/ SQLITE_API int sqlite3_blob_open( sqlite3*, @@ -6469,11 +6525,11 @@ SQLITE_API int sqlite3_blob_open( ** CAPI3REF: Move a BLOB Handle to a New Row ** METHOD: sqlite3_blob ** -** ^This function is used to move an existing blob handle so that it points +** ^This function is used to move an existing [BLOB handle] so that it points ** to a different row of the same database table. ^The new row is identified ** by the rowid value passed as the second argument. Only the row can be ** changed. ^The database, table and column on which the blob handle is open -** remain the same. Moving an existing blob handle to a new row can be +** remain the same. Moving an existing [BLOB handle] to a new row is ** faster than closing the existing handle and opening a new one. ** ** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - @@ -8402,7 +8458,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** ** ^The [sqlite3_preupdate_hook()] interface registers a callback function ** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation -** on a [rowid table]. +** on a database table. ** ^At most one preupdate hook may be registered at a time on a single ** [database connection]; each call to [sqlite3_preupdate_hook()] overrides ** the previous setting. @@ -8411,9 +8467,9 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as ** the first parameter to callbacks. ** -** ^The preupdate hook only fires for changes to [rowid tables]; the preupdate -** hook is not invoked for changes to [virtual tables] or [WITHOUT ROWID] -** tables. +** ^The preupdate hook only fires for changes to real database tables; the +** preupdate hook is not invoked for changes to [virtual tables] or to +** system tables like sqlite_master or sqlite_stat1. ** ** ^The second parameter to the preupdate callback is a pointer to ** the [database connection] that registered the preupdate hook. @@ -8427,12 +8483,16 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** databases.)^ ** ^The fifth parameter to the preupdate callback is the name of the ** table that is being modified. -** ^The sixth parameter to the preupdate callback is the initial [rowid] of the -** row being changes for SQLITE_UPDATE and SQLITE_DELETE changes and is -** undefined for SQLITE_INSERT changes. -** ^The seventh parameter to the preupdate callback is the final [rowid] of -** the row being changed for SQLITE_UPDATE and SQLITE_INSERT changes and is -** undefined for SQLITE_DELETE changes. +** +** For an UPDATE or DELETE operation on a [rowid table], the sixth +** parameter passed to the preupdate callback is the initial [rowid] of the +** row being modified or deleted. For an INSERT operation on a rowid table, +** or any operation on a WITHOUT ROWID table, the value of the sixth +** parameter is undefined. For an INSERT or UPDATE on a rowid table the +** seventh parameter is the final rowid value of the row being inserted +** or updated. The value of the seventh parameter passed to the callback +** function is not defined for operations on WITHOUT ROWID tables, or for +** INSERT operations on rowid tables. 
** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces @@ -8472,7 +8532,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** ** See also: [sqlite3_update_hook()] */ -SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook( +#if defined(SQLITE_ENABLE_PREUPDATE_HOOK) +SQLITE_API void *sqlite3_preupdate_hook( sqlite3 *db, void(*xPreUpdate)( void *pCtx, /* Copy of third arg to preupdate_hook() */ @@ -8485,10 +8546,11 @@ SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook( ), void* ); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_count(sqlite3 *); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_depth(sqlite3 *); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_count(sqlite3 *); +SQLITE_API int sqlite3_preupdate_depth(sqlite3 *); +SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); +#endif /* ** CAPI3REF: Low-level system error code @@ -8504,7 +8566,7 @@ SQLITE_API int sqlite3_system_errno(sqlite3*); /* ** CAPI3REF: Database Snapshot -** KEYWORDS: {snapshot} +** KEYWORDS: {snapshot} {sqlite3_snapshot} ** EXPERIMENTAL ** ** An instance of the snapshot object records the state of a [WAL mode] @@ -8528,7 +8590,9 @@ SQLITE_API int sqlite3_system_errno(sqlite3*); ** to an historical snapshot (if possible). The destructor for ** sqlite3_snapshot objects is [sqlite3_snapshot_free()]. */ -typedef struct sqlite3_snapshot sqlite3_snapshot; +typedef struct sqlite3_snapshot { + unsigned char hidden[48]; +} sqlite3_snapshot; /* ** CAPI3REF: Record A Database Snapshot @@ -8539,9 +8603,32 @@ typedef struct sqlite3_snapshot sqlite3_snapshot; ** schema S in database connection D. ^On success, the ** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly ** created [sqlite3_snapshot] object into *P and returns SQLITE_OK. -** ^If schema S of [database connection] D is not a [WAL mode] database -** that is in a read transaction, then [sqlite3_snapshot_get(D,S,P)] -** leaves the *P value unchanged and returns an appropriate [error code]. +** If there is not already a read-transaction open on schema S when +** this function is called, one is opened automatically. +** +** The following must be true for this function to succeed. If any of +** the following statements are false when sqlite3_snapshot_get() is +** called, SQLITE_ERROR is returned. The final value of *P is undefined +** in this case. +** +**
+** <ul>
+**   <li> The database handle must be in [autocommit mode].
+**
+**   <li> Schema S of [database connection] D must be a [WAL mode] database.
+**
+**   <li> There must not be a write transaction open on schema S of database
+**        connection D.
+**
+**   <li> One or more transactions must have been written to the current wal
+**        file since it was created on disk (by any connection). This means
+**        that a snapshot cannot be taken on a wal mode database with no wal
+**        file immediately after it is first opened. At least one transaction
+**        must be written to it first.
+** </ul>
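As a minimal sketch of how these preconditions are met in practice (editorial illustration only; assumes a WAL-mode database, a build with SQLITE_ENABLE_SNAPSHOT, and two connections db1/db2 to the same file):

#include "sqlite3.h"

static int snapshot_demo(sqlite3 *db1, sqlite3 *db2){
  sqlite3_snapshot *pSnap = 0;
  int rc;

  /* Record the current state of schema "main" on db1.  Per the text above,
  ** a read transaction is opened automatically if none is open yet. */
  rc = sqlite3_snapshot_get(db1, "main", &pSnap);
  if( rc!=SQLITE_OK ) return rc;

  /* On db2, start a read transaction and point it at the same snapshot.
  ** sqlite3_snapshot_open() must run before db2 reads anything. */
  rc = sqlite3_exec(db2, "BEGIN", 0, 0, 0);
  if( rc==SQLITE_OK ) rc = sqlite3_snapshot_open(db2, "main", pSnap);

  /* ... queries on db2 see the snapshotted state here ... */

  sqlite3_exec(db2, "COMMIT", 0, 0, 0);
  sqlite3_snapshot_free(pSnap);
  return rc;
}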
    +** +** This function may also return SQLITE_NOMEM. If it is called with the +** database handle in autocommit mode but fails for some other reason, +** whether or not a read transaction is opened on schema S is undefined. ** ** The [sqlite3_snapshot] object returned from a successful call to ** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()] @@ -8634,6 +8721,28 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( sqlite3_snapshot *p2 ); +/* +** CAPI3REF: Recover snapshots from a wal file +** EXPERIMENTAL +** +** If all connections disconnect from a database file but do not perform +** a checkpoint, the existing wal file is opened along with the database +** file the next time the database is opened. At this point it is only +** possible to successfully call sqlite3_snapshot_open() to open the most +** recent snapshot of the database (the one at the head of the wal file), +** even though the wal file may contain other valid snapshots for which +** clients have sqlite3_snapshot handles. +** +** This function attempts to scan the wal file associated with database zDb +** of database handle db and make all valid snapshots available to +** sqlite3_snapshot_open(). It is an error if there is already a read +** transaction open on the database, or if the database is not a wal mode +** database. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -8819,7 +8928,7 @@ typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; ** attached database. It is not an error if database zDb is not attached ** to the database when the session object is created. */ -int sqlite3session_create( +SQLITE_API int sqlite3session_create( sqlite3 *db, /* Database handle */ const char *zDb, /* Name of db (e.g. "main") */ sqlite3_session **ppSession /* OUT: New session object */ @@ -8837,7 +8946,7 @@ int sqlite3session_create( ** are attached is closed. Refer to the documentation for ** [sqlite3session_create()] for details. */ -void sqlite3session_delete(sqlite3_session *pSession); +SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* @@ -8857,7 +8966,7 @@ void sqlite3session_delete(sqlite3_session *pSession); ** The return value indicates the final state of the session object: 0 if ** the session is disabled, or 1 if it is enabled. */ -int sqlite3session_enable(sqlite3_session *pSession, int bEnable); +SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable); /* ** CAPI3REF: Set Or Clear the Indirect Change Flag @@ -8886,7 +8995,7 @@ int sqlite3session_enable(sqlite3_session *pSession, int bEnable); ** The return value indicates the final state of the indirect flag: 0 if ** it is clear, or 1 if it is set. */ -int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); +SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); /* ** CAPI3REF: Attach A Table To A Session Object @@ -8916,7 +9025,7 @@ int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); ** SQLITE_OK is returned if the call completes without error. Or, if an error ** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned. 
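A hedged sketch of the session workflow these declarations describe (create, attach, capture, free); the table t1, the UPDATE statement, and the assumption that a NULL table name attaches all tables are illustrative, and the build is assumed to define SQLITE_ENABLE_SESSION and SQLITE_ENABLE_PREUPDATE_HOOK:

#include "sqlite3.h"

static int record_changes(sqlite3 *db, void **ppChangeset, int *pnChangeset){
  sqlite3_session *pSession = 0;
  int rc = sqlite3session_create(db, "main", &pSession);
  if( rc==SQLITE_OK ){
    rc = sqlite3session_attach(pSession, 0);   /* track every table in "main" */
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "UPDATE t1 SET b=b+1 WHERE a<10", 0, 0, 0);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3session_changeset(pSession, pnChangeset, ppChangeset);
  }
  sqlite3session_delete(pSession);
  return rc;   /* on success the caller frees *ppChangeset with sqlite3_free() */
}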
*/ -int sqlite3session_attach( +SQLITE_API int sqlite3session_attach( sqlite3_session *pSession, /* Session object */ const char *zTab /* Table name */ ); @@ -8930,7 +9039,7 @@ int sqlite3session_attach( ** If xFilter returns 0, changes is not tracked. Note that once a table is ** attached, xFilter will not be called again. */ -void sqlite3session_table_filter( +SQLITE_API void sqlite3session_table_filter( sqlite3_session *pSession, /* Session object */ int(*xFilter)( void *pCtx, /* Copy of third arg to _filter_table() */ @@ -9043,7 +9152,7 @@ void sqlite3session_table_filter( ** another field of the same row is updated while the session is enabled, the ** resulting changeset will contain an UPDATE change that updates both fields. */ -int sqlite3session_changeset( +SQLITE_API int sqlite3session_changeset( sqlite3_session *pSession, /* Session object */ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ void **ppChangeset /* OUT: Buffer containing changeset */ @@ -9087,7 +9196,8 @@ int sqlite3session_changeset( ** the from-table, a DELETE record is added to the session object. ** **
  • For each row (primary key) that exists in both tables, but features -** different in each, an UPDATE record is added to the session. +** different non-PK values in each, an UPDATE record is added to the +** session. ** ** ** To clarify, if this function is called and then a changeset constructed @@ -9104,7 +9214,7 @@ int sqlite3session_changeset( ** message. It is the responsibility of the caller to free this buffer using ** sqlite3_free(). */ -int sqlite3session_diff( +SQLITE_API int sqlite3session_diff( sqlite3_session *pSession, const char *zFromDb, const char *zTbl, @@ -9140,7 +9250,7 @@ int sqlite3session_diff( ** a single table are grouped together, tables appear in the order in which ** they were attached to the session object). */ -int sqlite3session_patchset( +SQLITE_API int sqlite3session_patchset( sqlite3_session *pSession, /* Session object */ int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */ void **ppPatchset /* OUT: Buffer containing changeset */ @@ -9161,7 +9271,7 @@ int sqlite3session_patchset( ** guaranteed that a call to sqlite3session_changeset() will return a ** changeset containing zero changes. */ -int sqlite3session_isempty(sqlite3_session *pSession); +SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); /* ** CAPI3REF: Create An Iterator To Traverse A Changeset @@ -9196,7 +9306,7 @@ int sqlite3session_isempty(sqlite3_session *pSession); ** the applies to table X, then one for table Y, and then later on visit ** another change for table X. */ -int sqlite3changeset_start( +SQLITE_API int sqlite3changeset_start( sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */ int nChangeset, /* Size of changeset blob in bytes */ void *pChangeset /* Pointer to blob containing changeset */ @@ -9225,7 +9335,7 @@ int sqlite3changeset_start( ** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or ** SQLITE_NOMEM. */ -int sqlite3changeset_next(sqlite3_changeset_iter *pIter); +SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); /* ** CAPI3REF: Obtain The Current Operation From A Changeset Iterator @@ -9253,7 +9363,7 @@ int sqlite3changeset_next(sqlite3_changeset_iter *pIter); ** SQLite error code is returned. The values of the output variables may not ** be trusted in this case. */ -int sqlite3changeset_op( +SQLITE_API int sqlite3changeset_op( sqlite3_changeset_iter *pIter, /* Iterator object */ const char **pzTab, /* OUT: Pointer to table name */ int *pnCol, /* OUT: Number of columns in table */ @@ -9286,7 +9396,7 @@ int sqlite3changeset_op( ** SQLITE_OK is returned and the output variables populated as described ** above. */ -int sqlite3changeset_pk( +SQLITE_API int sqlite3changeset_pk( sqlite3_changeset_iter *pIter, /* Iterator object */ unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */ int *pnCol /* OUT: Number of entries in output array */ @@ -9316,7 +9426,7 @@ int sqlite3changeset_pk( ** If some other error occurs (e.g. an OOM condition), an SQLite error code ** is returned and *ppValue is set to NULL. */ -int sqlite3changeset_old( +SQLITE_API int sqlite3changeset_old( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int iVal, /* Column number */ sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */ @@ -9349,7 +9459,7 @@ int sqlite3changeset_old( ** If some other error occurs (e.g. an OOM condition), an SQLite error code ** is returned and *ppValue is set to NULL. 
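For illustration only, a changeset produced as above can be walked with the iterator functions just declared; dump_changeset is a made-up name and error handling is kept minimal:

#include <stdio.h>
#include "sqlite3.h"

static int dump_changeset(int nChangeset, void *pChangeset){
  sqlite3_changeset_iter *pIter = 0;
  int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
  if( rc!=SQLITE_OK ) return rc;

  while( sqlite3changeset_next(pIter)==SQLITE_ROW ){
    const char *zTab;
    int nCol, op, bIndirect;
    sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect);
    printf("%s on %s (%d columns)\n",
           op==SQLITE_INSERT ? "INSERT" :
           op==SQLITE_DELETE ? "DELETE" : "UPDATE", zTab, nCol);
    if( op!=SQLITE_INSERT ){
      sqlite3_value *pOld = 0;
      sqlite3changeset_old(pIter, 0, &pOld);   /* old value of column 0, or NULL */
    }
    if( op!=SQLITE_DELETE ){
      sqlite3_value *pNew = 0;
      sqlite3changeset_new(pIter, 0, &pNew);   /* new value of column 0, or NULL */
    }
  }
  return sqlite3changeset_finalize(pIter);     /* reports any iteration error */
}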
*/ -int sqlite3changeset_new( +SQLITE_API int sqlite3changeset_new( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int iVal, /* Column number */ sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */ @@ -9376,7 +9486,7 @@ int sqlite3changeset_new( ** If some other error occurs (e.g. an OOM condition), an SQLite error code ** is returned and *ppValue is set to NULL. */ -int sqlite3changeset_conflict( +SQLITE_API int sqlite3changeset_conflict( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int iVal, /* Column number */ sqlite3_value **ppValue /* OUT: Value from conflicting row */ @@ -9392,7 +9502,7 @@ int sqlite3changeset_conflict( ** ** In all other cases this function returns SQLITE_MISUSE. */ -int sqlite3changeset_fk_conflicts( +SQLITE_API int sqlite3changeset_fk_conflicts( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int *pnOut /* OUT: Number of FK violations */ ); @@ -9425,7 +9535,7 @@ int sqlite3changeset_fk_conflicts( ** // An error has occurred ** } */ -int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); +SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); /* ** CAPI3REF: Invert A Changeset @@ -9455,7 +9565,7 @@ int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); ** WARNING/TODO: This function currently assumes that the input is a valid ** changeset. If it is not, the results are undefined. */ -int sqlite3changeset_invert( +SQLITE_API int sqlite3changeset_invert( int nIn, const void *pIn, /* Input changeset */ int *pnOut, void **ppOut /* OUT: Inverse of input */ ); @@ -9484,7 +9594,7 @@ int sqlite3changeset_invert( ** ** Refer to the sqlite3_changegroup documentation below for details. */ -int sqlite3changeset_concat( +SQLITE_API int sqlite3changeset_concat( int nA, /* Number of bytes in buffer pA */ void *pA, /* Pointer to buffer containing changeset A */ int nB, /* Number of bytes in buffer pB */ @@ -9672,7 +9782,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); **
** <ul>
**   <li> The table has the same name as the name recorded in the
**        changeset, and
-**   <li> The table has the same number of columns as recorded in the
+**   <li> The table has at least as many columns as recorded in the
**        changeset, and
    • The table has primary key columns in the same position as ** recorded in the changeset. @@ -9717,7 +9827,11 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** If a row with matching primary key values is found, but one or more of ** the non-primary key fields contains a value different from the original ** row value stored in the changeset, the conflict-handler function is -** invoked with [SQLITE_CHANGESET_DATA] as the second argument. +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the +** database table has more columns than are recorded in the changeset, +** only the values of those non-primary key fields are compared against +** the current database contents - any trailing database table columns +** are ignored. ** ** If no row with matching primary key values is found in the database, ** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] @@ -9732,7 +9846,9 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** **
** <dt>INSERT Changes<dd>
      ** For each INSERT change, an attempt is made to insert the new row into -** the database. +** the database. If the changeset row contains fewer fields than the +** database table, the trailing fields are populated with their default +** values. ** ** If the attempt to insert the row fails because the database already ** contains a row with the same primary key values, the conflict handler @@ -9750,13 +9866,13 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** For each UPDATE change, this function checks if the target database ** contains a row with the same primary key value (or values) as the ** original row values stored in the changeset. If it does, and the values -** stored in all non-primary key columns also match the values stored in -** the changeset the row is updated within the target database. +** stored in all modified non-primary key columns also match the values +** stored in the changeset the row is updated within the target database. ** ** If a row with matching primary key values is found, but one or more of -** the non-primary key fields contains a value different from an original -** row value stored in the changeset, the conflict-handler function is -** invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since +** the modified non-primary key fields contains a value different from an +** original row value stored in the changeset, the conflict-handler function +** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since ** UPDATE changes only contain values for non-primary key fields that are ** to be modified, only those fields need to match the original values to ** avoid the SQLITE_CHANGESET_DATA conflict-handler callback. @@ -9784,7 +9900,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** rolled back, restoring the target database to its original state, and an ** SQLite error code returned. */ -int sqlite3changeset_apply( +SQLITE_API int sqlite3changeset_apply( sqlite3 *db, /* Apply change to "main" db of this handle */ int nChangeset, /* Size of changeset in bytes */ void *pChangeset, /* Changeset blob */ @@ -9985,7 +10101,7 @@ int sqlite3changeset_apply( ** parameter set to a value less than or equal to zero. Other than this, ** no guarantees are made as to the size of the chunks of data returned. 
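To connect the conflict-handling rules above to code, here is a hedged sketch of applying a changeset with a simple policy (replace conflicting rows, skip rows that no longer exist, abort on anything else); on_conflict and apply_changes are illustrative names:

#include "sqlite3.h"

static int on_conflict(void *pCtx, int eConflict, sqlite3_changeset_iter *pIter){
  (void)pCtx; (void)pIter;
  if( eConflict==SQLITE_CHANGESET_DATA ) return SQLITE_CHANGESET_REPLACE;
  if( eConflict==SQLITE_CHANGESET_NOTFOUND ) return SQLITE_CHANGESET_OMIT;
  return SQLITE_CHANGESET_ABORT;      /* roll the whole changeset back */
}

static int apply_changes(sqlite3 *db, int nChangeset, void *pChangeset){
  return sqlite3changeset_apply(db, nChangeset, pChangeset,
                                0,             /* xFilter: apply to all tables */
                                on_conflict,   /* xConflict */
                                0);            /* pCtx */
}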
*/ -int sqlite3changeset_apply_strm( +SQLITE_API int sqlite3changeset_apply_strm( sqlite3 *db, /* Apply change to "main" db of this handle */ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ void *pIn, /* First arg for xInput */ @@ -10000,7 +10116,7 @@ int sqlite3changeset_apply_strm( ), void *pCtx /* First argument passed to xConflict */ ); -int sqlite3changeset_concat_strm( +SQLITE_API int sqlite3changeset_concat_strm( int (*xInputA)(void *pIn, void *pData, int *pnData), void *pInA, int (*xInputB)(void *pIn, void *pData, int *pnData), @@ -10008,23 +10124,23 @@ int sqlite3changeset_concat_strm( int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); -int sqlite3changeset_invert_strm( +SQLITE_API int sqlite3changeset_invert_strm( int (*xInput)(void *pIn, void *pData, int *pnData), void *pIn, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); -int sqlite3changeset_start_strm( +SQLITE_API int sqlite3changeset_start_strm( sqlite3_changeset_iter **pp, int (*xInput)(void *pIn, void *pData, int *pnData), void *pIn ); -int sqlite3session_changeset_strm( +SQLITE_API int sqlite3session_changeset_strm( sqlite3_session *pSession, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); -int sqlite3session_patchset_strm( +SQLITE_API int sqlite3session_patchset_strm( sqlite3_session *pSession, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut @@ -10931,6 +11047,7 @@ struct fts5_api { # include # pragma intrinsic(_byteswap_ushort) # pragma intrinsic(_byteswap_ulong) +# pragma intrinsic(_byteswap_uint64) # pragma intrinsic(_ReadWriteBarrier) # else # include @@ -11469,6 +11586,18 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #include #include +/* +** Use a macro to replace memcpy() if compiled with SQLITE_INLINE_MEMCPY. +** This allows better measurements of where memcpy() is used when running +** cachegrind. But this macro version of memcpy() is very slow so it +** should not be used in production. This is a performance measurement +** hack only. +*/ +#ifdef SQLITE_INLINE_MEMCPY +# define memcpy(D,S,N) {char*xxd=(char*)(D);const char*xxs=(const char*)(S);\ + int xxn=(N);while(xxn-->0)*(xxd++)=*(xxs++);} +#endif + /* ** If compiling for a processor that lacks floating point support, ** substitute integer for floating-point @@ -11553,9 +11682,12 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); ** pagecaches for each database connection. A positive number is the ** number of pages. A negative number N translations means that a buffer ** of -1024*N bytes is allocated and used for as many pages as it will hold. +** +** The default value of "20" was choosen to minimize the run-time of the +** speedtest1 test program with options: --shrink-memory --reprepare */ #ifndef SQLITE_DEFAULT_PCACHE_INITSZ -# define SQLITE_DEFAULT_PCACHE_INITSZ 100 +# define SQLITE_DEFAULT_PCACHE_INITSZ 20 #endif /* @@ -11730,32 +11862,35 @@ typedef INT16_TYPE LogEst; ** ** For best performance, an attempt is made to guess at the byte-order ** using C-preprocessor macros. If that is unsuccessful, or if -** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined +** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined ** at run-time. 
*/ -#if (defined(i386) || defined(__i386__) || defined(_M_IX86) || \ +#ifndef SQLITE_BYTEORDER +# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ - defined(__arm__)) && !defined(SQLITE_RUNTIME_BYTEORDER) -# define SQLITE_BYTEORDER 1234 -# define SQLITE_BIGENDIAN 0 -# define SQLITE_LITTLEENDIAN 1 -# define SQLITE_UTF16NATIVE SQLITE_UTF16LE + defined(__arm__) +# define SQLITE_BYTEORDER 1234 +# elif defined(sparc) || defined(__ppc__) +# define SQLITE_BYTEORDER 4321 +# else +# define SQLITE_BYTEORDER 0 +# endif #endif -#if (defined(sparc) || defined(__ppc__)) \ - && !defined(SQLITE_RUNTIME_BYTEORDER) -# define SQLITE_BYTEORDER 4321 +#if SQLITE_BYTEORDER==4321 # define SQLITE_BIGENDIAN 1 # define SQLITE_LITTLEENDIAN 0 # define SQLITE_UTF16NATIVE SQLITE_UTF16BE -#endif -#if !defined(SQLITE_BYTEORDER) +#elif SQLITE_BYTEORDER==1234 +# define SQLITE_BIGENDIAN 0 +# define SQLITE_LITTLEENDIAN 1 +# define SQLITE_UTF16NATIVE SQLITE_UTF16LE +#else # ifdef SQLITE_AMALGAMATION const int sqlite3one = 1; # else extern const int sqlite3one; # endif -# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ # define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0) # define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1) # define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE) @@ -12012,6 +12147,14 @@ typedef struct Walker Walker; typedef struct WhereInfo WhereInfo; typedef struct With With; +/* A VList object records a mapping between parameters/variables/wildcards +** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer +** variable number associated with that parameter. See the format description +** on the sqlite3VListAdd() routine for more information. A VList is really +** just an array of integers. +*/ +typedef int VList; + /* ** Defer sourcing vdbe.h and btree.h until after the "u8" and ** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque @@ -12270,9 +12413,10 @@ SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*); SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags); -/* Allowed flags for the 2nd argument to sqlite3BtreeDelete() */ +/* Allowed flags for sqlite3BtreeDelete() and sqlite3BtreeInsert() */ #define BTREE_SAVEPOSITION 0x02 /* Leave cursor pointing at NEXT or PREV */ #define BTREE_AUXDELETE 0x04 /* not the primary delete operation */ +#define BTREE_APPEND 0x08 /* Insert is likely an append */ /* An instance of the BtreePayload object describes the content of a single ** entry in either an index or table btree. @@ -12296,27 +12440,29 @@ struct BtreePayload { const void *pKey; /* Key content for indexes. NULL for tables */ sqlite3_int64 nKey; /* Size of pKey for indexes. PRIMARY KEY for tabs */ const void *pData; /* Data for tables. NULL for indexes */ + struct Mem *aMem; /* First of nMem value in the unpacked pKey */ + u16 nMem; /* Number of aMem[] value. Might be zero */ int nData; /* Size of pData. 0 if none. 
*/ int nZero; /* Extra zero data appended after pData,nData */ }; SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload, - int bias, int seekResult); + int flags, int seekResult); SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*); SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes); SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*); +SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt); SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*); SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*); #ifndef SQLITE_OMIT_INCRBLOB +SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *); #endif @@ -12329,6 +12475,7 @@ SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void); #ifndef NDEBUG SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*); #endif +SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor*); #ifndef SQLITE_OMIT_BTREECOUNT SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *, i64 *); @@ -12433,8 +12580,7 @@ typedef struct SubProgram SubProgram; struct VdbeOp { u8 opcode; /* What operation to perform */ signed char p4type; /* One of the P4_xxx constants for p4 */ - u8 notUsed1; - u8 p5; /* Fifth parameter is an unsigned character */ + u16 p5; /* Fifth parameter is an unsigned 16-bit integer */ int p1; /* First operand */ int p2; /* Second parameter (often the jump destination) */ int p3; /* The third parameter */ @@ -12502,22 +12648,21 @@ typedef struct VdbeOpList VdbeOpList; #define P4_NOTUSED 0 /* The P4 parameter is not used */ #define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */ #define P4_STATIC (-2) /* Pointer to a static string */ -#define P4_COLLSEQ (-4) /* P4 is a pointer to a CollSeq structure */ -#define P4_FUNCDEF (-5) /* P4 is a pointer to a FuncDef structure */ -#define P4_KEYINFO (-6) /* P4 is a pointer to a KeyInfo structure */ -#define P4_EXPR (-7) /* P4 is a pointer to an Expr tree */ -#define P4_MEM (-8) /* P4 is a pointer to a Mem* structure */ +#define P4_COLLSEQ (-3) /* P4 is a pointer to a CollSeq structure */ +#define P4_FUNCDEF (-4) /* P4 is a pointer to a FuncDef structure */ +#define P4_KEYINFO (-5) /* P4 is a pointer to a KeyInfo structure */ +#define P4_EXPR (-6) /* P4 is a pointer to an Expr tree */ +#define P4_MEM (-7) /* P4 is a pointer to a Mem* structure */ #define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */ -#define P4_VTAB (-10) /* P4 is a pointer to an sqlite3_vtab structure */ -#define P4_MPRINTF (-11) /* P4 is a string obtained from sqlite3_mprintf() */ -#define P4_REAL (-12) /* P4 is a 64-bit floating point value */ -#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */ -#define P4_INT32 (-14) /* P4 is a 32-bit signed integer */ -#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */ -#define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */ -#define P4_ADVANCE (-19) /* 
P4 is a pointer to BtreeNext() or BtreePrev() */ -#define P4_TABLE (-20) /* P4 is a pointer to a Table structure */ -#define P4_FUNCCTX (-21) /* P4 is a pointer to an sqlite3_context object */ +#define P4_VTAB (-8) /* P4 is a pointer to an sqlite3_vtab structure */ +#define P4_REAL (-9) /* P4 is a 64-bit floating point value */ +#define P4_INT64 (-10) /* P4 is a 64-bit signed integer */ +#define P4_INT32 (-11) /* P4 is a 32-bit signed integer */ +#define P4_INTARRAY (-12) /* P4 is a vector of 32-bit integers */ +#define P4_SUBPROGRAM (-13) /* P4 is a pointer to a SubProgram structure */ +#define P4_ADVANCE (-14) /* P4 is a pointer to BtreeNext() or BtreePrev() */ +#define P4_TABLE (-15) /* P4 is a pointer to a Table structure */ +#define P4_FUNCCTX (-16) /* P4 is a pointer to an sqlite3_context object */ /* Error message codes for OP_Halt */ #define P5_ConstraintNotNull 1 @@ -12627,7 +12772,7 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Program 64 #define OP_FkIfZero 65 /* synopsis: if fkctr[P1]==0 goto P2 */ #define OP_IfPos 66 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */ -#define OP_IfNotZero 67 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */ +#define OP_IfNotZero 67 /* synopsis: if r[P1]!=0 then r[P1]--, goto P2 */ #define OP_DecrJumpZero 68 /* synopsis: if (--r[P1])==0 goto P2 */ #define OP_IncrVacuum 69 #define OP_VNext 70 @@ -12681,48 +12826,47 @@ typedef struct VdbeOpList VdbeOpList; #define OP_ResetCount 118 #define OP_SorterCompare 119 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ #define OP_SorterData 120 /* synopsis: r[P2]=data */ -#define OP_RowKey 121 /* synopsis: r[P2]=key */ -#define OP_RowData 122 /* synopsis: r[P2]=data */ -#define OP_Rowid 123 /* synopsis: r[P2]=rowid */ -#define OP_NullRow 124 -#define OP_SorterInsert 125 -#define OP_IdxInsert 126 /* synopsis: key=r[P2] */ -#define OP_IdxDelete 127 /* synopsis: key=r[P2@P3] */ -#define OP_Seek 128 /* synopsis: Move P3 to P1.rowid */ -#define OP_IdxRowid 129 /* synopsis: r[P2]=rowid */ -#define OP_Destroy 130 -#define OP_Clear 131 +#define OP_RowData 121 /* synopsis: r[P2]=data */ +#define OP_Rowid 122 /* synopsis: r[P2]=rowid */ +#define OP_NullRow 123 +#define OP_SorterInsert 124 /* synopsis: key=r[P2] */ +#define OP_IdxInsert 125 /* synopsis: key=r[P2] */ +#define OP_IdxDelete 126 /* synopsis: key=r[P2@P3] */ +#define OP_Seek 127 /* synopsis: Move P3 to P1.rowid */ +#define OP_IdxRowid 128 /* synopsis: r[P2]=rowid */ +#define OP_Destroy 129 +#define OP_Clear 130 +#define OP_ResetSorter 131 #define OP_Real 132 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ -#define OP_ResetSorter 133 -#define OP_CreateIndex 134 /* synopsis: r[P2]=root iDb=P1 */ -#define OP_CreateTable 135 /* synopsis: r[P2]=root iDb=P1 */ -#define OP_ParseSchema 136 -#define OP_LoadAnalysis 137 -#define OP_DropTable 138 -#define OP_DropIndex 139 -#define OP_DropTrigger 140 -#define OP_IntegrityCk 141 -#define OP_RowSetAdd 142 /* synopsis: rowset(P1)=r[P2] */ -#define OP_Param 143 -#define OP_FkCounter 144 /* synopsis: fkctr[P1]+=P2 */ -#define OP_MemMax 145 /* synopsis: r[P1]=max(r[P1],r[P2]) */ -#define OP_OffsetLimit 146 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ -#define OP_AggStep0 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggStep 148 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggFinal 149 /* synopsis: accum=r[P1] N=P2 */ -#define OP_Expire 150 -#define OP_TableLock 151 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 152 -#define OP_VCreate 153 -#define 
OP_VDestroy 154 -#define OP_VOpen 155 -#define OP_VColumn 156 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 157 -#define OP_Pagecount 158 -#define OP_MaxPgcnt 159 -#define OP_CursorHint 160 -#define OP_Noop 161 -#define OP_Explain 162 +#define OP_CreateIndex 133 /* synopsis: r[P2]=root iDb=P1 */ +#define OP_CreateTable 134 /* synopsis: r[P2]=root iDb=P1 */ +#define OP_ParseSchema 135 +#define OP_LoadAnalysis 136 +#define OP_DropTable 137 +#define OP_DropIndex 138 +#define OP_DropTrigger 139 +#define OP_IntegrityCk 140 +#define OP_RowSetAdd 141 /* synopsis: rowset(P1)=r[P2] */ +#define OP_Param 142 +#define OP_FkCounter 143 /* synopsis: fkctr[P1]+=P2 */ +#define OP_MemMax 144 /* synopsis: r[P1]=max(r[P1],r[P2]) */ +#define OP_OffsetLimit 145 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ +#define OP_AggStep0 146 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggFinal 148 /* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 149 +#define OP_TableLock 150 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 151 +#define OP_VCreate 152 +#define OP_VDestroy 153 +#define OP_VOpen 154 +#define OP_VColumn 155 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 156 +#define OP_Pagecount 157 +#define OP_MaxPgcnt 158 +#define OP_CursorHint 159 +#define OP_Noop 160 +#define OP_Explain 161 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c @@ -12750,12 +12894,12 @@ typedef struct VdbeOpList VdbeOpList; /* 96 */ 0x00, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ /* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ /* 112 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 120 */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00,\ -/* 128 */ 0x00, 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10,\ -/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10,\ -/* 144 */ 0x00, 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\ -/* 160 */ 0x00, 0x00, 0x00,} +/* 120 */ 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00, 0x00,\ +/* 128 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\ +/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10, 0x00,\ +/* 144 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\ +/* 160 */ 0x00, 0x00,} /* The sqlite3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. 
The smaller the maximum @@ -12786,8 +12930,10 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int); SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe*,int); #if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS) SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N); +SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p); #else # define sqlite3VdbeVerifyNoMallocRequired(A,B) +# define sqlite3VdbeVerifyNoResultRow(A) #endif SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp, int iLineno); SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*); @@ -12795,11 +12941,12 @@ SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, u32 addr, u8); SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1); SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2); SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3); -SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5); +SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u16 P5); SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr); SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe*, int addr); SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op); SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N); +SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe*, void *pP4, int p4type); SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*); SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int); SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int); @@ -12835,7 +12982,7 @@ SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*); SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*); SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*); SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int); -SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo *, char *, int, char **); +SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo*); typedef int (*RecordCompare)(int,const void*,UnpackedRecord*); SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*); @@ -13040,7 +13187,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( int, void(*)(DbPage*) ); -SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager); +SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3*); SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*); /* Functions used to configure a Pager object. 
*/ @@ -13091,18 +13238,21 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint); SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager); #ifndef SQLITE_OMIT_WAL -SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int, int*, int*); +SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, sqlite3*, int, int*, int*); SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager); SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager); SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen); -SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager); -SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager); +SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3*); +# ifdef SQLITE_DIRECT_OVERFLOW_READ +SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager, Pgno); +# endif # ifdef SQLITE_ENABLE_SNAPSHOT SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot); SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSnapshot); +SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager); # endif #else -# define sqlite3PagerUseWal(x) 0 +# define sqlite3PagerUseWal(x,y) 0 #endif #ifdef SQLITE_ENABLE_ZIPVFS @@ -13926,6 +14076,7 @@ struct sqlite3 { u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */ u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */ u8 mTrace; /* zero or more SQLITE_TRACE flags */ + u8 skipBtreeMutex; /* True if no shared-cache backends */ int nextPagesize; /* Pagesize after VACUUM if >0 */ u32 magic; /* Magic number for detect library misuse */ int nChange; /* Value returned by sqlite3_changes() */ @@ -14073,6 +14224,7 @@ struct sqlite3 { #define SQLITE_Vacuum 0x10000000 /* Currently in a VACUUM */ #define SQLITE_CellSizeCk 0x20000000 /* Check btree cell sizes on load */ #define SQLITE_Fts3Tokenizer 0x40000000 /* Enable fts3_tokenizer(2) */ +#define SQLITE_NoCkptOnClose 0x80000000 /* No checkpoint on close()/DETACH */ /* @@ -14098,13 +14250,8 @@ struct sqlite3 { /* ** Macros for testing whether or not optimizations are enabled or disabled. */ -#ifndef SQLITE_OMIT_BUILTIN_TEST #define OptimizationDisabled(db, mask) (((db)->dbOptFlags&(mask))!=0) #define OptimizationEnabled(db, mask) (((db)->dbOptFlags&(mask))==0) -#else -#define OptimizationDisabled(db, mask) 0 -#define OptimizationEnabled(db, mask) 1 -#endif /* ** Return true if it OK to factor constant expressions into the initialization @@ -14195,6 +14342,7 @@ struct FuncDestructor { #define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */ #define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a ** single query - might change over time */ +#define SQLITE_FUNC_AFFINITY 0x4000 /* Built-in affinity() function */ /* ** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are @@ -14443,9 +14591,9 @@ struct Table { ExprList *pCheck; /* All CHECK constraints */ /* ... 
also used as column name list in a VIEW */ int tnum; /* Root BTree page for this table */ + u32 nTabRef; /* Number of pointers to this Table */ i16 iPKey; /* If not negative, use aCol[iPKey] as the rowid */ i16 nCol; /* Number of columns in this table */ - u16 nRef; /* Number of pointers to this Table */ LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */ LogEst szTabRow; /* Estimated size of each table row in bytes */ #ifdef SQLITE_ENABLE_COSTMULT @@ -15201,7 +15349,7 @@ struct SrcList { #define WHERE_SORTBYGROUP 0x0200 /* Support sqlite3WhereIsSorted() */ #define WHERE_SEEK_TABLE 0x0400 /* Do not defer seeks on main table */ #define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */ - /* 0x1000 not currently used */ +#define WHERE_SEEK_UNIQ_TABLE 0x1000 /* Do not defer seeks if unique */ /* 0x2000 not currently used */ #define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */ /* 0x8000 not currently used */ @@ -15584,17 +15732,16 @@ struct Parse { } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */ int aTempReg[8]; /* Holding area for temporary registers */ Token sNameToken; /* Token with unqualified schema object name */ - Token sLastToken; /* The last token parsed */ /************************************************************************ ** Above is constant between recursions. Below is reset before and after ** each recursion. The boundary between these two regions is determined - ** using offsetof(Parse,nVar) so the nVar field must be the first field - ** in the recursive region. + ** using offsetof(Parse,sLastToken) so the sLastToken field must be the + ** first field in the recursive region. ************************************************************************/ + Token sLastToken; /* The last token parsed */ ynVar nVar; /* Number of '?' variables seen in the SQL so far */ - int nzVar; /* Number of available slots in azVar[] */ u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */ u8 explain; /* True if the EXPLAIN flag is found on the query */ #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -15606,7 +15753,7 @@ struct Parse { int iSelectId; /* ID of current select for EXPLAIN output */ int iNextSelectId; /* Next available select ID for EXPLAIN output */ #endif - char **azVar; /* Pointers to names of parameters */ + VList *pVList; /* Mapping between variable names and numbers */ Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */ const char *zTail; /* All SQL text past the last semicolon parsed */ Table *pNewTable; /* A table being constructed by CREATE TABLE */ @@ -15626,7 +15773,7 @@ struct Parse { ** Sizes and pointers of various parts of the Parse object. 
*/ #define PARSE_HDR_SZ offsetof(Parse,aColCache) /* Recursive part w/o aColCache*/ -#define PARSE_RECURSE_SZ offsetof(Parse,nVar) /* Recursive part */ +#define PARSE_RECURSE_SZ offsetof(Parse,sLastToken) /* Recursive part */ #define PARSE_TAIL_SZ (sizeof(Parse)-PARSE_RECURSE_SZ) /* Non-recursive part */ #define PARSE_TAIL(X) (((char*)(X))+PARSE_RECURSE_SZ) /* Pointer to tail */ @@ -15663,13 +15810,11 @@ struct AuthContext { #define OPFLAG_NCHANGE 0x01 /* OP_Insert: Set to update db->nChange */ /* Also used in P2 (not P5) of OP_Delete */ #define OPFLAG_EPHEM 0x01 /* OP_Column: Ephemeral output is ok */ -#define OPFLAG_LASTROWID 0x02 /* Set to update db->lastRowid */ +#define OPFLAG_LASTROWID 0x20 /* Set to update db->lastRowid */ #define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */ #define OPFLAG_APPEND 0x08 /* This is likely to be an append */ #define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */ -#ifdef SQLITE_ENABLE_PREUPDATE_HOOK #define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */ -#endif #define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ #define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ #define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */ @@ -15677,7 +15822,7 @@ struct AuthContext { #define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */ #define OPFLAG_P2ISREG 0x10 /* P2 to OP_Open** is a register number */ #define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */ -#define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete: keep cursor position */ +#define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete/Insert: save cursor pos */ #define OPFLAG_AUXDELETE 0x04 /* OP_Delete: index in a DELETE op */ /* @@ -15874,7 +16019,7 @@ struct Sqlite3Config { void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */ void *pVdbeBranchArg; /* 1st argument */ #endif -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */ #endif int bLocaltimeFault; /* True to fail localtime() calls */ @@ -16078,7 +16223,7 @@ SQLITE_PRIVATE void sqlite3ScratchFree(void*); SQLITE_PRIVATE void *sqlite3PageMalloc(int); SQLITE_PRIVATE void sqlite3PageFree(void*); SQLITE_PRIVATE void sqlite3MemSetDefault(void); -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void)); #endif SQLITE_PRIVATE int sqlite3HeapNearlyFull(void); @@ -16189,7 +16334,7 @@ SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int); SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int); SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*); -SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*); +SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*); SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*); SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*); SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*); @@ -16205,6 +16350,9 @@ SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*); SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**); SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**); SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int); +#ifndef SQLITE_OMIT_VIRTUALTABLE +SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3*,const char *zName); +#endif SQLITE_PRIVATE void 
sqlite3ResetAllSchemasOfConnection(sqlite3*); SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int); SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*); @@ -16233,7 +16381,7 @@ SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*, sqlite3_vfs**,char**,char **); SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*); -#ifdef SQLITE_OMIT_BUILTIN_TEST +#ifdef SQLITE_UNTESTABLE # define sqlite3FaultSim(X) SQLITE_OK #else SQLITE_PRIVATE int sqlite3FaultSim(int); @@ -16246,7 +16394,7 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32); SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*); SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*); SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*); -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*); #endif @@ -16335,7 +16483,7 @@ SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int); SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int); -SQLITE_PRIVATE void sqlite3ExprCodeAtInit(Parse*, Expr*, int, u8); +SQLITE_PRIVATE int sqlite3ExprCodeAtInit(Parse*, Expr*, int); SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*); SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse*, Expr*, int); @@ -16343,6 +16491,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int, u8); #define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */ #define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */ #define SQLITE_ECEL_REF 0x04 /* Use ExprList.u.x.iOrderByCol */ +#define SQLITE_ECEL_OMITREF 0x08 /* Omit if ExprList.u.x.iOrderByCol */ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int); SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int); SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int); @@ -16365,7 +16514,7 @@ SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx); SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*); SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*); -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE SQLITE_PRIVATE void sqlite3PrngSaveState(void); SQLITE_PRIVATE void sqlite3PrngRestoreState(void); #endif @@ -16396,6 +16545,11 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,I SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int); SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int, u8,u8,int,int*,int*); +#ifdef SQLITE_ENABLE_NULL_TRIM +SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe*,Table*); +#else +# define sqlite3SetMakeRecordP5(A,B) +#endif SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int); SQLITE_PRIVATE int sqlite3OpenTableAndIndices(Parse*, Table*, int, u8, int, u8*, int*, int*); SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse*, int, int); @@ -16502,6 +16656,9 @@ SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); defined(SQLITE_EXPLAIN_ESTIMATED_ROWS) SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst); #endif +SQLITE_PRIVATE VList *sqlite3VListAdd(sqlite3*,VList*,const char*,int,int); +SQLITE_PRIVATE const char *sqlite3VListNumToName(VList*,int); +SQLITE_PRIVATE int sqlite3VListNameToNum(VList*,const char*,int); /* ** Routines to read and write 
variable-length integers. These used to @@ -16671,8 +16828,10 @@ SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3*, Index*, int); /* ** The interface to the LEMON-generated parser */ -SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64)); -SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*)); +#ifndef SQLITE_AMALGAMATION +SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64)); +SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*)); +#endif SQLITE_PRIVATE void sqlite3Parser(void*, int, Token, Parse*); #ifdef YYTRACKMAXSTACKDEPTH SQLITE_PRIVATE int sqlite3ParserStackPeak(void*); @@ -16718,6 +16877,13 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3*); SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *, int, int); SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*); SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*); +SQLITE_PRIVATE Module *sqlite3VtabCreateModule( + sqlite3*, + const char*, + const sqlite3_module*, + void*, + void(*)(void*) + ); # define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0) #endif SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse*,Module*); @@ -16775,6 +16941,7 @@ SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *); #define sqlite3FkDropTable(a,b,c) #define sqlite3FkOldmask(a,b) 0 #define sqlite3FkRequired(a,b,c,d) 0 + #define sqlite3FkReferences(a) 0 #endif #ifndef SQLITE_OMIT_FOREIGN_KEY SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*); @@ -16793,10 +16960,10 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex(Parse*,Table*,FKey*,Index**,int**); /* ** The interface to the code in fault.c used for identifying "benign" -** malloc failures. This is only present if SQLITE_OMIT_BUILTIN_TEST +** malloc failures. This is only present if SQLITE_UNTESTABLE ** is not defined. */ -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void); SQLITE_PRIVATE void sqlite3EndBenignMalloc(void); #else @@ -16927,6 +17094,7 @@ SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr); SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr); SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr*, int); SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(Parse*,Expr*,int); +SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse*, Expr*); #endif /* SQLITEINT_H */ @@ -17103,6 +17271,19 @@ SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = { # define SQLITE_STMTJRNL_SPILL (64*1024) #endif +/* +** The default lookaside-configuration, the format "SZ,N". SZ is the +** number of bytes in each lookaside slot (should be a multiple of 8) +** and N is the number of slots. The lookaside-configuration can be +** changed as start-time using sqlite3_config(SQLITE_CONFIG_LOOKASIDE) +** or at run-time for an individual database connection using +** sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE); +*/ +#ifndef SQLITE_DEFAULT_LOOKASIDE +# define SQLITE_DEFAULT_LOOKASIDE 1200,100 +#endif + + /* ** The following singleton contains the global configuration for ** the SQLite library. 
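As a hedged example of the lookaside knobs mentioned a few lines above (the 256/500 figures are arbitrary; passing a NULL buffer asks the library to allocate the arena itself):

#include "sqlite3.h"

/* Before sqlite3_initialize() or any sqlite3_open():
**   sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 256, 500);   -- new process default
*/

/* For one open connection, switch to 1200-byte slots x 100 slots: */
static int tune_lookaside(sqlite3 *db){
  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 1200, 100);
}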
@@ -17115,8 +17296,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */ 0x7ffffffe, /* mxStrlen */ 0, /* neverCorrupt */ - 128, /* szLookaside */ - 500, /* nLookaside */ + SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */ SQLITE_STMTJRNL_SPILL, /* nStmtSpill */ {0,0,0,0,0,0,0,0}, /* m */ {0,0,0,0,0,0,0,0,0}, /* mutex */ @@ -17153,7 +17333,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { 0, /* xVdbeBranch */ 0, /* pVbeBranchArg */ #endif -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE 0, /* xTestCallback */ #endif 0, /* bLocaltimeFault */ @@ -17282,6 +17462,9 @@ static const char * const azCompileOpt[] = { #if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc) "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE), #endif +#if SQLITE_DIRECT_OVERFLOW_READ + "DIRECT_OVERFLOW_READ", +#endif #if SQLITE_DISABLE_DIRSYNC "DISABLE_DIRSYNC", #endif @@ -17368,6 +17551,9 @@ static const char * const azCompileOpt[] = { #if SQLITE_ENABLE_UPDATE_DELETE_LIMIT "ENABLE_UPDATE_DELETE_LIMIT", #endif +#if defined(SQLITE_ENABLE_URI_00_ERROR) + "ENABLE_URI_00_ERROR", +#endif #if SQLITE_HAS_CODEC "HAS_CODEC", #endif @@ -17443,9 +17629,6 @@ static const char * const azCompileOpt[] = { #if SQLITE_OMIT_BTREECOUNT "OMIT_BTREECOUNT", #endif -#if SQLITE_OMIT_BUILTIN_TEST - "OMIT_BUILTIN_TEST", -#endif #if SQLITE_OMIT_CAST "OMIT_CAST", #endif @@ -17608,6 +17791,9 @@ static const char * const azCompileOpt[] = { #if defined(SQLITE_THREADSAFE) "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE), #endif +#if SQLITE_UNTESTABLE + "UNTESTABLE" +#endif #if SQLITE_USE_ALLOCA "USE_ALLOCA", #endif @@ -17761,57 +17947,60 @@ typedef struct AuxData AuxData; */ typedef struct VdbeCursor VdbeCursor; struct VdbeCursor { - u8 eCurType; /* One of the CURTYPE_* values above */ - i8 iDb; /* Index of cursor database in db->aDb[] (or -1) */ - u8 nullRow; /* True if pointing to a row with no data */ - u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */ - u8 isTable; /* True for rowid tables. False for indexes */ + u8 eCurType; /* One of the CURTYPE_* values above */ + i8 iDb; /* Index of cursor database in db->aDb[] (or -1) */ + u8 nullRow; /* True if pointing to a row with no data */ + u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */ + u8 isTable; /* True for rowid tables. False for indexes */ #ifdef SQLITE_DEBUG - u8 seekOp; /* Most recent seek operation on this cursor */ - u8 wrFlag; /* The wrFlag argument to sqlite3BtreeCursor() */ -#endif - Bool isEphemeral:1; /* True for an ephemeral table */ - Bool useRandomRowid:1;/* Generate new record numbers semi-randomly */ - Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */ - Pgno pgnoRoot; /* Root page of the open btree cursor */ - i16 nField; /* Number of fields in the header */ - u16 nHdrParsed; /* Number of header fields parsed so far */ + u8 seekOp; /* Most recent seek operation on this cursor */ + u8 wrFlag; /* The wrFlag argument to sqlite3BtreeCursor() */ +#endif + Bool isEphemeral:1; /* True for an ephemeral table */ + Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */ + Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */ + Btree *pBtx; /* Separate file holding temporary table */ + i64 seqCount; /* Sequence counter */ + int *aAltMap; /* Mapping from table to index column numbers */ + + /* Cached OP_Column parse information is only valid if cacheStatus matches + ** Vdbe.cacheCtr. 
Vdbe.cacheCtr will never take on the value of + ** CACHE_STALE (0) and so setting cacheStatus=CACHE_STALE guarantees that + ** the cache is out of date. */ + u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */ + int seekResult; /* Result of previous sqlite3BtreeMoveto() or 0 + ** if there have been no prior seeks on the cursor. */ + /* NB: seekResult does not distinguish between "no seeks have ever occurred + ** on this cursor" and "the most recent seek was an exact match". */ + + /* When a new VdbeCursor is allocated, only the fields above are zeroed. + ** The fields that follow are uninitialized, and must be individually + ** initialized prior to first use. */ + VdbeCursor *pAltCursor; /* Associated index cursor from which to read */ union { BtCursor *pCursor; /* CURTYPE_BTREE. Btree cursor */ sqlite3_vtab_cursor *pVCur; /* CURTYPE_VTAB. Vtab cursor */ int pseudoTableReg; /* CURTYPE_PSEUDO. Reg holding content. */ VdbeSorter *pSorter; /* CURTYPE_SORTER. Sorter object */ } uc; - Btree *pBt; /* Separate file holding temporary table */ - KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */ - int seekResult; /* Result of previous sqlite3BtreeMoveto() */ - i64 seqCount; /* Sequence counter */ - i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ - VdbeCursor *pAltCursor; /* Associated index cursor from which to read */ - int *aAltMap; /* Mapping from table to index column numbers */ + KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */ + u32 iHdrOffset; /* Offset to next unparsed byte of the header */ + Pgno pgnoRoot; /* Root page of the open btree cursor */ + i16 nField; /* Number of fields in the header */ + u16 nHdrParsed; /* Number of header fields parsed so far */ + i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ + u32 *aOffset; /* Pointer to aType[nField] */ + const u8 *aRow; /* Data for the current row, if all on one page */ + u32 payloadSize; /* Total number of bytes in the record */ + u32 szRow; /* Byte available in aRow */ #ifdef SQLITE_ENABLE_COLUMN_USED_MASK - u64 maskUsed; /* Mask of columns used by this cursor */ + u64 maskUsed; /* Mask of columns used by this cursor */ #endif - /* Cached information about the header for the data record that the - ** cursor is currently pointing to. Only valid if cacheStatus matches - ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of - ** CACHE_STALE and so setting cacheStatus=CACHE_STALE guarantees that - ** the cache is out of date. - ** - ** aRow might point to (ephemeral) data for the current row, or it might - ** be NULL. - */ - u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */ - u32 payloadSize; /* Total number of bytes in the record */ - u32 szRow; /* Byte available in aRow */ - u32 iHdrOffset; /* Offset to next unparsed byte of the header */ - const u8 *aRow; /* Data for the current row, if all on one page */ - u32 *aOffset; /* Pointer to aType[nField] */ - u32 aType[1]; /* Type values for all entries in the record */ /* 2*nField extra array elements allocated for aType[], beyond the one ** static element declared in the structure. nField total array slots for ** aType[] and nField+1 array slots for aOffset[] */ + u32 aType[1]; /* Type values record decode. 
MUST BE LAST */ }; @@ -18031,7 +18220,6 @@ struct Vdbe { Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ Parse *pParse; /* Parsing context used to create this Vdbe */ ynVar nVar; /* Number of entries in aVar[] */ - ynVar nzVar; /* Number of entries in azVar[] */ u32 magic; /* Magic number for sanity checking */ int nMem; /* Number of memory locations currently allocated */ int nCursor; /* Number of slots in apCsr[] */ @@ -18056,7 +18244,7 @@ struct Vdbe { char *zErrMsg; /* Error message written here */ VdbeCursor **apCsr; /* One element of this array for each open cursor */ Mem *aVar; /* Values for the OP_Variable opcode. */ - char **azVar; /* Name of variables */ + VList *pVList; /* Name of variables */ #ifndef SQLITE_OMIT_TRACE i64 startTime; /* Time when query started - used for profiling */ #endif @@ -18120,6 +18308,7 @@ struct PreUpdate { i64 iKey2; /* Second key value passed to hook */ Mem *aNew; /* Array of new.* values */ Table *pTab; /* Schema object being upated */ + Index *pPk; /* PK index if pTab is WITHOUT ROWID */ }; /* @@ -18172,7 +18361,7 @@ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*); SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem*,u8,u8); -SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,int,Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,Mem*); SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p); SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*); SQLITE_PRIVATE const char *sqlite3OpcodeName(int); @@ -18641,16 +18830,18 @@ struct tm *__cdecl localtime(const time_t *); */ typedef struct DateTime DateTime; struct DateTime { - sqlite3_int64 iJD; /* The julian day number times 86400000 */ - int Y, M, D; /* Year, month, and day */ - int h, m; /* Hour and minutes */ - int tz; /* Timezone offset in minutes */ - double s; /* Seconds */ - char validYMD; /* True (1) if Y,M,D are valid */ - char validHMS; /* True (1) if h,m,s are valid */ - char validJD; /* True (1) if iJD is valid */ - char validTZ; /* True (1) if tz is valid */ - char tzSet; /* Timezone was set explicitly */ + sqlite3_int64 iJD; /* The julian day number times 86400000 */ + int Y, M, D; /* Year, month, and day */ + int h, m; /* Hour and minutes */ + int tz; /* Timezone offset in minutes */ + double s; /* Seconds */ + char validJD; /* True (1) if iJD is valid */ + char rawS; /* Raw numeric value stored in s */ + char validYMD; /* True (1) if Y,M,D are valid */ + char validHMS; /* True (1) if h,m,s are valid */ + char validTZ; /* True (1) if tz is valid */ + char tzSet; /* Timezone was set explicitly */ + char isError; /* An overflow has occurred */ }; @@ -18798,6 +18989,7 @@ static int parseHhMmSs(const char *zDate, DateTime *p){ s = 0; } p->validJD = 0; + p->rawS = 0; p->validHMS = 1; p->h = h; p->m = m; @@ -18807,6 +18999,14 @@ static int parseHhMmSs(const char *zDate, DateTime *p){ return 0; } +/* +** Put the DateTime object into its error state. +*/ +static void datetimeError(DateTime *p){ + memset(p, 0, sizeof(*p)); + p->isError = 1; +} + /* ** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume ** that the YYYY-MM-DD is according to the Gregorian calendar. 
@@ -18826,6 +19026,10 @@ static void computeJD(DateTime *p){ M = 1; D = 1; } + if( Y<-4713 || Y>9999 || p->rawS ){ + datetimeError(p); + return; + } if( M<=2 ){ Y--; M += 12; @@ -18906,6 +19110,21 @@ static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ } } +/* +** Input "r" is a numeric quantity which might be a julian day number, +** or the number of seconds since 1970. If the value if r is within +** range of a julian day number, install it as such and set validJD. +** If the value is a valid unix timestamp, put it in p->s and set p->rawS. +*/ +static void setRawDateNumber(DateTime *p, double r){ + p->s = r; + p->rawS = 1; + if( r>=0.0 && r<5373484.5 ){ + p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); + p->validJD = 1; + } +} + /* ** Attempt to parse the given string into a julian day number. Return ** the number of errors. @@ -18935,13 +19154,30 @@ static int parseDateOrTime( }else if( sqlite3StrICmp(zDate,"now")==0){ return setDateTimeToCurrent(context, p); }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8) ){ - p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); - p->validJD = 1; + setRawDateNumber(p, r); return 0; } return 1; } +/* The julian day number for 9999-12-31 23:59:59.999 is 5373484.4999999. +** Multiplying this by 86400000 gives 464269060799999 as the maximum value +** for DateTime.iJD. +** +** But some older compilers (ex: gcc 4.2.1 on older Macs) cannot deal with +** such a large integer literal, so we have to encode it. +*/ +#define INT_464269060799999 ((((i64)0x1a640)<<32)|0x1072fdff) + +/* +** Return TRUE if the given julian day number is within range. +** +** The input is the JulianDay times 86400000. +*/ +static int validJulianDay(sqlite3_int64 iJD){ + return iJD>=0 && iJD<=INT_464269060799999; +} + /* ** Compute the Year, Month, and Day from the julian day number. */ @@ -18953,6 +19189,7 @@ static void computeYMD(DateTime *p){ p->M = 1; p->D = 1; }else{ + assert( validJulianDay(p->iJD) ); Z = (int)((p->iJD + 43200000)/86400000); A = (int)((Z - 1867216.25)/36524.25); A = Z + 1 + A - (A/4); @@ -18983,6 +19220,7 @@ static void computeHMS(DateTime *p){ s -= p->h*3600; p->m = s/60; p->s += s - p->m*60; + p->rawS = 0; p->validHMS = 1; } @@ -19044,14 +19282,14 @@ static int osLocaltime(time_t *t, struct tm *pTm){ #endif sqlite3_mutex_enter(mutex); pX = localtime(t); -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0; #endif if( pX ) *pTm = *pX; sqlite3_mutex_leave(mutex); rc = pX==0; #else -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE if( sqlite3GlobalConfig.bLocaltimeFault ) return 1; #endif #if HAVE_LOCALTIME_R @@ -19122,13 +19360,38 @@ static sqlite3_int64 localtimeOffset( y.validYMD = 1; y.validHMS = 1; y.validJD = 0; + y.rawS = 0; y.validTZ = 0; + y.isError = 0; computeJD(&y); *pRc = SQLITE_OK; return y.iJD - x.iJD; } #endif /* SQLITE_OMIT_LOCALTIME */ +/* +** The following table defines various date transformations of the form +** +** 'NNN days' +** +** Where NNN is an arbitrary floating-point number and "days" can be one +** of several units of time. 
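As a quick cross-check of the encoded limit above: ((i64)0x1a640<<32)|0x1072fdff does evaluate to 464269060799999, the millisecond-scaled Julian day for 9999-12-31 23:59:59.999 that validJulianDay() treats as the upper bound. A minimal standalone sketch, not part of the amalgamation; it only borrows the macro and the range test from the hunk above:

    #include <stdio.h>
    #include <inttypes.h>
    typedef int64_t i64;

    /* Same encoding as in the hunk above, to sidestep compilers that
    ** reject a 15-digit integer literal. */
    #define INT_464269060799999 ((((i64)0x1a640)<<32)|0x1072fdff)

    static int validJulianDay(i64 iJD){
      return iJD>=0 && iJD<=INT_464269060799999;
    }

    int main(void){
      printf("%" PRId64 "\n", INT_464269060799999);         /* 464269060799999 */
      printf("%d\n", validJulianDay(INT_464269060799999));  /* 1: last valid ms */
      printf("%d\n", validJulianDay(INT_464269060799999+1));/* 0: past 9999-12-31 */
      printf("%d\n", validJulianDay(-1));                   /* 0: negative */
      return 0;
    }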
+*/ +static const struct { + u8 eType; /* Transformation type code */ + u8 nName; /* Length of th name */ + char *zName; /* Name of the transformation */ + double rLimit; /* Maximum NNN value for this transform */ + double rXform; /* Constant used for this transform */ +} aXformType[] = { + { 0, 6, "second", 464269060800.0, 86400000.0/(24.0*60.0*60.0) }, + { 0, 6, "minute", 7737817680.0, 86400000.0/(24.0*60.0) }, + { 0, 4, "hour", 128963628.0, 86400000.0/24.0 }, + { 0, 3, "day", 5373485.0, 86400000.0 }, + { 1, 5, "month", 176546.0, 30.0*86400000.0 }, + { 2, 4, "year", 14713.0, 365.0*86400000.0 }, +}; + /* ** Process a modifier to a date-time stamp. The modifiers are ** as follows: @@ -19153,17 +19416,15 @@ static sqlite3_int64 localtimeOffset( ** to context pCtx. If the error is an unrecognized modifier, no error is ** written to pCtx. */ -static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ +static int parseModifier( + sqlite3_context *pCtx, /* Function context */ + const char *z, /* The text of the modifier */ + int n, /* Length of zMod in bytes */ + DateTime *p /* The date/time value to be modified */ +){ int rc = 1; - int n; double r; - char *z, zBuf[30]; - z = zBuf; - for(n=0; niJD += localtimeOffset(p, pCtx, &rc); clearYMD_HMS_TZ(p); @@ -19183,16 +19444,21 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ /* ** unixepoch ** - ** Treat the current value of p->iJD as the number of + ** Treat the current value of p->s as the number of ** seconds since 1970. Convert to a real julian day number. */ - if( strcmp(z, "unixepoch")==0 && p->validJD ){ - p->iJD = (p->iJD + 43200)/86400 + 21086676*(i64)10000000; - clearYMD_HMS_TZ(p); - rc = 0; + if( sqlite3_stricmp(z, "unixepoch")==0 && p->rawS ){ + r = p->s*1000.0 + 210866760000000.0; + if( r>=0.0 && r<464269060800000.0 ){ + clearYMD_HMS_TZ(p); + p->iJD = (sqlite3_int64)r; + p->validJD = 1; + p->rawS = 0; + rc = 0; + } } #ifndef SQLITE_OMIT_LOCALTIME - else if( strcmp(z, "utc")==0 ){ + else if( sqlite3_stricmp(z, "utc")==0 ){ if( p->tzSet==0 ){ sqlite3_int64 c1; computeJD(p); @@ -19218,7 +19484,7 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ ** weekday N where 0==Sunday, 1==Monday, and so forth. If the ** date is already on the appropriate weekday, this is a no-op. */ - if( strncmp(z, "weekday ", 8)==0 + if( sqlite3_strnicmp(z, "weekday ", 8)==0 && sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8) && (n=(int)r)==r && n>=0 && r<7 ){ sqlite3_int64 Z; @@ -19241,7 +19507,7 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ ** Move the date backwards to the beginning of the current day, ** or month or year. 
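For the plain time units in aXformType[] (eType==0), parseModifier() simply scales NNN by rXform milliseconds and adds the result to iJD; only "month" and "year" need the calendar-aware handling shown further down. A small sketch of that arithmetic under illustrative names (aUnit and addUnits are not identifiers from the amalgamation):

    #include <stdio.h>
    #include <string.h>
    typedef long long i64;

    /* Subset of the table above: unit name, max |NNN|, milliseconds per unit */
    static const struct { const char *zName; double rLimit; double rXform; }
    aUnit[] = {
      { "second",  464269060800.0, 86400000.0/(24.0*60.0*60.0) }, /*    1000 */
      { "minute",    7737817680.0, 86400000.0/(24.0*60.0)      }, /*   60000 */
      { "hour",       128963628.0, 86400000.0/24.0             }, /* 3600000 */
      { "day",          5373485.0, 86400000.0                  },
    };

    /* Apply "NNN <unit>" to a millisecond-scaled Julian day, mirroring the
    ** eType==0 path: range-check NNN, then add NNN*rXform with rounding. */
    static int addUnits(i64 *piJD, double r, const char *zUnit){
      double rRounder = r<0 ? -0.5 : +0.5;
      size_t i;
      for(i=0; i<sizeof(aUnit)/sizeof(aUnit[0]); i++){
        if( strcmp(zUnit, aUnit[i].zName)==0
         && r>-aUnit[i].rLimit && r<aUnit[i].rLimit ){
          *piJD += (i64)(r*aUnit[i].rXform + rRounder);
          return 0;
        }
      }
      return 1;   /* unknown unit or NNN out of range */
    }

    int main(void){
      i64 iJD = 0;
      addUnits(&iJD, 3.0, "hour");     /* +3 hours    -> +10800000 ms */
      addUnits(&iJD, 90.0, "minute");  /* +90 minutes ->  +5400000 ms */
      printf("%lld\n", iJD);           /* 16200000 */
      return 0;
    }

The loop in the hunk that follows additionally strips an optional trailing 's' ("hours", "3 days") and matches unit names case-insensitively; those details are omitted from the sketch for brevity.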
*/ - if( strncmp(z, "start of ", 9)!=0 ) break; + if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break; z += 9; computeYMD(p); p->validHMS = 1; @@ -19249,15 +19515,15 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ p->s = 0.0; p->validTZ = 0; p->validJD = 0; - if( strcmp(z,"month")==0 ){ + if( sqlite3_stricmp(z,"month")==0 ){ p->D = 1; rc = 0; - }else if( strcmp(z,"year")==0 ){ + }else if( sqlite3_stricmp(z,"year")==0 ){ computeYMD(p); p->M = 1; p->D = 1; rc = 0; - }else if( strcmp(z,"day")==0 ){ + }else if( sqlite3_stricmp(z,"day")==0 ){ rc = 0; } break; @@ -19275,6 +19541,7 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ case '8': case '9': { double rRounder; + int i; for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){} if( !sqlite3AtoF(z, &r, n, SQLITE_UTF8) ){ rc = 1; @@ -19303,46 +19570,48 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ rc = 0; break; } + + /* If control reaches this point, it means the transformation is + ** one of the forms like "+NNN days". */ z += n; while( sqlite3Isspace(*z) ) z++; n = sqlite3Strlen30(z); if( n>10 || n<3 ) break; - if( z[n-1]=='s' ){ z[n-1] = 0; n--; } + if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--; computeJD(p); - rc = 0; + rc = 1; rRounder = r<0 ? -0.5 : +0.5; - if( n==3 && strcmp(z,"day")==0 ){ - p->iJD += (sqlite3_int64)(r*86400000.0 + rRounder); - }else if( n==4 && strcmp(z,"hour")==0 ){ - p->iJD += (sqlite3_int64)(r*(86400000.0/24.0) + rRounder); - }else if( n==6 && strcmp(z,"minute")==0 ){ - p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0)) + rRounder); - }else if( n==6 && strcmp(z,"second")==0 ){ - p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0*60.0)) + rRounder); - }else if( n==5 && strcmp(z,"month")==0 ){ - int x, y; - computeYMD_HMS(p); - p->M += (int)r; - x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; - p->Y += x; - p->M -= x*12; - p->validJD = 0; - computeJD(p); - y = (int)r; - if( y!=r ){ - p->iJD += (sqlite3_int64)((r - y)*30.0*86400000.0 + rRounder); - } - }else if( n==4 && strcmp(z,"year")==0 ){ - int y = (int)r; - computeYMD_HMS(p); - p->Y += y; - p->validJD = 0; - computeJD(p); - if( y!=r ){ - p->iJD += (sqlite3_int64)((r - y)*365.0*86400000.0 + rRounder); + for(i=0; i-aXformType[i].rLimit && rM += (int)r; + x = p->M>0 ? 
(p->M-1)/12 : (p->M-12)/12; + p->Y += x; + p->M -= x*12; + p->validJD = 0; + r -= (int)r; + break; + } + case 2: { /* Special processing to add years */ + int y = (int)r; + computeYMD_HMS(p); + p->Y += y; + p->validJD = 0; + r -= (int)r; + break; + } + } + computeJD(p); + p->iJD += (sqlite3_int64)(r*aXformType[i].rXform + rRounder); + rc = 0; + break; } - }else{ - rc = 1; } clearYMD_HMS_TZ(p); break; @@ -19369,7 +19638,7 @@ static int isDate( sqlite3_value **argv, DateTime *p ){ - int i; + int i, n; const unsigned char *z; int eType; memset(p, 0, sizeof(*p)); @@ -19378,8 +19647,7 @@ static int isDate( } if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT || eType==SQLITE_INTEGER ){ - p->iJD = (sqlite3_int64)(sqlite3_value_double(argv[0])*86400000.0 + 0.5); - p->validJD = 1; + setRawDateNumber(p, sqlite3_value_double(argv[0])); }else{ z = sqlite3_value_text(argv[0]); if( !z || parseDateOrTime(context, (char*)z, p) ){ @@ -19388,8 +19656,11 @@ static int isDate( } for(i=1; iisError || !validJulianDay(p->iJD) ) return 1; return 0; } @@ -20187,7 +20458,7 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){ /* #include "sqliteInt.h" */ -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE /* ** Global variables. @@ -20245,7 +20516,7 @@ SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){ } } -#endif /* #ifndef SQLITE_OMIT_BUILTIN_TEST */ +#endif /* #ifndef SQLITE_UNTESTABLE */ /************** End of fault.c ***********************************************/ /************** Begin file mem0.c ********************************************/ @@ -20438,7 +20709,9 @@ static malloc_zone_t* _sqliteZone_; */ static void *sqlite3MemMalloc(int nByte){ #ifdef SQLITE_MALLOCSIZE - void *p = SQLITE_MALLOC( nByte ); + void *p; + testcase( ROUND8(nByte)==nByte ); + p = SQLITE_MALLOC( nByte ); if( p==0 ){ testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte); @@ -20447,7 +20720,7 @@ static void *sqlite3MemMalloc(int nByte){ #else sqlite3_int64 *p; assert( nByte>0 ); - nByte = ROUND8(nByte); + testcase( ROUND8(nByte)!=nByte ); p = SQLITE_MALLOC( nByte+8 ); if( p ){ p[0] = nByte; @@ -23569,8 +23842,7 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){ SQLITE_MEMORY_BARRIER; #elif defined(__GNUC__) __sync_synchronize(); -#elif !defined(SQLITE_DISABLE_INTRINSIC) && \ - defined(_MSC_VER) && _MSC_VER>=1300 +#elif MSVC_VERSION>=1300 _ReadWriteBarrier(); #elif defined(MemoryBarrier) MemoryBarrier(); @@ -24102,11 +24374,19 @@ static void sqlite3MallocAlarm(int nByte){ ** Do a memory allocation with statistics and alarms. Assume the ** lock is already held. */ -static int mallocWithAlarm(int n, void **pp){ - int nFull; +static void mallocWithAlarm(int n, void **pp){ void *p; + int nFull; assert( sqlite3_mutex_held(mem0.mutex) ); + assert( n>0 ); + + /* In Firefox (circa 2017-02-08), xRoundup() is remapped to an internal + ** implementation of malloc_good_size(), which must be called in debug + ** mode and specifically when the DMD "Dark Matter Detector" is enabled + ** or else a crash results. Hence, do not attempt to optimize out the + ** following xRoundup() call. 
*/ nFull = sqlite3GlobalConfig.m.xRoundup(n); + sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, n); if( mem0.alarmThreshold>0 ){ sqlite3_int64 nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); @@ -24130,7 +24410,6 @@ static int mallocWithAlarm(int n, void **pp){ sqlite3StatusUp(SQLITE_STATUS_MALLOC_COUNT, 1); } *pp = p; - return nFull; } /* @@ -24404,7 +24683,7 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){ sqlite3_mutex_enter(mem0.mutex); sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes); nDiff = nNew - nOld; - if( sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >= + if( nDiff>0 && sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >= mem0.alarmThreshold-nDiff ){ sqlite3MallocAlarm(nDiff); } @@ -24611,9 +24890,8 @@ SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){ if( z==0 ){ return 0; } - n = sqlite3Strlen30(z) + 1; - assert( (n&0x7fffffff)==n ); - zNew = sqlite3DbMallocRaw(db, (int)n); + n = strlen(z) + 1; + zNew = sqlite3DbMallocRaw(db, n); if( zNew ){ memcpy(zNew, z, n); } @@ -24771,7 +25049,6 @@ typedef struct et_info { /* Information about each format field */ ** Allowed values for et_info.flags */ #define FLAG_SIGNED 1 /* True if the value to convert is signed */ -#define FLAG_INTERN 2 /* True if for internal use only */ #define FLAG_STRING 4 /* Allow infinity precision */ @@ -24805,11 +25082,10 @@ static const et_info fmtinfo[] = { { '%', 0, 0, etPERCENT, 0, 0 }, { 'p', 16, 0, etPOINTER, 0, 1 }, -/* All the rest have the FLAG_INTERN bit set and are thus for internal -** use only */ - { 'T', 0, 2, etTOKEN, 0, 0 }, - { 'S', 0, 2, etSRCLIST, 0, 0 }, - { 'r', 10, 3, etORDINAL, 0, 0 }, + /* All the rest are undocumented and are for internal use only */ + { 'T', 0, 0, etTOKEN, 0, 0 }, + { 'S', 0, 0, etSRCLIST, 0, 0 }, + { 'r', 10, 1, etORDINAL, 0, 0 }, }; /* @@ -24903,7 +25179,6 @@ SQLITE_PRIVATE void sqlite3VXPrintf( etByte done; /* Loop termination flag */ etByte xtype = etINVALID; /* Conversion paradigm */ u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ - u8 useIntern; /* Ok to use internal conversions (ex: %T) */ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. 
*/ sqlite_uint64 longvalue; /* Value for integer types */ LONGDOUBLE_TYPE realvalue; /* Value for real types */ @@ -24922,13 +25197,11 @@ SQLITE_PRIVATE void sqlite3VXPrintf( char buf[etBUFSIZE]; /* Conversion buffer */ bufpt = 0; - if( pAccum->printfFlags ){ - if( (bArgList = (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC))!=0 ){ - pArgList = va_arg(ap, PrintfArguments*); - } - useIntern = pAccum->printfFlags & SQLITE_PRINTF_INTERNAL; + if( (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC)!=0 ){ + pArgList = va_arg(ap, PrintfArguments*); + bArgList = 1; }else{ - bArgList = useIntern = 0; + bArgList = 0; } for(; (c=(*fmt))!=0; ++fmt){ if( c!='%' ){ @@ -25040,11 +25313,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf( for(idx=0; idxflags & FLAG_INTERN)==0 ){ - xtype = infop->type; - }else{ - return; - } + xtype = infop->type; break; } } @@ -25413,7 +25682,9 @@ SQLITE_PRIVATE void sqlite3VXPrintf( break; } case etTOKEN: { - Token *pToken = va_arg(ap, Token*); + Token *pToken; + if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return; + pToken = va_arg(ap, Token*); assert( bArgList==0 ); if( pToken && pToken->n ){ sqlite3StrAccumAppend(pAccum, (const char*)pToken->z, pToken->n); @@ -25422,9 +25693,13 @@ SQLITE_PRIVATE void sqlite3VXPrintf( break; } case etSRCLIST: { - SrcList *pSrc = va_arg(ap, SrcList*); - int k = va_arg(ap, int); - struct SrcList_item *pItem = &pSrc->a[k]; + SrcList *pSrc; + int k; + struct SrcList_item *pItem; + if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return; + pSrc = va_arg(ap, SrcList*); + k = va_arg(ap, int); + pItem = &pSrc->a[k]; assert( bArgList==0 ); assert( k>=0 && knSrc ); if( pItem->zDatabase ){ @@ -25446,9 +25721,13 @@ SQLITE_PRIVATE void sqlite3VXPrintf( ** the output. */ width -= length; - if( width>0 && !flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' '); - sqlite3StrAccumAppend(pAccum, bufpt, length); - if( width>0 && flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' '); + if( width>0 ){ + if( !flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' '); + sqlite3StrAccumAppend(pAccum, bufpt, length); + if( flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' '); + }else{ + sqlite3StrAccumAppend(pAccum, bufpt, length); + } if( zExtra ){ sqlite3DbFree(pAccum->db, zExtra); @@ -25553,7 +25832,7 @@ SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){ assert( p->accError==0 || p->nAlloc==0 ); if( p->nChar+N >= p->nAlloc ){ enlargeAndAppend(p,z,N); - }else{ + }else if( N ){ assert( p->zText ); p->nChar += N; memcpy(&p->zText[p->nChar-N], z, N); @@ -25573,18 +25852,23 @@ SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum *p, const char *z){ ** Return a pointer to the resulting string. Return a NULL ** pointer if any kind of error was encountered. 
*/ +static SQLITE_NOINLINE char *strAccumFinishRealloc(StrAccum *p){ + assert( p->mxAlloc>0 && !isMalloced(p) ); + p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 ); + if( p->zText ){ + memcpy(p->zText, p->zBase, p->nChar+1); + p->printfFlags |= SQLITE_PRINTF_MALLOCED; + }else{ + setStrAccumError(p, STRACCUM_NOMEM); + } + return p->zText; +} SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){ if( p->zText ){ assert( (p->zText==p->zBase)==!isMalloced(p) ); p->zText[p->nChar] = 0; if( p->mxAlloc>0 && !isMalloced(p) ){ - p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 ); - if( p->zText ){ - memcpy(p->zText, p->zBase, p->nChar+1); - p->printfFlags |= SQLITE_PRINTF_MALLOCED; - }else{ - setStrAccumError(p, STRACCUM_NOMEM); - } + return strAccumFinishRealloc(p); } } return p->zText; @@ -25724,7 +26008,8 @@ SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_li #endif sqlite3StrAccumInit(&acc, 0, zBuf, n, 0); sqlite3VXPrintf(&acc, zFormat, ap); - return sqlite3StrAccumFinish(&acc); + zBuf[acc.nChar] = 0; + return zBuf; } SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ char *z; @@ -25872,6 +26157,7 @@ static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){ va_start(ap, zFormat); sqlite3VXPrintf(&acc, zFormat, ap); va_end(ap); + assert( acc.nChar>0 ); if( zBuf[acc.nChar-1]!='\n' ) sqlite3StrAccumAppend(&acc, "\n", 1); sqlite3StrAccumFinish(&acc); fprintf(stdout,"%s", zBuf); @@ -26432,7 +26718,7 @@ SQLITE_API void sqlite3_randomness(int N, void *pBuf){ sqlite3_mutex_leave(mutex); } -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE /* ** For testing purposes, we sometimes want to preserve the state of ** PRNG and restore the PRNG to its saved state at a later time, or @@ -26457,7 +26743,7 @@ SQLITE_PRIVATE void sqlite3PrngRestoreState(void){ sizeof(sqlite3Prng) ); } -#endif /* SQLITE_OMIT_BUILTIN_TEST */ +#endif /* SQLITE_UNTESTABLE */ /************** End of random.c **********************************************/ /************** Begin file threads.c *****************************************/ @@ -27315,7 +27601,7 @@ SQLITE_PRIVATE void sqlite3Coverage(int x){ ** Return whatever integer value the test callback returns, or return ** SQLITE_OK if no test callback is installed. */ -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE SQLITE_PRIVATE int sqlite3FaultSim(int iTest){ int (*xCallback)(int) = sqlite3GlobalConfig.xTestCallback; return xCallback ? 
xCallback(iTest) : SQLITE_OK; @@ -28413,13 +28699,11 @@ SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){ u32 x; memcpy(&x,p,4); return x; -#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \ - && defined(__GNUC__) && GCC_VERSION>=4003000 +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) u32 x; memcpy(&x,p,4); return __builtin_bswap32(x); -#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \ - && defined(_MSC_VER) && _MSC_VER>=1300 +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 u32 x; memcpy(&x,p,4); return _byteswap_ulong(x); @@ -28431,12 +28715,10 @@ SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){ SQLITE_PRIVATE void sqlite3Put4byte(unsigned char *p, u32 v){ #if SQLITE_BYTEORDER==4321 memcpy(p,&v,4); -#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \ - && defined(__GNUC__) && GCC_VERSION>=4003000 +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) u32 x = __builtin_bswap32(v); memcpy(p,&x,4); -#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \ - && defined(_MSC_VER) && _MSC_VER>=1300 +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 u32 x = _byteswap_ulong(v); memcpy(p,&x,4); #else @@ -28552,6 +28834,9 @@ SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){ ** overflow, leave *pA unchanged and return 1. */ SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){ +#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000 + return __builtin_add_overflow(*pA, iB, pA); +#else i64 iA = *pA; testcase( iA==0 ); testcase( iA==1 ); testcase( iB==-1 ); testcase( iB==0 ); @@ -28566,8 +28851,12 @@ SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){ } *pA += iB; return 0; +#endif } SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){ +#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000 + return __builtin_sub_overflow(*pA, iB, pA); +#else testcase( iB==SMALLEST_INT64+1 ); if( iB==SMALLEST_INT64 ){ testcase( (*pA)==(-1) ); testcase( (*pA)==0 ); @@ -28577,8 +28866,12 @@ SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){ }else{ return sqlite3AddInt64(pA, -iB); } +#endif } SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){ +#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000 + return __builtin_mul_overflow(*pA, iB, pA); +#else i64 iA = *pA; if( iB>0 ){ if( iA>LARGEST_INT64/iB ) return 1; @@ -28594,6 +28887,7 @@ SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){ } *pA = iA*iB; return 0; +#endif } /* @@ -28727,6 +29021,109 @@ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){ } #endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */ +/* +** Add a new name/number pair to a VList. This might require that the +** VList object be reallocated, so return the new VList. If an OOM +** error occurs, the original VList returned and the +** db->mallocFailed flag is set. +** +** A VList is really just an array of integers. To destroy a VList, +** simply pass it to sqlite3DbFree(). +** +** The first integer is the number of integers allocated for the whole +** VList. The second integer is the number of integers actually used. +** Each name/number pair is encoded by subsequent groups of 3 or more +** integers. +** +** Each name/number pair starts with two integers which are the numeric +** value for the pair and the size of the name/number pair, respectively. +** The text name overlays one or more following integers. The text name +** is always zero-terminated. 
+** +** Conceptually: +** +** struct VList { +** int nAlloc; // Number of allocated slots +** int nUsed; // Number of used slots +** struct VListEntry { +** int iValue; // Value for this entry +** int nSlot; // Slots used by this entry +** // ... variable name goes here +** } a[0]; +** } +** +** During code generation, pointers to the variable names within the +** VList are taken. When that happens, nAlloc is set to zero as an +** indication that the VList may never again be enlarged, since the +** accompanying realloc() would invalidate the pointers. +*/ +SQLITE_PRIVATE VList *sqlite3VListAdd( + sqlite3 *db, /* The database connection used for malloc() */ + VList *pIn, /* The input VList. Might be NULL */ + const char *zName, /* Name of symbol to add */ + int nName, /* Bytes of text in zName */ + int iVal /* Value to associate with zName */ +){ + int nInt; /* number of sizeof(int) objects needed for zName */ + char *z; /* Pointer to where zName will be stored */ + int i; /* Index in pIn[] where zName is stored */ + + nInt = nName/4 + 3; + assert( pIn==0 || pIn[0]>=3 ); /* Verify ok to add new elements */ + if( pIn==0 || pIn[1]+nInt > pIn[0] ){ + /* Enlarge the allocation */ + int nAlloc = (pIn ? pIn[0]*2 : 10) + nInt; + VList *pOut = sqlite3DbRealloc(db, pIn, nAlloc*sizeof(int)); + if( pOut==0 ) return pIn; + if( pIn==0 ) pOut[1] = 2; + pIn = pOut; + pIn[0] = nAlloc; + } + i = pIn[1]; + pIn[i] = iVal; + pIn[i+1] = nInt; + z = (char*)&pIn[i+2]; + pIn[1] = i+nInt; + assert( pIn[1]<=pIn[0] ); + memcpy(z, zName, nName); + z[nName] = 0; + return pIn; +} + +/* +** Return a pointer to the name of a variable in the given VList that +** has the value iVal. Or return a NULL if there is no such variable in +** the list +*/ +SQLITE_PRIVATE const char *sqlite3VListNumToName(VList *pIn, int iVal){ + int i, mx; + if( pIn==0 ) return 0; + mx = pIn[1]; + i = 2; + do{ + if( pIn[i]==iVal ) return (char*)&pIn[i+2]; + i += pIn[i+1]; + }while( i0 then r[P1]-=P3, goto P2"), - /* 67 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]-=P3, goto P2"), + /* 67 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), /* 68 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), /* 69 */ "IncrVacuum" OpHelp(""), /* 70 */ "VNext" OpHelp(""), @@ -29136,48 +29533,47 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 118 */ "ResetCount" OpHelp(""), /* 119 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), /* 120 */ "SorterData" OpHelp("r[P2]=data"), - /* 121 */ "RowKey" OpHelp("r[P2]=key"), - /* 122 */ "RowData" OpHelp("r[P2]=data"), - /* 123 */ "Rowid" OpHelp("r[P2]=rowid"), - /* 124 */ "NullRow" OpHelp(""), - /* 125 */ "SorterInsert" OpHelp(""), - /* 126 */ "IdxInsert" OpHelp("key=r[P2]"), - /* 127 */ "IdxDelete" OpHelp("key=r[P2@P3]"), - /* 128 */ "Seek" OpHelp("Move P3 to P1.rowid"), - /* 129 */ "IdxRowid" OpHelp("r[P2]=rowid"), - /* 130 */ "Destroy" OpHelp(""), - /* 131 */ "Clear" OpHelp(""), + /* 121 */ "RowData" OpHelp("r[P2]=data"), + /* 122 */ "Rowid" OpHelp("r[P2]=rowid"), + /* 123 */ "NullRow" OpHelp(""), + /* 124 */ "SorterInsert" OpHelp("key=r[P2]"), + /* 125 */ "IdxInsert" OpHelp("key=r[P2]"), + /* 126 */ "IdxDelete" OpHelp("key=r[P2@P3]"), + /* 127 */ "Seek" OpHelp("Move P3 to P1.rowid"), + /* 128 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 129 */ "Destroy" OpHelp(""), + /* 130 */ "Clear" OpHelp(""), + /* 131 */ "ResetSorter" OpHelp(""), /* 132 */ "Real" OpHelp("r[P2]=P4"), - /* 133 */ "ResetSorter" OpHelp(""), - /* 134 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"), - /* 135 */ "CreateTable" 
OpHelp("r[P2]=root iDb=P1"), - /* 136 */ "ParseSchema" OpHelp(""), - /* 137 */ "LoadAnalysis" OpHelp(""), - /* 138 */ "DropTable" OpHelp(""), - /* 139 */ "DropIndex" OpHelp(""), - /* 140 */ "DropTrigger" OpHelp(""), - /* 141 */ "IntegrityCk" OpHelp(""), - /* 142 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), - /* 143 */ "Param" OpHelp(""), - /* 144 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), - /* 145 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), - /* 146 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), - /* 147 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 148 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 149 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 150 */ "Expire" OpHelp(""), - /* 151 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 152 */ "VBegin" OpHelp(""), - /* 153 */ "VCreate" OpHelp(""), - /* 154 */ "VDestroy" OpHelp(""), - /* 155 */ "VOpen" OpHelp(""), - /* 156 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 157 */ "VRename" OpHelp(""), - /* 158 */ "Pagecount" OpHelp(""), - /* 159 */ "MaxPgcnt" OpHelp(""), - /* 160 */ "CursorHint" OpHelp(""), - /* 161 */ "Noop" OpHelp(""), - /* 162 */ "Explain" OpHelp(""), + /* 133 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"), + /* 134 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"), + /* 135 */ "ParseSchema" OpHelp(""), + /* 136 */ "LoadAnalysis" OpHelp(""), + /* 137 */ "DropTable" OpHelp(""), + /* 138 */ "DropIndex" OpHelp(""), + /* 139 */ "DropTrigger" OpHelp(""), + /* 140 */ "IntegrityCk" OpHelp(""), + /* 141 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 142 */ "Param" OpHelp(""), + /* 143 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 144 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 145 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 146 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 147 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 148 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 149 */ "Expire" OpHelp(""), + /* 150 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 151 */ "VBegin" OpHelp(""), + /* 152 */ "VCreate" OpHelp(""), + /* 153 */ "VDestroy" OpHelp(""), + /* 154 */ "VOpen" OpHelp(""), + /* 155 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 156 */ "VRename" OpHelp(""), + /* 157 */ "Pagecount" OpHelp(""), + /* 158 */ "MaxPgcnt" OpHelp(""), + /* 159 */ "CursorHint" OpHelp(""), + /* 160 */ "Noop" OpHelp(""), + /* 161 */ "Explain" OpHelp(""), }; return azName[i]; } @@ -30446,7 +30842,14 @@ struct unixFileId { #if OS_VXWORKS struct vxworksFileId *pId; /* Unique file ID for vxworks. */ #else - ino_t ino; /* Inode number */ + /* We are told that some versions of Android contain a bug that + ** sizes ino_t at only 32-bits instead of 64-bits. (See + ** https://android-review.googlesource.com/#/c/115351/3/dist/sqlite3.c) + ** To work around this, always allocate 64-bits for the inode number. + ** On small machines that only have 32-bit inodes, this wastes 4 bytes, + ** but that should not be a big deal. 
*/ + /* WAS: ino_t ino; */ + u64 ino; /* Inode number */ #endif }; @@ -30691,7 +31094,7 @@ static int findInodeInfo( #if OS_VXWORKS fileId.pId = pFile->pId; #else - fileId.ino = statbuf.st_ino; + fileId.ino = (u64)statbuf.st_ino; #endif pInode = inodeList; while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){ @@ -30725,7 +31128,8 @@ static int fileHasMoved(unixFile *pFile){ #else struct stat buf; return pFile->pInode!=0 && - (osStat(pFile->zPath, &buf)!=0 || buf.st_ino!=pFile->pInode->fileId.ino); + (osStat(pFile->zPath, &buf)!=0 + || (u64)buf.st_ino!=pFile->pInode->fileId.ino); #endif } @@ -34897,7 +35301,7 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){ unixEnterMutex(); pInode = inodeList; while( pInode && (pInode->fileId.dev!=sStat.st_dev - || pInode->fileId.ino!=sStat.st_ino) ){ + || pInode->fileId.ino!=(u64)sStat.st_ino) ){ pInode = pInode->pNext; } if( pInode ){ @@ -43458,7 +43862,7 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ return p->iSize; } -#ifndef SQLITE_OMIT_BUILTIN_TEST +#ifndef SQLITE_UNTESTABLE /* ** Let V[] be an array of unsigned characters sufficient to hold ** up to N bits. Let I be an integer between 0 and N. 0<=Ipgno>0 ); /* Page number is 1 or more */ + assert( pPg->pgno>0 || pPg->pPager==0 ); /* Page number is 1 or more */ pCache = pPg->pCache; assert( pCache!=0 ); /* Every page has an associated PCache */ if( pPg->flags & PGHDR_CLEAN ){ @@ -43863,6 +44267,12 @@ SQLITE_PRIVATE int sqlite3PcacheSize(void){ return sizeof(PCache); } ** has already been allocated and is passed in as the p pointer. ** The caller discovers how much space needs to be allocated by ** calling sqlite3PcacheSize(). +** +** szExtra is some extra space allocated for each page. The first +** 8 bytes of the extra space will be zeroed as the page is allocated, +** but remaining content will be uninitialized. Though it is opaque +** to this module, the extra space really ends up being the MemPage +** structure in the pager. */ SQLITE_PRIVATE int sqlite3PcacheOpen( int szPage, /* Size of every page */ @@ -43875,6 +44285,7 @@ SQLITE_PRIVATE int sqlite3PcacheOpen( memset(p, 0, sizeof(PCache)); p->szPage = 1; p->szExtra = szExtra; + assert( szExtra>=8 ); /* First 8 bytes will be zeroed */ p->bPurgeable = bPurgeable; p->eCreate = 2; p->xStress = xStress; @@ -43944,7 +44355,6 @@ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch( assert( pCache!=0 ); assert( pCache->pCache!=0 ); assert( createFlag==3 || createFlag==0 ); - assert( pgno>0 ); assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) ); /* eCreate defines what to do if the page does not exist. 
@@ -44044,7 +44454,7 @@ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit( pPgHdr->pPage = pPage; pPgHdr->pData = pPage->pBuf; pPgHdr->pExtra = (void *)&pPgHdr[1]; - memset(pPgHdr->pExtra, 0, pCache->szExtra); + memset(pPgHdr->pExtra, 0, 8); pPgHdr->pCache = pCache; pPgHdr->pgno = pgno; pPgHdr->flags = PGHDR_CLEAN; @@ -46268,7 +46678,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 #ifdef SQLITE_OMIT_WAL # define sqlite3WalOpen(x,y,z) 0 # define sqlite3WalLimit(x,y) -# define sqlite3WalClose(w,x,y,z) 0 +# define sqlite3WalClose(v,w,x,y,z) 0 # define sqlite3WalBeginReadTransaction(y,z) 0 # define sqlite3WalEndReadTransaction(z) # define sqlite3WalDbsize(y) 0 @@ -46278,7 +46688,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 # define sqlite3WalSavepoint(y,z) # define sqlite3WalSavepointUndo(y,z) 0 # define sqlite3WalFrames(u,v,w,x,y,z) 0 -# define sqlite3WalCheckpoint(r,s,t,u,v,w,x,y,z) 0 +# define sqlite3WalCheckpoint(q,r,s,t,u,v,w,x,y,z) 0 # define sqlite3WalCallback(z) 0 # define sqlite3WalExclusiveMode(y,z) 0 # define sqlite3WalHeapMemory(z) 0 @@ -46296,7 +46706,7 @@ typedef struct Wal Wal; /* Open and close a connection to a write-ahead log. */ SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *, int, i64, Wal**); -SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, int sync_flags, int, u8 *); +SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, sqlite3*, int sync_flags, int, u8 *); /* Set the limiting size of a WAL file. */ SQLITE_PRIVATE void sqlite3WalLimit(Wal*, i64); @@ -46339,6 +46749,7 @@ SQLITE_PRIVATE int sqlite3WalFrames(Wal *pWal, int, PgHdr *, Pgno, int, int); /* Copy pages from the log to the database file */ SQLITE_PRIVATE int sqlite3WalCheckpoint( Wal *pWal, /* Write-ahead log connection */ + sqlite3 *db, /* Check this handle's interrupt flag */ int eMode, /* One of PASSIVE, FULL and RESTART */ int (*xBusy)(void*), /* Function to call when busy */ void *pBusyArg, /* Context argument for xBusyHandler */ @@ -46370,6 +46781,7 @@ SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal); #ifdef SQLITE_ENABLE_SNAPSHOT SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot); SQLITE_PRIVATE void sqlite3WalSnapshotOpen(Wal *pWal, sqlite3_snapshot *pSnapshot); +SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal); #endif #ifdef SQLITE_ENABLE_ZIPVFS @@ -47059,6 +47471,7 @@ struct Pager { int nRead; /* Database pages read */ #endif void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */ + int (*xGet)(Pager*,Pgno,DbPage**,int); /* Routine to fetch a patch */ #ifdef SQLITE_HAS_CODEC void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */ void (*xCodecSizeChng)(void*,int,int); /* Notify of page size changes */ @@ -47179,14 +47592,20 @@ static const unsigned char aJournalMagic[] = { #define isOpen(pFd) ((pFd)->pMethods!=0) /* -** Return true if this pager uses a write-ahead log instead of the usual -** rollback journal. Otherwise false. +** Return true if this pager uses a write-ahead log to read page pgno. +** Return false if the pager reads pgno directly from the database. 
*/ -#ifndef SQLITE_OMIT_WAL -SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager){ - return (pPager->pWal!=0); +#if !defined(SQLITE_OMIT_WAL) && defined(SQLITE_DIRECT_OVERFLOW_READ) +SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager, Pgno pgno){ + u32 iRead = 0; + int rc; + if( pPager->pWal==0 ) return 0; + rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); + return rc || iRead; } -# define pagerUseWal(x) sqlite3PagerUseWal(x) +#endif +#ifndef SQLITE_OMIT_WAL +# define pagerUseWal(x) ((x)->pWal!=0) #else # define pagerUseWal(x) 0 # define pagerRollbackWal(x) 0 @@ -47385,6 +47804,33 @@ static char *print_pager_state(Pager *p){ } #endif +/* Forward references to the various page getters */ +static int getPageNormal(Pager*,Pgno,DbPage**,int); +static int getPageError(Pager*,Pgno,DbPage**,int); +#if SQLITE_MAX_MMAP_SIZE>0 +static int getPageMMap(Pager*,Pgno,DbPage**,int); +#endif + +/* +** Set the Pager.xGet method for the appropriate routine used to fetch +** content from the pager. +*/ +static void setGetterMethod(Pager *pPager){ + if( pPager->errCode ){ + pPager->xGet = getPageError; +#if SQLITE_MAX_MMAP_SIZE>0 + }else if( USEFETCH(pPager) +#ifdef SQLITE_HAS_CODEC + && pPager->xCodec==0 +#endif + ){ + pPager->xGet = getPageMMap; +#endif /* SQLITE_MAX_MMAP_SIZE>0 */ + }else{ + pPager->xGet = getPageNormal; + } +} + /* ** Return true if it is necessary to write page *pPg into the sub-journal. ** A page needs to be written into the sub-journal if there exists one @@ -48199,6 +48645,7 @@ static void pager_unlock(Pager *pPager){ } if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0); pPager->errCode = SQLITE_OK; + setGetterMethod(pPager); } pPager->journalOff = 0; @@ -48236,6 +48683,7 @@ static int pager_error(Pager *pPager, int rc){ if( rc2==SQLITE_FULL || rc2==SQLITE_IOERR ){ pPager->errCode = rc; pPager->eState = PAGER_ERROR; + setGetterMethod(pPager); } return rc; } @@ -48404,7 +48852,7 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){ pPager->pInJournal = 0; pPager->nRec = 0; if( rc==SQLITE_OK ){ - if( pagerFlushOnCommit(pPager, bCommit) ){ + if( MEMDB || pagerFlushOnCommit(pPager, bCommit) ){ sqlite3PcacheCleanAll(pPager->pPCache); }else{ sqlite3PcacheClearWritable(pPager->pPCache); @@ -49803,6 +50251,7 @@ static void pagerFixMaplimit(Pager *pPager){ sqlite3_int64 sz; sz = pPager->szMmap; pPager->bUseFetch = (sz>0); + setGetterMethod(pPager); sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_MMAP_SIZE, &sz); } #endif @@ -50299,6 +50748,7 @@ static int pagerSyncHotJournal(Pager *pPager){ return rc; } +#if SQLITE_MAX_MMAP_SIZE>0 /* ** Obtain a reference to a memory mapped page object for page number pgno. ** The new object will use the pointer pData, obtained from xFetch(). @@ -50321,7 +50771,8 @@ static int pagerAcquireMapPage( *ppPage = p = pPager->pMmapFreelist; pPager->pMmapFreelist = p->pDirty; p->pDirty = 0; - memset(p->pExtra, 0, pPager->nExtra); + assert( pPager->nExtra>=8 ); + memset(p->pExtra, 0, 8); }else{ *ppPage = p = (PgHdr *)sqlite3MallocZero(sizeof(PgHdr) + pPager->nExtra); if( p==0 ){ @@ -50346,6 +50797,7 @@ static int pagerAcquireMapPage( return SQLITE_OK; } +#endif /* ** Release a reference to page pPg. pPg must have been returned by an @@ -50388,9 +50840,10 @@ static void pagerFreeMapHdrs(Pager *pPager){ ** a hot journal may be left in the filesystem but no error is returned ** to the caller. 
*/ -SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager){ +SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3 *db){ u8 *pTmp = (u8 *)pPager->pTmpSpace; + assert( db || pagerUseWal(pPager)==0 ); assert( assert_pager_state(pPager) ); disable_simulated_io_errors(); sqlite3BeginBenignMalloc(); @@ -50398,7 +50851,10 @@ SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager){ /* pPager->errCode = 0; */ pPager->exclusiveMode = 0; #ifndef SQLITE_OMIT_WAL - sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags, pPager->pageSize, pTmp); + assert( db || pPager->pWal==0 ); + sqlite3WalClose(pPager->pWal, db, pPager->ckptSyncFlags, pPager->pageSize, + (db && (db->flags & SQLITE_NoCkptOnClose) ? 0 : pTmp) + ); pPager->pWal = 0; #endif pager_reset(pPager); @@ -50917,7 +51373,9 @@ SQLITE_PRIVATE int sqlite3PagerFlush(Pager *pPager){ ** ** The nExtra parameter specifies the number of bytes of space allocated ** along with each page reference. This space is available to the user -** via the sqlite3PagerGetExtra() API. +** via the sqlite3PagerGetExtra() API. When a new page is allocated, the +** first 8 bytes of this space are zeroed but the remainder is uninitialized. +** (The extra space is used by btree as the MemPage object.) ** ** The flags argument is used to specify properties that affect the ** operation of the pager. It should be passed some bitwise combination @@ -51147,8 +51605,8 @@ SQLITE_PRIVATE int sqlite3PagerOpen( /* Initialize the PCache object. */ if( rc==SQLITE_OK ){ - assert( nExtra<1000 ); nExtra = ROUND8(nExtra); + assert( nExtra>=8 && nExtra<1000 ); rc = sqlite3PcacheOpen(szPageDflt, nExtra, !memDb, !memDb?pagerStress:0, (void *)pPager, pPager->pPCache); } @@ -51213,6 +51671,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( /* pPager->xBusyHandler = 0; */ /* pPager->pBusyHandlerArg = 0; */ pPager->xReiniter = xReinit; + setGetterMethod(pPager); /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ /* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */ @@ -51626,10 +52085,17 @@ static void pagerUnlockIfUnused(Pager *pPager){ } /* -** Acquire a reference to page number pgno in pager pPager (a page -** reference has type DbPage*). If the requested reference is +** The page getter methods each try to acquire a reference to a +** page with page number pgno. If the requested reference is ** successfully obtained, it is copied to *ppPage and SQLITE_OK returned. ** +** There are different implementations of the getter method depending +** on the current state of the pager. +** +** getPageNormal() -- The normal getter +** getPageError() -- Used if the pager is in an error state +** getPageMmap() -- Used if memory-mapped I/O is enabled +** ** If the requested page is already in the cache, it is returned. ** Otherwise, a new page object is allocated and populated with data ** read from the database file. In some cases, the pcache module may @@ -51641,14 +52107,14 @@ static void pagerUnlockIfUnused(Pager *pPager){ ** already in the cache when this function is called, then the extra ** data is left as it was when the page object was last used. ** -** If the database image is smaller than the requested page or if a -** non-zero value is passed as the noContent parameter and the +** If the database image is smaller than the requested page or if +** the flags parameter contains the PAGER_GET_NOCONTENT bit and the ** requested page is not already stored in the cache, then no ** actual disk read occurs. In this case the memory image of the ** page is initialized to all zeros. 
** -** If noContent is true, it means that we do not care about the contents -** of the page. This occurs in two scenarios: +** If PAGER_GET_NOCONTENT is true, it means that we do not care about +** the contents of the page. This occurs in two scenarios: ** ** a) When reading a free-list leaf page from the database, and ** @@ -51656,8 +52122,8 @@ static void pagerUnlockIfUnused(Pager *pPager){ ** a new page into the cache to be filled with the data read ** from the savepoint journal. ** -** If noContent is true, then the data returned is zeroed instead of -** being read from the database. Additionally, the bits corresponding +** If PAGER_GET_NOCONTENT is true, then the data returned is zeroed instead +** of being read from the database. Additionally, the bits corresponding ** to pgno in Pager.pInJournal (bitvec of pages already written to the ** journal file) and the PagerSavepoint.pInSavepoint bitvecs of any open ** savepoints are set. This means if the page is made writable at any @@ -51675,106 +52141,39 @@ static void pagerUnlockIfUnused(Pager *pPager){ ** Since Lookup() never goes to disk, it never has to deal with locks ** or journal files. */ -SQLITE_PRIVATE int sqlite3PagerGet( +static int getPageNormal( Pager *pPager, /* The pager open on the database file */ Pgno pgno, /* Page number to fetch */ DbPage **ppPage, /* Write a pointer to the page here */ int flags /* PAGER_GET_XXX flags */ ){ int rc = SQLITE_OK; - PgHdr *pPg = 0; - u32 iFrame = 0; /* Frame to read from WAL file */ - const int noContent = (flags & PAGER_GET_NOCONTENT); - - /* It is acceptable to use a read-only (mmap) page for any page except - ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY - ** flag was specified by the caller. And so long as the db is not a - ** temporary or in-memory database. */ - const int bMmapOk = (pgno>1 && USEFETCH(pPager) - && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY)) -#ifdef SQLITE_HAS_CODEC - && pPager->xCodec==0 -#endif - ); + PgHdr *pPg; + u8 noContent; /* True if PAGER_GET_NOCONTENT is set */ + sqlite3_pcache_page *pBase; - /* Optimization note: Adding the "pgno<=1" term before "pgno==0" here - ** allows the compiler optimizer to reuse the results of the "pgno>1" - ** test in the previous statement, and avoid testing pgno==0 in the - ** common case where pgno is large. */ - if( pgno<=1 && pgno==0 ){ - return SQLITE_CORRUPT_BKPT; - } + assert( pPager->errCode==SQLITE_OK ); assert( pPager->eState>=PAGER_READER ); assert( assert_pager_state(pPager) ); - assert( noContent==0 || bMmapOk==0 ); - assert( pPager->hasHeldSharedLock==1 ); - /* If the pager is in the error state, return an error immediately. - ** Otherwise, request the page from the PCache layer. 
*/ - if( pPager->errCode!=SQLITE_OK ){ - rc = pPager->errCode; - }else{ - if( bMmapOk && pagerUseWal(pPager) ){ - rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); - if( rc!=SQLITE_OK ) goto pager_acquire_err; - } - - if( bMmapOk && iFrame==0 ){ - void *pData = 0; - - rc = sqlite3OsFetch(pPager->fd, - (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData - ); - - if( rc==SQLITE_OK && pData ){ - if( pPager->eState>PAGER_READER || pPager->tempFile ){ - pPg = sqlite3PagerLookup(pPager, pgno); - } - if( pPg==0 ){ - rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg); - }else{ - sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData); - } - if( pPg ){ - assert( rc==SQLITE_OK ); - *ppPage = pPg; - return SQLITE_OK; - } - } - if( rc!=SQLITE_OK ){ - goto pager_acquire_err; - } - } - - { - sqlite3_pcache_page *pBase; - pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3); - if( pBase==0 ){ - rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase); - if( rc!=SQLITE_OK ) goto pager_acquire_err; - if( pBase==0 ){ - pPg = *ppPage = 0; - rc = SQLITE_NOMEM_BKPT; - goto pager_acquire_err; - } - } - pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase); - assert( pPg!=0 ); - } - } - - if( rc!=SQLITE_OK ){ - /* Either the call to sqlite3PcacheFetch() returned an error or the - ** pager was already in the error-state when this function was called. - ** Set pPg to 0 and jump to the exception handler. */ + if( pgno==0 ) return SQLITE_CORRUPT_BKPT; + pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3); + if( pBase==0 ){ pPg = 0; - goto pager_acquire_err; + rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase); + if( rc!=SQLITE_OK ) goto pager_acquire_err; + if( pBase==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto pager_acquire_err; + } } + pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase); assert( pPg==(*ppPage) ); assert( pPg->pgno==pgno ); assert( pPg->pPager==pPager || pPg->pPager==0 ); + noContent = (flags & PAGER_GET_NOCONTENT)!=0; if( pPg->pPager && !noContent ){ /* In this case the pcache already contains an initialized copy of ** the page. Return without further ado. */ @@ -51784,17 +52183,18 @@ SQLITE_PRIVATE int sqlite3PagerGet( }else{ /* The pager cache has created a new page. Its content needs to - ** be initialized. */ - - pPg->pPager = pPager; - - /* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page - ** number greater than this, or the unused locking-page, is requested. */ + ** be initialized. 
But first some error checks: + ** + ** (1) The maximum page number is 2^31 + ** (2) Never try to fetch the locking page + */ if( pgno>PAGER_MAX_PGNO || pgno==PAGER_MJ_PGNO(pPager) ){ rc = SQLITE_CORRUPT_BKPT; goto pager_acquire_err; } + pPg->pPager = pPager; + assert( !isOpen(pPager->fd) || !MEMDB ); if( !isOpen(pPager->fd) || pPager->dbSizepPager->mxPgno ){ @@ -51820,7 +52220,8 @@ SQLITE_PRIVATE int sqlite3PagerGet( memset(pPg->pData, 0, pPager->pageSize); IOTRACE(("ZERO %p %d\n", pPager, pgno)); }else{ - if( pagerUseWal(pPager) && bMmapOk==0 ){ + u32 iFrame = 0; /* Frame to read from WAL file */ + if( pagerUseWal(pPager) ){ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); if( rc!=SQLITE_OK ) goto pager_acquire_err; } @@ -51833,7 +52234,6 @@ SQLITE_PRIVATE int sqlite3PagerGet( } pager_set_pagehash(pPg); } - return SQLITE_OK; pager_acquire_err: @@ -51842,11 +52242,109 @@ SQLITE_PRIVATE int sqlite3PagerGet( sqlite3PcacheDrop(pPg); } pagerUnlockIfUnused(pPager); - *ppPage = 0; return rc; } +#if SQLITE_MAX_MMAP_SIZE>0 +/* The page getter for when memory-mapped I/O is enabled */ +static int getPageMMap( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int flags /* PAGER_GET_XXX flags */ +){ + int rc = SQLITE_OK; + PgHdr *pPg = 0; + u32 iFrame = 0; /* Frame to read from WAL file */ + + /* It is acceptable to use a read-only (mmap) page for any page except + ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY + ** flag was specified by the caller. And so long as the db is not a + ** temporary or in-memory database. */ + const int bMmapOk = (pgno>1 + && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY)) + ); + + assert( USEFETCH(pPager) ); +#ifdef SQLITE_HAS_CODEC + assert( pPager->xCodec==0 ); +#endif + + /* Optimization note: Adding the "pgno<=1" term before "pgno==0" here + ** allows the compiler optimizer to reuse the results of the "pgno>1" + ** test in the previous statement, and avoid testing pgno==0 in the + ** common case where pgno is large. 
*/ + if( pgno<=1 && pgno==0 ){ + return SQLITE_CORRUPT_BKPT; + } + assert( pPager->eState>=PAGER_READER ); + assert( assert_pager_state(pPager) ); + assert( pPager->hasHeldSharedLock==1 ); + assert( pPager->errCode==SQLITE_OK ); + + if( bMmapOk && pagerUseWal(pPager) ){ + rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); + if( rc!=SQLITE_OK ){ + *ppPage = 0; + return rc; + } + } + if( bMmapOk && iFrame==0 ){ + void *pData = 0; + rc = sqlite3OsFetch(pPager->fd, + (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData + ); + if( rc==SQLITE_OK && pData ){ + if( pPager->eState>PAGER_READER || pPager->tempFile ){ + pPg = sqlite3PagerLookup(pPager, pgno); + } + if( pPg==0 ){ + rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg); + }else{ + sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData); + } + if( pPg ){ + assert( rc==SQLITE_OK ); + *ppPage = pPg; + return SQLITE_OK; + } + } + if( rc!=SQLITE_OK ){ + *ppPage = 0; + return rc; + } + } + return getPageNormal(pPager, pgno, ppPage, flags); +} +#endif /* SQLITE_MAX_MMAP_SIZE>0 */ + +/* The page getter method for when the pager is an error state */ +static int getPageError( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int flags /* PAGER_GET_XXX flags */ +){ + UNUSED_PARAMETER(pgno); + UNUSED_PARAMETER(flags); + assert( pPager->errCode!=SQLITE_OK ); + *ppPage = 0; + return pPager->errCode; +} + + +/* Dispatch all page fetch requests to the appropriate getter method. +*/ +SQLITE_PRIVATE int sqlite3PagerGet( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int flags /* PAGER_GET_XXX flags */ +){ + return pPager->xGet(pPager, pgno, ppPage, flags); +} + /* ** Acquire a page if it is already in the in-memory cache. Do ** not read the page from disk. Return a pointer to the page, @@ -52320,11 +52818,11 @@ SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){ assert( (pPg->flags & PGHDR_MMAP)==0 ); assert( pPager->eState>=PAGER_WRITER_LOCKED ); assert( assert_pager_state(pPager) ); - if( pPager->errCode ){ - return pPager->errCode; - }else if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){ + if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){ if( pPager->nSavepoint ) return subjournalPageIfRequired(pPg); return SQLITE_OK; + }else if( pPager->errCode ){ + return pPager->errCode; }else if( pPager->sectorSize > (u32)pPager->pageSize ){ assert( pPager->tempFile==0 ); return pagerWriteLargeSector(pPg); @@ -52819,6 +53317,7 @@ SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){ */ pPager->errCode = SQLITE_ABORT; pPager->eState = PAGER_ERROR; + setGetterMethod(pPager); return rc; } }else{ @@ -53080,6 +53579,7 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ ){ pPager->errCode = SQLITE_ABORT; pPager->eState = PAGER_ERROR; + setGetterMethod(pPager); } #endif } @@ -53152,6 +53652,7 @@ SQLITE_PRIVATE void sqlite3PagerSetCodec( pPager->xCodecSizeChng = xCodecSizeChng; pPager->xCodecFree = xCodecFree; pPager->pCodec = pCodec; + setGetterMethod(pPager); pagerReportSize(pPager); } SQLITE_PRIVATE void *sqlite3PagerGetCodec(Pager *pPager){ @@ -53561,10 +54062,16 @@ SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *pPager){ ** ** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. 
*/ -SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int eMode, int *pnLog, int *pnCkpt){ +SQLITE_PRIVATE int sqlite3PagerCheckpoint( + Pager *pPager, /* Checkpoint on this pager */ + sqlite3 *db, /* Db handle used to check for interrupts */ + int eMode, /* Type of checkpoint */ + int *pnLog, /* OUT: Final number of frames in log */ + int *pnCkpt /* OUT: Final number of checkpointed frames */ +){ int rc = SQLITE_OK; if( pPager->pWal ){ - rc = sqlite3WalCheckpoint(pPager->pWal, eMode, + rc = sqlite3WalCheckpoint(pPager->pWal, db, eMode, (eMode==SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler), pPager->pBusyHandlerArg, pPager->ckptSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace, @@ -53696,7 +54203,7 @@ SQLITE_PRIVATE int sqlite3PagerOpenWal( ** error (SQLITE_BUSY) is returned and the log connection is not closed. ** If successful, the EXCLUSIVE lock is not released before returning. */ -SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){ +SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3 *db){ int rc = SQLITE_OK; assert( pPager->journalMode==PAGER_JOURNALMODE_WAL ); @@ -53724,7 +54231,7 @@ SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){ if( rc==SQLITE_OK && pPager->pWal ){ rc = pagerExclusiveLock(pPager); if( rc==SQLITE_OK ){ - rc = sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags, + rc = sqlite3WalClose(pPager->pWal, db, pPager->ckptSyncFlags, pPager->pageSize, (u8*)pPager->pTmpSpace); pPager->pWal = 0; pagerFixMaplimit(pPager); @@ -53761,6 +54268,20 @@ SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSn } return rc; } + +/* +** If this is a WAL database, call sqlite3WalSnapshotRecover(). If this +** is not a WAL database, return an error. +*/ +SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager){ + int rc; + if( pPager->pWal ){ + rc = sqlite3WalSnapshotRecover(pPager->pWal); + }else{ + rc = SQLITE_ERROR; + } + return rc; +} #endif /* SQLITE_ENABLE_SNAPSHOT */ #endif /* !SQLITE_OMIT_WAL */ @@ -55507,6 +56028,7 @@ static void walRestartHdr(Wal *pWal, u32 salt1){ */ static int walCheckpoint( Wal *pWal, /* Wal connection */ + sqlite3 *db, /* Check for interrupts on this handle */ int eMode, /* One of PASSIVE, FULL or RESTART */ int (*xBusy)(void*), /* Function to call when busy */ void *pBusyArg, /* Context argument for xBusyHandler */ @@ -55601,6 +56123,10 @@ static int walCheckpoint( while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ i64 iOffset; assert( walFramePgno(pWal, iFrame)==iDbpage ); + if( db->u1.isInterrupted ){ + rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; + break; + } if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){ continue; } @@ -55705,6 +56231,7 @@ static void walLimitSize(Wal *pWal, i64 nMax){ */ SQLITE_PRIVATE int sqlite3WalClose( Wal *pWal, /* Wal to close */ + sqlite3 *db, /* For interrupt flag */ int sync_flags, /* Flags to pass to OsSync() (or 0) */ int nBuf, u8 *zBuf /* Buffer of at least nBuf bytes */ @@ -55721,13 +56248,14 @@ SQLITE_PRIVATE int sqlite3WalClose( ** ** The EXCLUSIVE lock is not released before returning. 
*/ - rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE); - if( rc==SQLITE_OK ){ + if( zBuf!=0 + && SQLITE_OK==(rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE)) + ){ if( pWal->exclusiveMode==WAL_NORMAL_MODE ){ pWal->exclusiveMode = WAL_EXCLUSIVE_MODE; } - rc = sqlite3WalCheckpoint( - pWal, SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0 + rc = sqlite3WalCheckpoint(pWal, db, + SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0 ); if( rc==SQLITE_OK ){ int bPersist = -1; @@ -56156,6 +56684,84 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ return rc; } +#ifdef SQLITE_ENABLE_SNAPSHOT +/* +** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted +** variable so that older snapshots can be accessed. To do this, loop +** through all wal frames from nBackfillAttempted to (nBackfill+1), +** comparing their content to the corresponding page with the database +** file, if any. Set nBackfillAttempted to the frame number of the +** first frame for which the wal file content matches the db file. +** +** This is only really safe if the file-system is such that any page +** writes made by earlier checkpointers were atomic operations, which +** is not always true. It is also possible that nBackfillAttempted +** may be left set to a value larger than expected, if a wal frame +** contains content that duplicate of an earlier version of the same +** page. +** +** SQLITE_OK is returned if successful, or an SQLite error code if an +** error occurs. It is not an error if nBackfillAttempted cannot be +** decreased at all. +*/ +SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){ + int rc; + + assert( pWal->readLock>=0 ); + rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); + if( rc==SQLITE_OK ){ + volatile WalCkptInfo *pInfo = walCkptInfo(pWal); + int szPage = (int)pWal->szPage; + i64 szDb; /* Size of db file in bytes */ + + rc = sqlite3OsFileSize(pWal->pDbFd, &szDb); + if( rc==SQLITE_OK ){ + void *pBuf1 = sqlite3_malloc(szPage); + void *pBuf2 = sqlite3_malloc(szPage); + if( pBuf1==0 || pBuf2==0 ){ + rc = SQLITE_NOMEM; + }else{ + u32 i = pInfo->nBackfillAttempted; + for(i=pInfo->nBackfillAttempted; i>pInfo->nBackfill; i--){ + volatile ht_slot *dummy; + volatile u32 *aPgno; /* Array of page numbers */ + u32 iZero; /* Frame corresponding to aPgno[0] */ + u32 pgno; /* Page number in db file */ + i64 iDbOff; /* Offset of db file entry */ + i64 iWalOff; /* Offset of wal file entry */ + + rc = walHashGet(pWal, walFramePage(i), &dummy, &aPgno, &iZero); + if( rc!=SQLITE_OK ) break; + pgno = aPgno[i-iZero]; + iDbOff = (i64)(pgno-1) * szPage; + + if( iDbOff+szPage<=szDb ){ + iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE; + rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff); + + if( rc==SQLITE_OK ){ + rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff); + } + + if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){ + break; + } + } + + pInfo->nBackfillAttempted = i-1; + } + } + + sqlite3_free(pBuf1); + sqlite3_free(pBuf2); + } + walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1); + } + + return rc; +} +#endif /* SQLITE_ENABLE_SNAPSHOT */ + /* ** Begin a read transaction on the database. ** @@ -56218,7 +56824,11 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ ** has not yet set the pInfo->nBackfillAttempted variable to indicate ** its intent. 
To avoid the race condition this leads to, ensure that ** there is no checkpointer process by taking a shared CKPT lock - ** before checking pInfo->nBackfillAttempted. */ + ** before checking pInfo->nBackfillAttempted. + ** + ** TODO: Does the aReadMark[] lock prevent a checkpointer from doing + ** this already? + */ rc = walLockShared(pWal, WAL_CKPT_LOCK); if( rc==SQLITE_OK ){ @@ -56975,6 +57585,7 @@ SQLITE_PRIVATE int sqlite3WalFrames( */ SQLITE_PRIVATE int sqlite3WalCheckpoint( Wal *pWal, /* Wal connection */ + sqlite3 *db, /* Check this handle's interrupt flag */ int eMode, /* PASSIVE, FULL, RESTART, or TRUNCATE */ int (*xBusy)(void*), /* Function to call when busy */ void *pBusyArg, /* Context argument for xBusyHandler */ @@ -57049,7 +57660,7 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ rc = SQLITE_CORRUPT_BKPT; }else{ - rc = walCheckpoint(pWal, eMode2, xBusy2, pBusyArg, sync_flags, zBuf); + rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf); } /* If no error occurred, set the output variables. */ @@ -57169,9 +57780,14 @@ SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal){ SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot){ int rc = SQLITE_OK; WalIndexHdr *pRet; + static const u32 aZero[4] = { 0, 0, 0, 0 }; assert( pWal->readLock>=0 && pWal->writeLock==0 ); + if( memcmp(&pWal->hdr.aFrameCksum[0],aZero,16)==0 ){ + *ppSnapshot = 0; + return SQLITE_ERROR; + } pRet = (WalIndexHdr*)sqlite3_malloc(sizeof(WalIndexHdr)); if( pRet==0 ){ rc = SQLITE_NOMEM_BKPT; @@ -57509,37 +58125,39 @@ typedef struct CellInfo CellInfo; #define PTF_LEAF 0x08 /* -** As each page of the file is loaded into memory, an instance of the following -** structure is appended and initialized to zero. This structure stores -** information about the page that is decoded from the raw file page. +** An instance of this object stores information about each a single database +** page that has been loaded into memory. The information in this object +** is derived from the raw on-disk page content. ** -** The pParent field points back to the parent page. This allows us to -** walk up the BTree from any leaf to the root. Care must be taken to -** unref() the parent page pointer when this page is no longer referenced. -** The pageDestructor() routine handles that chore. +** As each database page is loaded into memory, the pager allocats an +** instance of this object and zeros the first 8 bytes. (This is the +** "extra" information associated with each page of the pager.) ** ** Access to all fields of this structure is controlled by the mutex ** stored in MemPage.pBt->mutex. */ struct MemPage { u8 isInit; /* True if previously initialized. MUST BE FIRST! */ - u8 nOverflow; /* Number of overflow cell bodies in aCell[] */ + u8 bBusy; /* Prevent endless loops on corrupt database files */ u8 intKey; /* True if table b-trees. False for index b-trees */ u8 intKeyLeaf; /* True if the leaf of an intKey table */ + Pgno pgno; /* Page number for this page */ + /* Only the first 8 bytes (above) are zeroed by pager.c when a new page + ** is allocated. All fields that follow must be initialized before use */ u8 leaf; /* True if a leaf page */ u8 hdrOffset; /* 100 for page 1. 0 otherwise */ u8 childPtrSize; /* 0 if leaf==1. 
4 if leaf==0 */ u8 max1bytePayload; /* min(maxLocal,127) */ - u8 bBusy; /* Prevent endless loops on corrupt database files */ + u8 nOverflow; /* Number of overflow cell bodies in aCell[] */ u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */ u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */ u16 cellOffset; /* Index in aData of first cell pointer */ u16 nFree; /* Number of free bytes on the page */ u16 nCell; /* Number of cells on this page, local and ovfl */ u16 maskPage; /* Mask for page offset */ - u16 aiOvfl[5]; /* Insert the i-th overflow cell before the aiOvfl-th + u16 aiOvfl[4]; /* Insert the i-th overflow cell before the aiOvfl-th ** non-overflow cell */ - u8 *apOvfl[5]; /* Pointers to the body of overflow cells */ + u8 *apOvfl[4]; /* Pointers to the body of overflow cells */ BtShared *pBt; /* Pointer to BtShared that this page is part of */ u8 *aData; /* Pointer to disk image of the page data */ u8 *aDataEnd; /* One byte past the end of usable data */ @@ -57548,16 +58166,8 @@ struct MemPage { DbPage *pDbPage; /* Pager page handle */ u16 (*xCellSize)(MemPage*,u8*); /* cellSizePtr method */ void (*xParseCell)(MemPage*,u8*,CellInfo*); /* btreeParseCell method */ - Pgno pgno; /* Page number for this page */ }; -/* -** The in-memory image of a disk page has the auxiliary information appended -** to the end. EXTRA_SIZE is the number of bytes of space needed to hold -** that extra information. -*/ -#define EXTRA_SIZE sizeof(MemPage) - /* ** A linked list of the following structures is stored at BtShared.pLock. ** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor @@ -57948,11 +58558,9 @@ struct IntegrityCk { */ #if SQLITE_BYTEORDER==4321 # define get2byteAligned(x) (*(u16*)(x)) -#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \ - && GCC_VERSION>=4008000 +#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4008000 # define get2byteAligned(x) __builtin_bswap16(*(u16*)(x)) -#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \ - && defined(_MSC_VER) && _MSC_VER>=1300 +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 # define get2byteAligned(x) _byteswap_ushort(*(u16*)(x)) #else # define get2byteAligned(x) ((x)[0]<<8 | (x)[1]) @@ -58127,16 +58735,24 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){ ** two or more btrees in common both try to lock all their btrees ** at the same instant. 
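The get2byteAligned() hunk above only changes when the byte-swap intrinsics are selected; the underlying operation is reading a big-endian 16-bit value from the page image. A hedged sketch of the two equivalent forms (portable shifts versus the GCC intrinsic that the GCC_VERSION>=4008000 test gates on); the helper names are invented, and the memcpy is a deliberate safety choice where the macro presumably relies on 2-byte alignment:

    #include <stdint.h>
    #include <string.h>

    /* Portable form: correct on any host byte order. */
    static uint16_t be16_portable(const unsigned char *p){
      return (uint16_t)((p[0]<<8) | p[1]);
    }

    #if defined(__GNUC__) && defined(__ORDER_LITTLE_ENDIAN__) && \
        __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
    /* Intrinsic form for little-endian GCC/Clang hosts (GCC >= 4.8 provides
    ** __builtin_bswap16).  memcpy sidesteps the aligned-load assumption that
    ** the macro itself makes when it dereferences (u16*)(x) directly. */
    static uint16_t be16_bswap(const unsigned char *p){
      uint16_t v;
      memcpy(&v, p, 2);
      return __builtin_bswap16(v);
    }
    #endif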
*/ -SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){ +static void SQLITE_NOINLINE btreeEnterAll(sqlite3 *db){ int i; + int skipOk = 1; Btree *p; assert( sqlite3_mutex_held(db->mutex) ); for(i=0; inDb; i++){ p = db->aDb[i].pBt; - if( p ) sqlite3BtreeEnter(p); + if( p && p->sharable ){ + sqlite3BtreeEnter(p); + skipOk = 0; + } } + db->skipBtreeMutex = skipOk; } -SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){ +SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){ + if( db->skipBtreeMutex==0 ) btreeEnterAll(db); +} +static void SQLITE_NOINLINE btreeLeaveAll(sqlite3 *db){ int i; Btree *p; assert( sqlite3_mutex_held(db->mutex) ); @@ -58145,6 +58761,9 @@ SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){ if( p ) sqlite3BtreeLeave(p); } } +SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){ + if( db->skipBtreeMutex==0 ) btreeLeaveAll(db); +} #ifndef NDEBUG /* @@ -58876,7 +59495,7 @@ static int saveCursorKey(BtCursor *pCur){ pCur->nKey = sqlite3BtreePayloadSize(pCur); pKey = sqlite3Malloc( pCur->nKey ); if( pKey ){ - rc = sqlite3BtreeKey(pCur, 0, (int)pCur->nKey, pKey); + rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ pCur->pKey = pKey; }else{ @@ -59007,26 +59626,23 @@ static int btreeMoveto( ){ int rc; /* Status code */ UnpackedRecord *pIdxKey; /* Unpacked index key */ - char aSpace[384]; /* Temp space for pIdxKey - to avoid a malloc */ - char *pFree = 0; if( pKey ){ assert( nKey==(i64)(int)nKey ); - pIdxKey = sqlite3VdbeAllocUnpackedRecord( - pCur->pKeyInfo, aSpace, sizeof(aSpace), &pFree - ); + pIdxKey = sqlite3VdbeAllocUnpackedRecord(pCur->pKeyInfo); if( pIdxKey==0 ) return SQLITE_NOMEM_BKPT; sqlite3VdbeRecordUnpack(pCur->pKeyInfo, (int)nKey, pKey, pIdxKey); if( pIdxKey->nField==0 ){ - sqlite3DbFree(pCur->pKeyInfo->db, pFree); - return SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_BKPT; + goto moveto_done; } }else{ pIdxKey = 0; } rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes); - if( pFree ){ - sqlite3DbFree(pCur->pKeyInfo->db, pFree); +moveto_done: + if( pIdxKey ){ + sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey); } return rc; } @@ -59987,7 +60603,7 @@ static int btreeInitPage(MemPage *pPage){ assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) ); if( !pPage->isInit ){ - u16 pc; /* Address of a freeblock within pPage->aData[] */ + int pc; /* Address of a freeblock within pPage->aData[] */ u8 hdr; /* Offset to beginning of page header */ u8 *data; /* Equal to pPage->aData */ BtShared *pBt; /* The main btree structure */ @@ -60067,25 +60683,30 @@ static int btreeInitPage(MemPage *pPage){ ** freeblocks. */ pc = get2byte(&data[hdr+1]); nFree = data[hdr+7] + top; /* Init nFree to non-freeblock free space */ - while( pc>0 ){ - u16 next, size; - if( pciCellLast ){ + if( pc>0 ){ + u32 next, size; + if( pc0 && next<=pc+size+3) || pc+size>usableSize ){ - /* Free blocks must be in ascending order. And the last byte of - ** the free-block must lie on the database page. 
*/ - return SQLITE_CORRUPT_BKPT; + while( 1 ){ + if( pc>iCellLast ){ + return SQLITE_CORRUPT_BKPT; /* Freeblock off the end of the page */ + } + next = get2byte(&data[pc]); + size = get2byte(&data[pc+2]); + nFree = nFree + size; + if( next<=pc+size+3 ) break; + pc = next; + } + if( next>0 ){ + return SQLITE_CORRUPT_BKPT; /* Freeblock not in ascending order */ + } + if( pc+size>(unsigned int)usableSize ){ + return SQLITE_CORRUPT_BKPT; /* Last freeblock extends past page end */ } - nFree = nFree + size; - pc = next; } /* At this point, nFree contains the sum of the offset to the start @@ -60526,7 +61147,7 @@ SQLITE_PRIVATE int sqlite3BtreeOpen( goto btree_open_out; } rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename, - EXTRA_SIZE, flags, vfsFlags, pageReinit); + sizeof(MemPage), flags, vfsFlags, pageReinit); if( rc==SQLITE_OK ){ sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap); rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); @@ -60639,12 +61260,14 @@ SQLITE_PRIVATE int sqlite3BtreeOpen( btree_open_out: if( rc!=SQLITE_OK ){ if( pBt && pBt->pPager ){ - sqlite3PagerClose(pBt->pPager); + sqlite3PagerClose(pBt->pPager, 0); } sqlite3_free(pBt); sqlite3_free(p); *ppBtree = 0; }else{ + sqlite3_file *pFile; + /* If the B-Tree was successfully opened, set the pager-cache size to the ** default value. Except, when opening on an existing shared pager-cache, ** do not change the pager-cache size. @@ -60652,6 +61275,11 @@ SQLITE_PRIVATE int sqlite3BtreeOpen( if( sqlite3BtreeSchema(p, 0, 0)==0 ){ sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE); } + + pFile = sqlite3PagerFile(pBt->pPager); + if( pFile->pMethods ){ + sqlite3OsFileControlHint(pFile, SQLITE_FCNTL_PDB, (void*)&pBt->db); + } } if( mutexOpen ){ assert( sqlite3_mutex_held(mutexOpen) ); @@ -60781,7 +61409,7 @@ SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){ ** Clean out and delete the BtShared object. 
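The btreeInitPage() hunk above tightens how the page's freeblock chain is validated: every block offset must stay on the page, the chain must ascend, and the last block may not run past the end of the usable area. A simplified, standalone sketch of that style of check over a chain of 2-byte big-endian offsets; the names and the -1 corruption result are assumptions for illustration, not SQLite's:

    #include <stdint.h>

    static unsigned get2(const uint8_t *p){ return (unsigned)((p[0]<<8) | p[1]); }

    /* Walk a freeblock chain starting at offset pc.  Each block begins with a
    ** 2-byte offset of the next block (0 = end of chain) followed by a 2-byte
    ** size.  Return the total free bytes, or -1 if the chain is corrupt. */
    static int sumFreeblocks(const uint8_t *page, unsigned usable, unsigned pc){
      int nFree = 0;
      while( pc ){
        unsigned next, size;
        if( pc+4 > usable ) return -1;              /* block header off the page */
        next = get2(&page[pc]);
        size = get2(&page[pc+2]);
        if( pc+size > usable ) return -1;           /* block body off the page */
        if( next>0 && next<=pc+size+3 ) return -1;  /* not ascending / overlapping */
        nFree += (int)size;
        pc = next;
      }
      return nFree;
    }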
*/ assert( !pBt->pCursor ); - sqlite3PagerClose(pBt->pPager); + sqlite3PagerClose(pBt->pPager, p->db); if( pBt->xFreeSchema && pBt->pSchema ){ pBt->xFreeSchema(pBt->pSchema); } @@ -61528,14 +62156,11 @@ static int setChildPtrmaps(MemPage *pPage){ int nCell; /* Number of cells in page pPage */ int rc; /* Return code */ BtShared *pBt = pPage->pBt; - u8 isInitOrig = pPage->isInit; Pgno pgno = pPage->pgno; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); rc = btreeInitPage(pPage); - if( rc!=SQLITE_OK ){ - goto set_child_ptrmaps_out; - } + if( rc!=SQLITE_OK ) return rc; nCell = pPage->nCell; for(i=0; iisInit = isInitOrig; return rc; } @@ -61583,7 +62206,6 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ } put4byte(pPage->aData, iTo); }else{ - u8 isInitOrig = pPage->isInit; int i; int nCell; int rc; @@ -61597,12 +62219,14 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ if( eType==PTRMAP_OVERFLOW1 ){ CellInfo info; pPage->xParseCell(pPage, pCell, &info); - if( info.nLocalaData+pPage->maskPage - && iFrom==get4byte(pCell+info.nSize-4) - ){ - put4byte(pCell+info.nSize-4, iTo); - break; + if( info.nLocal pPage->aData+pPage->pBt->usableSize ){ + return SQLITE_CORRUPT_BKPT; + } + if( iFrom==get4byte(pCell+info.nSize-4) ){ + put4byte(pCell+info.nSize-4, iTo); + break; + } } }else{ if( get4byte(pCell)==iFrom ){ @@ -61619,8 +62243,6 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ } put4byte(&pPage->aData[pPage->hdrOffset+8], iTo); } - - pPage->isInit = isInitOrig; } return SQLITE_OK; } @@ -62279,7 +62901,12 @@ SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){ assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) ); sqlite3BtreeEnter(p); - rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); + if( op==SAVEPOINT_ROLLBACK ){ + rc = saveAllCursors(pBt, 0, 0); + } + if( rc==SQLITE_OK ){ + rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); + } if( rc==SQLITE_OK ){ if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){ pBt->nPage = 0; @@ -62515,6 +63142,10 @@ SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){ return pCur && pCur->eState==CURSOR_VALID; } #endif /* NDEBUG */ +SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor *pCur){ + assert( pCur!=0 ); + return pCur->eState==CURSOR_VALID; +} /* ** Return the value of the integer key or "rowid" for a table btree. @@ -62661,7 +63292,6 @@ static int copyPayload( ** ** 0: The operation is a read. Populate the overflow cache. ** 1: The operation is a write. Populate the overflow cache. -** 2: The operation is a read. Do not populate the overflow cache. ** ** A total of "amt" bytes are read or written beginning at "offset". ** Data is read to or from the buffer pBuf. @@ -62669,13 +63299,13 @@ static int copyPayload( ** The content being read or written might appear on the main page ** or be scattered out on multiple overflow pages. ** -** If the current cursor entry uses one or more overflow pages and the -** eOp argument is not 2, this function may allocate space for and lazily -** populates the overflow page-list cache array (BtCursor.aOverflow). +** If the current cursor entry uses one or more overflow pages +** this function may allocate space for and lazily populate +** the overflow page-list cache array (BtCursor.aOverflow). ** Subsequent calls use this cache to make seeking to the supplied offset ** more efficient. 
** -** Once an overflow page-list cache has been allocated, it may be +** Once an overflow page-list cache has been allocated, it must be ** invalidated if some other cursor writes to the same table, or if ** the cursor is moved to a different row. Additionally, in auto-vacuum ** mode, the following events may invalidate an overflow page-list cache. @@ -62697,21 +63327,17 @@ static int accessPayload( MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */ BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */ #ifdef SQLITE_DIRECT_OVERFLOW_READ - unsigned char * const pBufStart = pBuf; - int bEnd; /* True if reading to end of data */ + unsigned char * const pBufStart = pBuf; /* Start of original out buffer */ #endif assert( pPage ); + assert( eOp==0 || eOp==1 ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->aiIdx[pCur->iPage]nCell ); assert( cursorHoldsMutex(pCur) ); - assert( eOp!=2 || offset==0 ); /* Always start from beginning for eOp==2 */ getCellInfo(pCur); aPayload = pCur->info.pPayload; -#ifdef SQLITE_DIRECT_OVERFLOW_READ - bEnd = offset+amt==pCur->info.nPayload; -#endif assert( offset+amt <= pCur->info.nPayload ); assert( aPayload > pPage->aData ); @@ -62730,7 +63356,7 @@ static int accessPayload( if( a+offset>pCur->info.nLocal ){ a = pCur->info.nLocal - offset; } - rc = copyPayload(&aPayload[offset], pBuf, a, (eOp & 0x01), pPage->pDbPage); + rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage); offset = 0; pBuf += a; amt -= a; @@ -62746,53 +63372,46 @@ static int accessPayload( nextPage = get4byte(&aPayload[pCur->info.nLocal]); /* If the BtCursor.aOverflow[] has not been allocated, allocate it now. - ** Except, do not allocate aOverflow[] for eOp==2. ** ** The aOverflow[] array is sized at one entry for each overflow page ** in the overflow chain. The page number of the first overflow page is ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array ** means "not yet known" (the cache is lazily populated). */ - if( eOp!=2 && (pCur->curFlags & BTCF_ValidOvfl)==0 ){ + if( (pCur->curFlags & BTCF_ValidOvfl)==0 ){ int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; if( nOvfl>pCur->nOvflAlloc ){ Pgno *aNew = (Pgno*)sqlite3Realloc( pCur->aOverflow, nOvfl*2*sizeof(Pgno) ); if( aNew==0 ){ - rc = SQLITE_NOMEM_BKPT; + return SQLITE_NOMEM_BKPT; }else{ pCur->nOvflAlloc = nOvfl*2; pCur->aOverflow = aNew; } } - if( rc==SQLITE_OK ){ - memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno)); - pCur->curFlags |= BTCF_ValidOvfl; + memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno)); + pCur->curFlags |= BTCF_ValidOvfl; + }else{ + /* If the overflow page-list cache has been allocated and the + ** entry for the first required overflow page is valid, skip + ** directly to it. + */ + if( pCur->aOverflow[offset/ovflSize] ){ + iIdx = (offset/ovflSize); + nextPage = pCur->aOverflow[iIdx]; + offset = (offset%ovflSize); } } - /* If the overflow page-list cache has been allocated and the - ** entry for the first required overflow page is valid, skip - ** directly to it. - */ - if( (pCur->curFlags & BTCF_ValidOvfl)!=0 - && pCur->aOverflow[offset/ovflSize] - ){ - iIdx = (offset/ovflSize); - nextPage = pCur->aOverflow[iIdx]; - offset = (offset%ovflSize); - } - - for( ; rc==SQLITE_OK && amt>0 && nextPage; iIdx++){ - + assert( rc==SQLITE_OK && amt>0 ); + while( nextPage ){ /* If required, populate the overflow page-list cache. 
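The accessPayload() changes above drop the old eOp==2 mode and always maintain the overflow page-list cache: an array with one slot per overflow page, where 0 means "not yet known" and a filled-in entry lets a later read jump straight to the page holding a given offset instead of walking the chain from the start. A rough standalone sketch of such a lazily-filled index, with invented names (this is not the BtCursor.aOverflow[] code itself):

    #include <stdlib.h>

    typedef unsigned Pgno;

    typedef struct OvflCache {
      Pgno *a;      /* a[i] = page number of the i-th overflow page, or 0 */
      int   n;      /* number of slots */
    } OvflCache;

    /* Size the cache for a payload of nPayload bytes split into ovflSize-byte
    ** chunks.  All slots start at 0 ("not yet known"). */
    static int ovflCacheInit(OvflCache *p, int nPayload, int ovflSize){
      p->n = (nPayload + ovflSize - 1)/ovflSize;
      p->a = (Pgno*)calloc((size_t)p->n, sizeof(Pgno));
      return p->a ? 0 : -1;
    }

    /* Record the page number for chunk iChunk the first time it is learned. */
    static void ovflCacheSet(OvflCache *p, int iChunk, Pgno pgno){
      if( iChunk < p->n && p->a[iChunk]==0 ) p->a[iChunk] = pgno;
    }

    /* If the chunk containing 'offset' is already cached, return its page
    ** number so the caller can skip the chain walk; otherwise return 0. */
    static Pgno ovflCacheSeek(const OvflCache *p, int offset, int ovflSize){
      int iChunk = offset/ovflSize;
      return (iChunk < p->n) ? p->a[iChunk] : 0;
    }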
*/ - if( (pCur->curFlags & BTCF_ValidOvfl)!=0 ){ - assert( pCur->aOverflow[iIdx]==0 - || pCur->aOverflow[iIdx]==nextPage - || CORRUPT_DB ); - pCur->aOverflow[iIdx] = nextPage; - } + assert( pCur->aOverflow[iIdx]==0 + || pCur->aOverflow[iIdx]==nextPage + || CORRUPT_DB ); + pCur->aOverflow[iIdx] = nextPage; if( offset>=ovflSize ){ /* The only reason to read this page is to obtain the page @@ -62800,11 +63419,7 @@ static int accessPayload( ** data is not required. So first try to lookup the overflow ** page-list cache, if any, then fall back to the getOverflowPage() ** function. - ** - ** Note that the aOverflow[] array must be allocated because eOp!=2 - ** here. If eOp==2, then offset==0 and this branch is never taken. */ - assert( eOp!=2 ); assert( pCur->curFlags & BTCF_ValidOvfl ); assert( pCur->pBtree->db==pBt->db ); if( pCur->aOverflow[iIdx+1] ){ @@ -62818,7 +63433,7 @@ static int accessPayload( ** range of data that is being read (eOp==0) or written (eOp!=0). */ #ifdef SQLITE_DIRECT_OVERFLOW_READ - sqlite3_file *fd; + sqlite3_file *fd; /* File from which to do direct overflow read */ #endif int a = amt; if( a + offset > ovflSize ){ @@ -62830,27 +63445,25 @@ static int accessPayload( ** ** 1) this is a read operation, and ** 2) data is required from the start of this overflow page, and - ** 3) the database is file-backed, and - ** 4) there is no open write-transaction, and - ** 5) the database is not a WAL database, - ** 6) all data from the page is being read. - ** 7) at least 4 bytes have already been read into the output buffer + ** 3) there is no open write-transaction, and + ** 4) the database is file-backed, and + ** 5) the page is not in the WAL file + ** 6) at least 4 bytes have already been read into the output buffer ** ** then data can be read directly from the database file into the ** output buffer, bypassing the page-cache altogether. This speeds ** up loading large records that span many overflow pages. */ - if( (eOp&0x01)==0 /* (1) */ + if( eOp==0 /* (1) */ && offset==0 /* (2) */ - && (bEnd || a==ovflSize) /* (6) */ - && pBt->inTransaction==TRANS_READ /* (4) */ - && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (3) */ - && 0==sqlite3PagerUseWal(pBt->pPager) /* (5) */ - && &pBuf[-4]>=pBufStart /* (7) */ + && pBt->inTransaction==TRANS_READ /* (3) */ + && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (4) */ + && 0==sqlite3PagerUseWal(pBt->pPager, nextPage) /* (5) */ + && &pBuf[-4]>=pBufStart /* (6) */ ){ u8 aSave[4]; u8 *aWrite = &pBuf[-4]; - assert( aWrite>=pBufStart ); /* hence (7) */ + assert( aWrite>=pBufStart ); /* due to (6) */ memcpy(aSave, aWrite, 4); rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1)); nextPage = get4byte(aWrite); @@ -62861,41 +63474,49 @@ static int accessPayload( { DbPage *pDbPage; rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage, - ((eOp&0x01)==0 ? PAGER_GET_READONLY : 0) + (eOp==0 ? PAGER_GET_READONLY : 0) ); if( rc==SQLITE_OK ){ aPayload = sqlite3PagerGetData(pDbPage); nextPage = get4byte(aPayload); - rc = copyPayload(&aPayload[offset+4], pBuf, a, (eOp&0x01), pDbPage); + rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage); sqlite3PagerUnref(pDbPage); offset = 0; } } amt -= a; + if( amt==0 ) return rc; pBuf += a; } + if( rc ) break; + iIdx++; } } if( rc==SQLITE_OK && amt>0 ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_BKPT; /* Overflow chain ends prematurely */ } return rc; } /* -** Read part of the key associated with cursor pCur. Exactly -** "amt" bytes will be transferred into pBuf[]. 
The transfer +** Read part of the payload for the row at which that cursor pCur is currently +** pointing. "amt" bytes will be transferred into pBuf[]. The transfer ** begins at "offset". ** -** The caller must ensure that pCur is pointing to a valid row -** in the table. +** pCur can be pointing to either a table or an index b-tree. +** If pointing to a table btree, then the content section is read. If +** pCur is pointing to an index b-tree then the key section is read. +** +** For sqlite3BtreePayload(), the caller must ensure that pCur is pointing +** to a valid row in the table. For sqlite3BtreePayloadChecked(), the +** cursor might be invalid or might need to be restored before being read. ** ** Return SQLITE_OK on success or an error code if anything goes ** wrong. An error is returned if "offset+amt" is larger than ** the available payload. */ -SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ +SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); @@ -62904,33 +63525,34 @@ SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pB } /* -** Read part of the data associated with cursor pCur. Exactly -** "amt" bytes will be transfered into pBuf[]. The transfer -** begins at "offset". -** -** Return SQLITE_OK on success or an error code if anything goes -** wrong. An error is returned if "offset+amt" is larger than -** the available payload. +** This variant of sqlite3BtreePayload() works even if the cursor has not +** in the CURSOR_VALID state. It is only used by the sqlite3_blob_read() +** interface. */ -SQLITE_PRIVATE int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ - int rc; - #ifndef SQLITE_OMIT_INCRBLOB +static SQLITE_NOINLINE int accessPayloadChecked( + BtCursor *pCur, + u32 offset, + u32 amt, + void *pBuf +){ + int rc; if ( pCur->eState==CURSOR_INVALID ){ return SQLITE_ABORT; } -#endif - assert( cursorOwnsBtShared(pCur) ); - rc = restoreCursorPosition(pCur); - if( rc==SQLITE_OK ){ - assert( pCur->eState==CURSOR_VALID ); - assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); - assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); - rc = accessPayload(pCur, offset, amt, pBuf, 0); + rc = btreeRestoreCursorPosition(pCur); + return rc ? 
rc : accessPayload(pCur, offset, amt, pBuf, 0); +} +SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ + if( pCur->eState==CURSOR_VALID ){ + assert( cursorOwnsBtShared(pCur) ); + return accessPayload(pCur, offset, amt, pBuf, 0); + }else{ + return accessPayloadChecked(pCur, offset, amt, pBuf); } - return rc; } +#endif /* SQLITE_OMIT_INCRBLOB */ /* ** Return a pointer to payload information from the entry that the @@ -63101,9 +63723,12 @@ static int moveToRoot(BtCursor *pCur){ } if( pCur->iPage>=0 ){ - while( pCur->iPage ){ - assert( pCur->apPage[pCur->iPage]!=0 ); - releasePageNotNull(pCur->apPage[pCur->iPage--]); + if( pCur->iPage ){ + do{ + assert( pCur->apPage[pCur->iPage]!=0 ); + releasePageNotNull(pCur->apPage[pCur->iPage--]); + }while( pCur->iPage); + goto skip_init; } }else if( pCur->pgnoRoot==0 ){ pCur->eState = CURSOR_INVALID; @@ -63114,7 +63739,7 @@ static int moveToRoot(BtCursor *pCur){ 0, pCur->curPagerFlags); if( rc!=SQLITE_OK ){ pCur->eState = CURSOR_INVALID; - return rc; + return rc; } pCur->iPage = 0; pCur->curIntKey = pCur->apPage[0]->intKey; @@ -63137,10 +63762,12 @@ static int moveToRoot(BtCursor *pCur){ return SQLITE_CORRUPT_BKPT; } +skip_init: pCur->aiIdx[0] = 0; pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl); + pRoot = pCur->apPage[0]; if( pRoot->nCell>0 ){ pCur->eState = CURSOR_VALID; }else if( !pRoot->leaf ){ @@ -63329,9 +63956,26 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( *pRes = 0; return SQLITE_OK; } - if( (pCur->curFlags & BTCF_AtLast)!=0 && pCur->info.nKeyinfo.nKeycurFlags & BTCF_AtLast)!=0 ){ + *pRes = -1; + return SQLITE_OK; + } + /* If the requested key is one more than the previous key, then + ** try to get there using sqlite3BtreeNext() rather than a full + ** binary search. This is an optimization only. 
The correct answer + ** is still obtained without this ase, only a little more slowely */ + if( pCur->info.nKey+1==intKey && !pCur->skipNext ){ + *pRes = 0; + rc = sqlite3BtreeNext(pCur, pRes); + if( rc ) return rc; + if( *pRes==0 ){ + getCellInfo(pCur); + if( pCur->info.nKey==intKey ){ + return SQLITE_OK; + } + } + } } } @@ -63397,16 +64041,16 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( if( lwr>upr ){ c = +1; break; } }else{ assert( nCellKey==intKey ); - pCur->curFlags |= BTCF_ValidNKey; - pCur->info.nKey = nCellKey; pCur->aiIdx[pCur->iPage] = (u16)idx; if( !pPage->leaf ){ lwr = idx; goto moveto_next_layer; }else{ + pCur->curFlags |= BTCF_ValidNKey; + pCur->info.nKey = nCellKey; + pCur->info.nSize = 0; *pRes = 0; - rc = SQLITE_OK; - goto moveto_finish; + return SQLITE_OK; } } assert( lwr+upr>=0 ); @@ -63467,7 +64111,8 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( goto moveto_finish; } pCur->aiIdx[pCur->iPage] = (u16)idx; - rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 2); + rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0); + pCur->curFlags &= ~BTCF_ValidOvfl; if( rc ){ sqlite3_free(pCellKey); goto moveto_finish; @@ -63517,7 +64162,7 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( } moveto_finish: pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); + assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); return rc; } @@ -63715,7 +64360,7 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){ moveToParent(pCur); } assert( pCur->info.nSize==0 ); - assert( (pCur->curFlags & (BTCF_ValidNKey|BTCF_ValidOvfl))==0 ); + assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 ); pCur->aiIdx[pCur->iPage]--; pPage = pCur->apPage[pCur->iPage]; @@ -64231,30 +64876,28 @@ static void freePage(MemPage *pPage, int *pRC){ static int clearCell( MemPage *pPage, /* The page that contains the Cell */ unsigned char *pCell, /* First byte of the Cell */ - u16 *pnSize /* Write the size of the Cell here */ + CellInfo *pInfo /* Size information about the cell */ ){ BtShared *pBt = pPage->pBt; - CellInfo info; Pgno ovflPgno; int rc; int nOvfl; u32 ovflPageSize; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - pPage->xParseCell(pPage, pCell, &info); - *pnSize = info.nSize; - if( info.nLocal==info.nPayload ){ + pPage->xParseCell(pPage, pCell, pInfo); + if( pInfo->nLocal==pInfo->nPayload ){ return SQLITE_OK; /* No overflow pages. Return without doing anything */ } - if( pCell+info.nSize-1 > pPage->aData+pPage->maskPage ){ + if( pCell+pInfo->nSize-1 > pPage->aData+pPage->maskPage ){ return SQLITE_CORRUPT_BKPT; /* Cell extends past end of page */ } - ovflPgno = get4byte(pCell + info.nSize - 4); + ovflPgno = get4byte(pCell + pInfo->nSize - 4); assert( pBt->usableSize > 4 ); ovflPageSize = pBt->usableSize - 4; - nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize; + nOvfl = (pInfo->nPayload - pInfo->nLocal + ovflPageSize - 1)/ovflPageSize; assert( nOvfl>0 || - (CORRUPT_DB && (info.nPayload + ovflPageSize)nPayload + ovflPageSize)=0 && idxnCell ); assert( CORRUPT_DB || sz==cellSize(pPage, idx) ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); @@ -64578,7 +65220,10 @@ static void insertCell( put4byte(pCell, iChild); } j = pPage->nOverflow++; - assert( j<(int)(sizeof(pPage->apOvfl)/sizeof(pPage->apOvfl[0])) ); + /* Comparison against ArraySize-1 since we hold back one extra slot + ** as a contingency. In other words, never need more than 3 overflow + ** slots but 4 are allocated, just to be safe. 
*/ + assert( j < ArraySize(pPage->apOvfl)-1 ); pPage->apOvfl[j] = pCell; pPage->aiOvfl[j] = (u16)i; @@ -65318,7 +65963,7 @@ static int balance_nonroot( nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow; if( (i--)==0 ) break; - if( i+nxDiv==pParent->aiOvfl[0] && pParent->nOverflow ){ + if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){ apDiv[i] = pParent->apOvfl[0]; pgno = get4byte(apDiv[i]); szNew[i] = pParent->xCellSize(pParent, apDiv[i]); @@ -65510,7 +66155,6 @@ static int balance_nonroot( for(i=0; inFree; - if( szNew[i]<0 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } for(j=0; jnOverflow; j++){ szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]); } @@ -66172,22 +66816,24 @@ static int balance(BtCursor *pCur){ ** pX.pData,nData,nZero fields must be zero. ** ** If the seekResult parameter is non-zero, then a successful call to -** MovetoUnpacked() to seek cursor pCur to (pKey, nKey) has already -** been performed. seekResult is the search result returned (a negative -** number if pCur points at an entry that is smaller than (pKey, nKey), or -** a positive value if pCur points at an entry that is larger than -** (pKey, nKey)). -** -** If the seekResult parameter is non-zero, then the caller guarantees that -** cursor pCur is pointing at the existing copy of a row that is to be -** overwritten. If the seekResult parameter is 0, then cursor pCur may -** point to any entry or to no entry at all and so this function has to seek -** the cursor before the new key can be inserted. +** MovetoUnpacked() to seek cursor pCur to (pKey,nKey) has already +** been performed. In other words, if seekResult!=0 then the cursor +** is currently pointing to a cell that will be adjacent to the cell +** to be inserted. If seekResult<0 then pCur points to a cell that is +** smaller then (pKey,nKey). If seekResult>0 then pCur points to a cell +** that is larger than (pKey,nKey). +** +** If seekResult==0, that means pCur is pointing at some unknown location. +** In that case, this routine must seek the cursor to the correct insertion +** point for (pKey,nKey) before doing the insertion. For index btrees, +** if pX->nMem is non-zero, then pX->aMem contains pointers to the unpacked +** key values and pX->aMem can be used instead of pX->pKey to avoid having +** to decode the key. */ SQLITE_PRIVATE int sqlite3BtreeInsert( BtCursor *pCur, /* Insert data into the table of this cursor */ const BtreePayload *pX, /* Content of the row to be inserted */ - int appendBias, /* True if this is likely an append */ + int flags, /* True if this is likely an append */ int seekResult /* Result of prior MovetoUnpacked() call */ ){ int rc; @@ -66200,6 +66846,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( unsigned char *oldCell; unsigned char *newCell = 0; + assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags ); + if( pCur->eState==CURSOR_FAULT ){ assert( pCur->skipNext!=SQLITE_OK ); return pCur->skipNext; @@ -66240,18 +66888,38 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** cursors open on the row being replaced */ invalidateIncrblobCursors(p, pX->nKey, 0); + /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing + ** to a row with the same key as the new entry being inserted. 
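The rewritten sqlite3BtreeInsert() comment above spells out the seekResult contract: a negative value means the cursor already sits on a cell smaller than the new key, a positive value means a larger cell, and zero means the routine must perform its own seek. A small standalone illustration of the same idea on a sorted int array; everything here is invented for illustration and is not the b-tree code:

    #include <string.h>

    /* Insert 'key' into a[0..n-1] (sorted, capacity at least n+1).  'loc'
    ** plays the role of seekResult: <0 means a[pos] < key, >0 means
    ** a[pos] > key, 0 means "no usable hint, search here".  Returns the
    ** new element count. */
    static int insertSorted(int *a, int n, int key, int pos, int loc){
      if( loc==0 ){                     /* no hint: binary search for the slot */
        int lo = 0, hi = n;
        while( lo<hi ){
          int mid = (lo+hi)/2;
          if( a[mid]<key ) lo = mid+1; else hi = mid;
        }
        pos = lo;
      }else if( loc<0 ){
        pos = pos+1;                    /* hinted cell is just below the key */
      }                                 /* loc>0: insert directly at 'pos' */
      memmove(&a[pos+1], &a[pos], (size_t)(n-pos)*sizeof(int));
      a[pos] = key;
      return n+1;
    }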
*/ + assert( (flags & BTREE_SAVEPOSITION)==0 || + ((pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey) ); + /* If the cursor is currently on the last row and we are appending a ** new row onto the end, set the "loc" to avoid an unnecessary ** btreeMoveto() call */ - if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey>0 - && pCur->info.nKey==pX->nKey-1 ){ - loc = -1; + if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey ){ + loc = 0; + }else if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey>0 + && pCur->info.nKey==pX->nKey-1 ){ + loc = -1; }else if( loc==0 ){ - rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, appendBias, &loc); + rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, flags!=0, &loc); if( rc ) return rc; } - }else if( loc==0 ){ - rc = btreeMoveto(pCur, pX->pKey, pX->nKey, appendBias, &loc); + }else if( loc==0 && (flags & BTREE_SAVEPOSITION)==0 ){ + if( pX->nMem ){ + UnpackedRecord r; + r.pKeyInfo = pCur->pKeyInfo; + r.aMem = pX->aMem; + r.nField = pX->nMem; + r.default_rc = 0; + r.errCode = 0; + r.r1 = 0; + r.r2 = 0; + r.eqSeen = 0; + rc = sqlite3BtreeMovetoUnpacked(pCur, &r, 0, flags!=0, &loc); + }else{ + rc = btreeMoveto(pCur, pX->pKey, pX->nKey, flags!=0, &loc); + } if( rc ) return rc; } assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); @@ -66272,7 +66940,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( assert( szNew <= MX_CELL_SIZE(pBt) ); idx = pCur->aiIdx[pCur->iPage]; if( loc==0 ){ - u16 szOld; + CellInfo info; assert( idxnCell ); rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ){ @@ -66282,8 +66950,19 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( !pPage->leaf ){ memcpy(newCell, oldCell, 4); } - rc = clearCell(pPage, oldCell, &szOld); - dropCell(pPage, idx, szOld, &rc); + rc = clearCell(pPage, oldCell, &info); + if( info.nSize==szNew && info.nLocal==info.nPayload ){ + /* Overwrite the old cell with the new if they are the same size. + ** We could also try to do this if the old cell is smaller, then add + ** the leftover space to the free list. But experiments show that + ** doing that is no faster then skipping this optimization and just + ** calling dropCell() and insertCell(). */ + assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */ + if( oldCell+szNew > pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT; + memcpy(oldCell, newCell, szNew); + return SQLITE_OK; + } + dropCell(pPage, idx, info.nSize, &rc); if( rc ) goto end_insert; }else if( loc<0 && pPage->nCell>0 ){ assert( pPage->leaf ); @@ -66327,6 +67006,20 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** from trying to save the current position of the cursor. 
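The insert path above also gains an overwrite-in-place fast case: when the replacement cell is exactly the same size as the existing one (and has no overflow), the new bytes are copied over the old cell instead of going through dropCell() and insertCell(). A standalone sketch of that size-gated overwrite on a plain byte buffer; the names and return codes are illustrative assumptions only:

    #include <string.h>

    /* Replace the record at offset 'ofst' (oldSize bytes) with newSize bytes
    ** from pNew.  Overwrite in place only when the sizes match exactly;
    ** otherwise report that the caller must fall back to delete-then-insert. */
    static int overwriteSameSize(unsigned char *page, unsigned pageSize,
                                 unsigned ofst, unsigned oldSize,
                                 const unsigned char *pNew, unsigned newSize){
      if( newSize!=oldSize ) return 0;          /* caller must drop + insert */
      if( ofst+newSize > pageSize ) return -1;  /* would run off the page   */
      memcpy(&page[ofst], pNew, newSize);
      return 1;                                 /* done, cell rewritten     */
    }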
*/ pCur->apPage[pCur->iPage]->nOverflow = 0; pCur->eState = CURSOR_INVALID; + if( (flags & BTREE_SAVEPOSITION) && rc==SQLITE_OK ){ + rc = moveToRoot(pCur); + if( pCur->pKeyInfo ){ + assert( pCur->pKey==0 ); + pCur->pKey = sqlite3Malloc( pX->nKey ); + if( pCur->pKey==0 ){ + rc = SQLITE_NOMEM; + }else{ + memcpy(pCur->pKey, pX->pKey, pX->nKey); + } + } + pCur->eState = CURSOR_REQUIRESEEK; + pCur->nKey = pX->nKey; + } } assert( pCur->apPage[pCur->iPage]->nOverflow==0 ); @@ -66359,7 +67052,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ unsigned char *pCell; /* Pointer to cell to delete */ int iCellIdx; /* Index of cell to delete */ int iCellDepth; /* Depth of node containing pCell */ - u16 szCell; /* Size of the cell being deleted */ + CellInfo info; /* Size of the cell being deleted */ int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */ u8 bPreserve = flags & BTREE_SAVEPOSITION; /* Keep cursor valid */ @@ -66431,8 +67124,8 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ ** itself from within the page. */ rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; - rc = clearCell(pPage, pCell, &szCell); - dropCell(pPage, iCellIdx, szCell, &rc); + rc = clearCell(pPage, pCell, &info); + dropCell(pPage, iCellIdx, info.nSize, &rc); if( rc ) return rc; /* If the cell deleted was not located on a leaf page, then the cursor @@ -66682,7 +67375,7 @@ static int clearDatabasePage( unsigned char *pCell; int i; int hdr; - u16 szCell; + CellInfo info; assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno>btreePagecount(pBt) ){ @@ -66702,7 +67395,7 @@ static int clearDatabasePage( rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); if( rc ) goto cleardatabasepage_out; } - rc = clearCell(pPage, pCell, &szCell); + rc = clearCell(pPage, pCell, &info); if( rc ) goto cleardatabasepage_out; } if( !pPage->leaf ){ @@ -66793,27 +67486,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){ assert( sqlite3BtreeHoldsMutex(p) ); assert( p->inTrans==TRANS_WRITE ); - - /* It is illegal to drop a table if any cursors are open on the - ** database. This is because in auto-vacuum mode the backend may - ** need to move another root-page to fill a gap left by the deleted - ** root page. If an open cursor was using this page a problem would - ** occur. - ** - ** This error is caught long before control reaches this point. - */ - if( NEVER(pBt->pCursor) ){ - sqlite3ConnectionBlocked(p->db, pBt->pCursor->pBtree->db); - return SQLITE_LOCKED_SHAREDCACHE; - } - - /* - ** It is illegal to drop the sqlite_master table on page 1. But again, - ** this error is caught long before reaching this point. - */ - if( NEVER(iTable<2) ){ - return SQLITE_CORRUPT_BKPT; - } + assert( iTable>=2 ); rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0); if( rc ) return rc; @@ -67721,7 +68394,7 @@ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int * if( pBt->inTransaction!=TRANS_NONE ){ rc = SQLITE_LOCKED; }else{ - rc = sqlite3PagerCheckpoint(pBt->pPager, eMode, pnLog, pnCkpt); + rc = sqlite3PagerCheckpoint(pBt->pPager, p->db, eMode, pnLog, pnCkpt); } sqlite3BtreeLeave(p); } @@ -69697,10 +70370,9 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( /* ** Move data out of a btree key or data field and into a Mem structure. -** The data or key is taken from the entry that pCur is currently pointing +** The data is payload from the entry that pCur is currently pointing ** to. offset and amt determine what portion of the data or key to retrieve. 
-** key is true to get the key or false to get data. The result is written -** into the pMem element. +** The result is written into the pMem element. ** ** The pMem object must have been initialized. This routine will use ** pMem->zMalloc to hold the content from the btree, if possible. New @@ -69715,17 +70387,12 @@ static SQLITE_NOINLINE int vdbeMemFromBtreeResize( BtCursor *pCur, /* Cursor pointing at record to retrieve. */ u32 offset, /* Offset from the start of data to return bytes from. */ u32 amt, /* Number of bytes to return. */ - int key, /* If true, retrieve from the btree key, not data. */ Mem *pMem /* OUT: Return data in this Mem structure. */ ){ int rc; pMem->flags = MEM_Null; if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+2)) ){ - if( key ){ - rc = sqlite3BtreeKey(pCur, offset, amt, pMem->z); - }else{ - rc = sqlite3BtreeData(pCur, offset, amt, pMem->z); - } + rc = sqlite3BtreePayload(pCur, offset, amt, pMem->z); if( rc==SQLITE_OK ){ pMem->z[amt] = 0; pMem->z[amt+1] = 0; @@ -69741,7 +70408,6 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree( BtCursor *pCur, /* Cursor pointing at record to retrieve. */ u32 offset, /* Offset from the start of data to return bytes from. */ u32 amt, /* Number of bytes to return. */ - int key, /* If true, retrieve from the btree key, not data. */ Mem *pMem /* OUT: Return data in this Mem structure. */ ){ char *zData; /* Data from the btree layer */ @@ -69762,7 +70428,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree( pMem->flags = MEM_Blob|MEM_Ephem; pMem->n = (int)amt; }else{ - rc = vdbeMemFromBtreeResize(pCur, offset, amt, key, pMem); + rc = vdbeMemFromBtreeResize(pCur, offset, amt, pMem); } return rc; @@ -69780,6 +70446,7 @@ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){ assert( (pVal->flags & MEM_RowSet)==0 ); assert( (pVal->flags & (MEM_Null))==0 ); if( pVal->flags & (MEM_Blob|MEM_Str) ){ + if( ExpandBlob(pVal) ) return 0; pVal->flags |= MEM_Str; if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){ sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED); @@ -70103,6 +70770,7 @@ static int valueFromExpr( }else if( op==TK_NULL ){ pVal = valueNew(db, pCtx); if( pVal==0 ) goto no_mem; + sqlite3VdbeMemNumerify(pVal); } #ifndef SQLITE_OMIT_BLOB_LITERAL else if( op==TK_BLOB ){ @@ -70792,7 +71460,11 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp4Int( int p4 /* The P4 operand as an integer */ ){ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); - sqlite3VdbeChangeP4(p, addr, SQLITE_INT_TO_PTR(p4), P4_INT32); + if( p->db->mallocFailed==0 ){ + VdbeOp *pOp = &p->aOp[addr]; + pOp->p4type = P4_INT32; + pOp->p4.i = p4; + } return addr; } @@ -71123,6 +71795,22 @@ SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N){ } #endif +/* +** Verify that the VM passed as the only argument does not contain +** an OP_ResultRow opcode. Fail an assert() if it does. This is used +** by code in pragma.c to ensure that the implementation of certain +** pragmas comports with the flags specified in the mkpragmatab.tcl +** script. +*/ +#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS) +SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p){ + int i; + for(i=0; inOp; i++){ + assert( p->aOp[i].opcode!=OP_ResultRow ); + } +} +#endif + /* ** This function returns a pointer to the array of opcodes associated with ** the Vdbe passed as the first argument. 
It is the callers responsibility @@ -71242,7 +71930,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){ SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){ sqlite3VdbeGetOp(p,addr)->p3 = val; } -SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 p5){ +SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ assert( p->nOp>0 || p->db->mallocFailed ); if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5; } @@ -71303,10 +71991,6 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){ break; } #endif - case P4_MPRINTF: { - if( db->pnBytesFreed==0 ) sqlite3_free(p4); - break; - } case P4_FUNCDEF: { freeEphemeralFunction(db, (FuncDef*)p4); break; @@ -71451,16 +72135,42 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int } } +/* +** Change the P4 operand of the most recently coded instruction +** to the value defined by the arguments. This is a high-speed +** version of sqlite3VdbeChangeP4(). +** +** The P4 operand must not have been previously defined. And the new +** P4 must not be P4_INT32. Use sqlite3VdbeChangeP4() in either of +** those cases. +*/ +SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe *p, void *pP4, int n){ + VdbeOp *pOp; + assert( n!=P4_INT32 && n!=P4_VTAB ); + assert( n<=0 ); + if( p->db->mallocFailed ){ + freeP4(p->db, n, pP4); + }else{ + assert( pP4!=0 ); + assert( p->nOp>0 ); + pOp = &p->aOp[p->nOp-1]; + assert( pOp->p4type==P4_NOTUSED ); + pOp->p4type = n; + pOp->p4.p = pP4; + } +} + /* ** Set the P4 on the most recently added opcode to the KeyInfo for the ** index given. */ SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){ Vdbe *v = pParse->pVdbe; + KeyInfo *pKeyInfo; assert( v!=0 ); assert( pIdx!=0 ); - sqlite3VdbeChangeP4(v, -1, (char*)sqlite3KeyInfoOfIndex(pParse, pIdx), - P4_KEYINFO); + pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pIdx); + if( pKeyInfo ) sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO); } #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS @@ -71750,7 +72460,7 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){ sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg); break; } -#ifdef SQLITE_DEBUG +#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) case P4_FUNCCTX: { FuncDef *pDef = pOp->p4.pCtx->pFunc; sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg); @@ -72428,10 +73138,8 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( x.nFree = x.nNeeded; }while( !db->mallocFailed ); - p->nzVar = pParse->nzVar; - p->azVar = pParse->azVar; - pParse->nzVar = 0; - pParse->azVar = 0; + p->pVList = pParse->pVList; + pParse->pVList = 0; p->explain = pParse->explain; if( db->mallocFailed ){ p->nVar = 0; @@ -72459,15 +73167,15 @@ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx==0 ){ return; } - assert( pCx->pBt==0 || pCx->eCurType==CURTYPE_BTREE ); + assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE ); switch( pCx->eCurType ){ case CURTYPE_SORTER: { sqlite3VdbeSorterClose(p->db, pCx); break; } case CURTYPE_BTREE: { - if( pCx->pBt ){ - sqlite3BtreeClose(pCx->pBt); + if( pCx->pBtx ){ + sqlite3BtreeClose(pCx->pBtx); /* The pCx->pCursor will be close automatically, if it exists, by ** the call above. */ }else{ @@ -72936,60 +73644,59 @@ static void checkActiveVdbeCnt(sqlite3 *db){ ** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned. ** Otherwise SQLITE_OK. 
*/ -SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){ +static SQLITE_NOINLINE int vdbeCloseStatement(Vdbe *p, int eOp){ sqlite3 *const db = p->db; int rc = SQLITE_OK; + int i; + const int iSavepoint = p->iStatement-1; - /* If p->iStatement is greater than zero, then this Vdbe opened a - ** statement transaction that should be closed here. The only exception - ** is that an IO error may have occurred, causing an emergency rollback. - ** In this case (db->nStatement==0), and there is nothing to do. - */ - if( db->nStatement && p->iStatement ){ - int i; - const int iSavepoint = p->iStatement-1; - - assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE); - assert( db->nStatement>0 ); - assert( p->iStatement==(db->nStatement+db->nSavepoint) ); - - for(i=0; inDb; i++){ - int rc2 = SQLITE_OK; - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - if( eOp==SAVEPOINT_ROLLBACK ){ - rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint); - } - if( rc2==SQLITE_OK ){ - rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint); - } - if( rc==SQLITE_OK ){ - rc = rc2; - } - } - } - db->nStatement--; - p->iStatement = 0; + assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE); + assert( db->nStatement>0 ); + assert( p->iStatement==(db->nStatement+db->nSavepoint) ); - if( rc==SQLITE_OK ){ + for(i=0; inDb; i++){ + int rc2 = SQLITE_OK; + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ if( eOp==SAVEPOINT_ROLLBACK ){ - rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint); + rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint); + } + if( rc2==SQLITE_OK ){ + rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint); } if( rc==SQLITE_OK ){ - rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint); + rc = rc2; } } + } + db->nStatement--; + p->iStatement = 0; - /* If the statement transaction is being rolled back, also restore the - ** database handles deferred constraint counter to the value it had when - ** the statement transaction was opened. */ + if( rc==SQLITE_OK ){ if( eOp==SAVEPOINT_ROLLBACK ){ - db->nDeferredCons = p->nStmtDefCons; - db->nDeferredImmCons = p->nStmtDefImmCons; + rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint); } + if( rc==SQLITE_OK ){ + rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint); + } + } + + /* If the statement transaction is being rolled back, also restore the + ** database handles deferred constraint counter to the value it had when + ** the statement transaction was opened. 
*/ + if( eOp==SAVEPOINT_ROLLBACK ){ + db->nDeferredCons = p->nStmtDefCons; + db->nDeferredImmCons = p->nStmtDefImmCons; } return rc; } +SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){ + if( p->db->nStatement && p->iStatement ){ + return vdbeCloseStatement(p, eOp); + } + return SQLITE_OK; +} + /* ** This function is called when a transaction opened by the database @@ -73425,7 +74132,6 @@ SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3 *db, AuxData **pp, int iOp, */ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ SubProgram *pSub, *pNext; - int i; assert( p->db==0 || p->db==db ); releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); for(pSub=p->pProgram; pSub; pSub=pNext){ @@ -73435,18 +74141,20 @@ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ } if( p->magic!=VDBE_MAGIC_INIT ){ releaseMemArray(p->aVar, p->nVar); - for(i=p->nzVar-1; i>=0; i--) sqlite3DbFree(db, p->azVar[i]); - sqlite3DbFree(db, p->azVar); + sqlite3DbFree(db, p->pVList); sqlite3DbFree(db, p->pFree); } vdbeFreeOpArray(db, p->aOp, p->nOp); sqlite3DbFree(db, p->aColName); sqlite3DbFree(db, p->zSql); #ifdef SQLITE_ENABLE_STMT_SCANSTATUS - for(i=0; inScan; i++){ - sqlite3DbFree(db, p->aScan[i].zName); + { + int i; + for(i=0; inScan; i++){ + sqlite3DbFree(db, p->aScan[i].zName); + } + sqlite3DbFree(db, p->aScan); } - sqlite3DbFree(db, p->aScan); #endif } @@ -73947,30 +74655,13 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( ** If an OOM error occurs, NULL is returned. */ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( - KeyInfo *pKeyInfo, /* Description of the record */ - char *pSpace, /* Unaligned space available */ - int szSpace, /* Size of pSpace[] in bytes */ - char **ppFree /* OUT: Caller should free this pointer */ + KeyInfo *pKeyInfo /* Description of the record */ ){ UnpackedRecord *p; /* Unpacked record to return */ - int nOff; /* Increment pSpace by nOff to align it */ int nByte; /* Number of bytes required for *p */ - - /* We want to shift the pointer pSpace up such that it is 8-byte aligned. - ** Thus, we need to calculate a value, nOff, between 0 and 7, to shift - ** it by. If pSpace is already 8-byte aligned, nOff should be zero. 
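The sqlite3VdbeCloseStatement() rework above splits the function into a SQLITE_NOINLINE worker holding the rarely-taken cleanup and a thin public entry point that keeps only the cheap "is there anything to do?" test. A sketch of that shape with made-up names; the noinline attribute spelling is compiler-specific (GCC/Clang shown) and is an assumption, not SQLite's macro:

    #if defined(__GNUC__)
    # define SKETCH_NOINLINE __attribute__((noinline))
    #else
    # define SKETCH_NOINLINE
    #endif

    typedef struct Stmt { int nStatement; int iStatement; } Stmt;

    /* Rarely-taken path: kept out of line so the common caller stays small. */
    static SKETCH_NOINLINE int stmtCloseSlow(Stmt *p, int eOp){
      /* ... release or roll back the statement savepoint here ... */
      (void)eOp;
      p->nStatement--;
      p->iStatement = 0;
      return 0;
    }

    /* Hot path: one test, then either return immediately or call the
    ** out-of-line worker -- the same layout as the hunk above. */
    static int stmtClose(Stmt *p, int eOp){
      if( p->nStatement && p->iStatement ) return stmtCloseSlow(p, eOp);
      return 0;
    }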
- */ - nOff = (8 - (SQLITE_PTR_TO_INT(pSpace) & 7)) & 7; nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nField+1); - if( nByte>szSpace+nOff ){ - p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); - *ppFree = (char *)p; - if( !p ) return 0; - }else{ - p = (UnpackedRecord*)&pSpace[nOff]; - *ppFree = 0; - } - + p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); + if( !p ) return 0; p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))]; assert( pKeyInfo->aSortOrder!=0 ); p->pKeyInfo = pKeyInfo; @@ -74844,7 +75535,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){ /* Read in the complete content of the index entry */ sqlite3VdbeMemInit(&m, db, 0); - rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, 1, &m); + rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m); if( rc ){ return rc; } @@ -74924,7 +75615,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare( return SQLITE_CORRUPT_BKPT; } sqlite3VdbeMemInit(&m, db, 0); - rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, 1, &m); + rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m); if( rc ){ return rc; } @@ -75040,10 +75731,10 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){ ** This function is used to free UnpackedRecord structures allocated by ** the vdbeUnpackRecord() function found in vdbeapi.c. */ -static void vdbeFreeUnpacked(sqlite3 *db, UnpackedRecord *p){ +static void vdbeFreeUnpacked(sqlite3 *db, int nField, UnpackedRecord *p){ if( p ){ int i; - for(i=0; inField; i++){ + for(i=0; iaMem[i]; if( pMem->zMalloc ) sqlite3VdbeMemRelease(pMem); } @@ -75076,10 +75767,15 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( assert( db->pPreUpdate==0 ); memset(&preupdate, 0, sizeof(PreUpdate)); - if( op==SQLITE_UPDATE ){ - iKey2 = v->aMem[iReg].u.i; + if( HasRowid(pTab)==0 ){ + iKey1 = iKey2 = 0; + preupdate.pPk = sqlite3PrimaryKeyIndex(pTab); }else{ - iKey2 = iKey1; + if( op==SQLITE_UPDATE ){ + iKey2 = v->aMem[iReg].u.i; + }else{ + iKey2 = iKey1; + } } assert( pCsr->nField==pTab->nCol @@ -75102,8 +75798,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2); db->pPreUpdate = 0; sqlite3DbFree(db, preupdate.aRecord); - vdbeFreeUnpacked(db, preupdate.pUnpacked); - vdbeFreeUnpacked(db, preupdate.pNewUnpacked); + vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pUnpacked); + vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pNewUnpacked); if( preupdate.aNew ){ int i; for(i=0; inField; i++){ @@ -76588,10 +77284,8 @@ SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){ */ SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){ Vdbe *p = (Vdbe*)pStmt; - if( p==0 || i<1 || i>p->nzVar ){ - return 0; - } - return p->azVar[i-1]; + if( p==0 ) return 0; + return sqlite3VListNumToName(p->pVList, i); } /* @@ -76600,19 +77294,8 @@ SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){ ** return 0. 
*/ SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nName){ - int i; - if( p==0 ){ - return 0; - } - if( zName ){ - for(i=0; inzVar; i++){ - const char *z = p->azVar[i]; - if( z && strncmp(z,zName,nName)==0 && z[nName]==0 ){ - return i+1; - } - } - } - return 0; + if( p==0 || zName==0 ) return 0; + return sqlite3VListNameToNum(p->pVList, zName, nName); } SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){ return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName)); @@ -76775,10 +77458,9 @@ static UnpackedRecord *vdbeUnpackRecord( int nKey, const void *pKey ){ - char *dummy; /* Dummy argument for AllocUnpackedRecord() */ UnpackedRecord *pRet; /* Return value */ - pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo, 0, 0, &dummy); + pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo); if( pRet ){ memset(pRet->aMem, 0, sizeof(Mem)*(pKeyInfo->nField+1)); sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, pRet); @@ -76792,6 +77474,7 @@ static UnpackedRecord *vdbeUnpackRecord( */ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){ PreUpdate *p = db->pPreUpdate; + Mem *pMem; int rc = SQLITE_OK; /* Test that this call is being made from within an SQLITE_DELETE or @@ -76800,6 +77483,9 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa rc = SQLITE_MISUSE_BKPT; goto preupdate_old_out; } + if( p->pPk ){ + iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx); + } if( iIdx>=p->pCsr->nField || iIdx<0 ){ rc = SQLITE_RANGE; goto preupdate_old_out; @@ -76813,7 +77499,7 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); aRec = sqlite3DbMallocRaw(db, nRec); if( !aRec ) goto preupdate_old_out; - rc = sqlite3BtreeData(p->pCsr->uc.pCursor, 0, nRec, aRec); + rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); if( rc==SQLITE_OK ){ p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); if( !p->pUnpacked ) rc = SQLITE_NOMEM; @@ -76825,17 +77511,14 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa p->aRecord = aRec; } - if( iIdx>=p->pUnpacked->nField ){ + pMem = *ppValue = &p->pUnpacked->aMem[iIdx]; + if( iIdx==p->pTab->iPKey ){ + sqlite3VdbeMemSetInt64(pMem, p->iKey1); + }else if( iIdx>=p->pUnpacked->nField ){ *ppValue = (sqlite3_value *)columnNullValue(); - }else{ - Mem *pMem = *ppValue = &p->pUnpacked->aMem[iIdx]; - *ppValue = &p->pUnpacked->aMem[iIdx]; - if( iIdx==p->pTab->iPKey ){ - sqlite3VdbeMemSetInt64(pMem, p->iKey1); - }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){ - if( pMem->flags & MEM_Int ){ - sqlite3VdbeMemRealify(pMem); - } + }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){ + if( pMem->flags & MEM_Int ){ + sqlite3VdbeMemRealify(pMem); } } @@ -76888,6 +77571,9 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa rc = SQLITE_MISUSE_BKPT; goto preupdate_new_out; } + if( p->pPk && p->op!=SQLITE_UPDATE ){ + iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx); + } if( iIdx>=p->pCsr->nField || iIdx<0 ){ rc = SQLITE_RANGE; goto preupdate_new_out; @@ -76908,13 +77594,11 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa } p->pNewUnpacked = pUnpack; } - if( iIdx>=pUnpack->nField ){ + pMem = &pUnpack->aMem[iIdx]; + if( iIdx==p->pTab->iPKey ){ + sqlite3VdbeMemSetInt64(pMem, p->iKey2); + }else if( iIdx>=pUnpack->nField ){ pMem = (sqlite3_value *)columnNullValue(); - }else{ - pMem = 
&pUnpack->aMem[iIdx]; - if( iIdx==p->pTab->iPKey ){ - sqlite3VdbeMemSetInt64(pMem, p->iKey2); - } } }else{ /* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required @@ -77330,7 +78014,7 @@ SQLITE_API int sqlite3_found_count = 0; ** Test a register to see if it exceeds the current maximum blob size. ** If it does, record the new maximum blob size. */ -#if defined(SQLITE_TEST) && !defined(SQLITE_OMIT_BUILTIN_TEST) +#if defined(SQLITE_TEST) && !defined(SQLITE_UNTESTABLE) # define UPDATE_MAX_BLOBSIZE(P) updateMaxBlobsize(P) #else # define UPDATE_MAX_BLOBSIZE(P) @@ -77440,7 +78124,7 @@ static VdbeCursor *allocateCursor( } if( SQLITE_OK==sqlite3VdbeMemClearAndResize(pMem, nByte) ){ p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z; - memset(pCx, 0, sizeof(VdbeCursor)); + memset(pCx, 0, offsetof(VdbeCursor,pAltCursor)); pCx->eCurType = eCurType; pCx->iDb = iDb; pCx->nField = nField; @@ -77891,8 +78575,6 @@ SQLITE_PRIVATE int sqlite3VdbeExec( Mem *pIn2 = 0; /* 2nd input operand */ Mem *pIn3 = 0; /* 3rd input operand */ Mem *pOut = 0; /* Output operand */ - int *aPermute = 0; /* Permutation of columns for OP_Compare */ - i64 lastRowid = db->lastRowid; /* Saved value of the last insert ROWID */ #ifdef VDBE_PROFILE u64 start; /* CPU clock count at start of opcode */ #endif @@ -77907,7 +78589,6 @@ SQLITE_PRIVATE int sqlite3VdbeExec( } assert( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_BUSY ); assert( p->bIsReader || p->readOnly!=0 ); - p->rc = SQLITE_OK; p->iCurrentTime = 0; assert( p->explain==0 ); p->pResultSet = 0; @@ -78268,7 +78949,6 @@ case OP_Halt: { p->nFrame--; sqlite3VdbeSetChanges(db, p->nChange); pcx = sqlite3VdbeFrameRestore(pFrame); - lastRowid = db->lastRowid; if( pOp->p2==OE_Ignore ){ /* Instruction pcx is the OP_Program that invoked the sub-program ** currently being halted. 
If the p2 instruction of this OP_Halt @@ -78285,7 +78965,7 @@ case OP_Halt: { p->rc = pOp->p1; p->errorAction = (u8)pOp->p2; p->pc = pcx; - assert( pOp->p5>=0 && pOp->p5<=4 ); + assert( pOp->p5<=4 ); if( p->rc ){ if( pOp->p5 ){ static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK", @@ -78498,12 +79178,12 @@ case OP_Variable: { /* out2 */ Mem *pVar; /* Value being transferred */ assert( pOp->p1>0 && pOp->p1<=p->nVar ); - assert( pOp->p4.z==0 || pOp->p4.z==p->azVar[pOp->p1-1] ); + assert( pOp->p4.z==0 || pOp->p4.z==sqlite3VListNumToName(p->pVList,pOp->p1) ); pVar = &p->aVar[pOp->p1 - 1]; if( sqlite3VdbeMemTooBig(pVar) ){ goto too_big; } - pOut = out2Prerelease(p, pOp); + pOut = &aMem[pOp->p2]; sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static); UPDATE_MAX_BLOBSIZE(pOut); break; @@ -78990,9 +79670,7 @@ case OP_Function: { #endif MemSetTypeFlag(pCtx->pOut, MEM_Null); pCtx->fErrorOrAux = 0; - db->lastRowid = lastRowid; (*pCtx->pFunc->xSFunc)(pCtx, pCtx->argc, pCtx->argv);/* IMP: R-24505-23230 */ - lastRowid = db->lastRowid; /* Remember rowid changes made by xSFunc */ /* If the function returned an error, throw an exception */ if( pCtx->fErrorOrAux ){ @@ -79311,8 +79989,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ assert( pOp->opcode==OP_Eq || pOp->opcode==OP_Ne ); assert( (flags1 & MEM_Cleared)==0 ); assert( (pOp->p5 & SQLITE_JUMPIFNULL)==0 ); - if( (flags1&MEM_Null)!=0 - && (flags3&MEM_Null)!=0 + if( (flags1&flags3&MEM_Null)!=0 && (flags3&MEM_Cleared)==0 ){ res = 0; /* Operands are equal */ @@ -79449,8 +80126,8 @@ case OP_ElseNotEq: { /* same as TK_ESCAPE, jump */ /* Opcode: Permutation * * * P4 * ** -** Set the permutation used by the OP_Compare operator to be the array -** of integers in P4. +** Set the permutation used by the OP_Compare operator in the next +** instruction. The permutation is stored in the P4 operand. ** ** The permutation is only valid until the next OP_Compare that has ** the OPFLAG_PERMUTE bit set in P5. Typically the OP_Permutation should @@ -79462,7 +80139,8 @@ case OP_ElseNotEq: { /* same as TK_ESCAPE, jump */ case OP_Permutation: { assert( pOp->p4type==P4_INTARRAY ); assert( pOp->p4.ai ); - aPermute = pOp->p4.ai + 1; + assert( pOp[1].opcode==OP_Compare ); + assert( pOp[1].p5 & OPFLAG_PERMUTE ); break; } @@ -79495,8 +80173,17 @@ case OP_Compare: { int idx; CollSeq *pColl; /* Collating sequence to use on this term */ int bRev; /* True for DESCENDING sort order */ + int *aPermute; /* The permutation */ - if( (pOp->p5 & OPFLAG_PERMUTE)==0 ) aPermute = 0; + if( (pOp->p5 & OPFLAG_PERMUTE)==0 ){ + aPermute = 0; + }else{ + assert( pOp>aOp ); + assert( pOp[-1].opcode==OP_Permutation ); + assert( pOp[-1].p4type==P4_INTARRAY ); + aPermute = pOp[-1].p4.ai + 1; + assert( aPermute!=0 ); + } n = pOp->p3; pKeyInfo = pOp->p4.pKeyInfo; assert( n>0 ); @@ -79529,7 +80216,6 @@ case OP_Compare: { break; } } - aPermute = 0; break; } @@ -79779,7 +80465,6 @@ case OP_Column: { assert( pC->eCurType!=CURTYPE_VTAB ); assert( pC->eCurType!=CURTYPE_PSEUDO || pC->nullRow ); assert( pC->eCurType!=CURTYPE_SORTER ); - pCrsr = pC->uc.pCursor; if( pC->cacheStatus!=p->cacheCtr ){ /*OPTIMIZATION-IF-FALSE*/ if( pC->nullRow ){ @@ -79795,6 +80480,7 @@ case OP_Column: { goto op_column_out; } }else{ + pCrsr = pC->uc.pCursor; assert( pC->eCurType==CURTYPE_BTREE ); assert( pCrsr ); assert( sqlite3BtreeCursorIsValid(pCrsr) ); @@ -79858,7 +80544,7 @@ case OP_Column: { /* Make sure zData points to enough of the record to cover the header. 
*/ if( pC->aRow==0 ){ memset(&sMem, 0, sizeof(sMem)); - rc = sqlite3VdbeMemFromBtree(pCrsr, 0, aOffset[0], !pC->isTable, &sMem); + rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, 0, aOffset[0], &sMem); if( rc!=SQLITE_OK ) goto abort_due_to_error; zData = (u8*)sMem.z; }else{ @@ -79971,8 +80657,7 @@ case OP_Column: { static u8 aZero[8]; /* This is the bogus content */ sqlite3VdbeSerialGet(aZero, t, pDest); }else{ - rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, !pC->isTable, - pDest); + rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest); if( rc!=SQLITE_OK ) goto abort_due_to_error; sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest); pDest->flags &= ~MEM_Ephem; @@ -80087,6 +80772,20 @@ case OP_MakeRecord: { }while( zAffinity[0] ); } +#ifdef SQLITE_ENABLE_NULL_TRIM + /* NULLs can be safely trimmed from the end of the record, as long as + ** as the schema format is 2 or more and none of the omitted columns + ** have a non-NULL default value. Also, the record must be left with + ** at least one field. If P5>0 then it will be one more than the + ** index of the right-most column with a non-NULL default value */ + if( pOp->p5 ){ + while( (pLast->flags & MEM_Null)!=0 && nField>pOp->p5 ){ + pLast--; + nField--; + } + } +#endif + /* Loop through the elements that will make up the record to figure ** out how much space is required for the new record. */ @@ -80837,10 +81536,10 @@ case OP_OpenEphemeral: { if( pCx==0 ) goto no_mem; pCx->nullRow = 1; pCx->isEphemeral = 1; - rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBt, + rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBtx, BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags); if( rc==SQLITE_OK ){ - rc = sqlite3BtreeBeginTrans(pCx->pBt, 1); + rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1); } if( rc==SQLITE_OK ){ /* If a transient index is required, create it by calling @@ -80848,21 +81547,20 @@ case OP_OpenEphemeral: { ** opening it. If a transient table is required, just use the ** automatically created table with root-page 1 (an BLOB_INTKEY table). */ - if( (pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ + if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ int pgno; assert( pOp->p4type==P4_KEYINFO ); - rc = sqlite3BtreeCreateTable(pCx->pBt, &pgno, BTREE_BLOBKEY | pOp->p5); + rc = sqlite3BtreeCreateTable(pCx->pBtx, &pgno, BTREE_BLOBKEY | pOp->p5); if( rc==SQLITE_OK ){ assert( pgno==MASTER_ROOT+1 ); assert( pKeyInfo->db==db ); assert( pKeyInfo->enc==ENC(db) ); - pCx->pKeyInfo = pKeyInfo; - rc = sqlite3BtreeCursor(pCx->pBt, pgno, BTREE_WRCSR, + rc = sqlite3BtreeCursor(pCx->pBtx, pgno, BTREE_WRCSR, pKeyInfo, pCx->uc.pCursor); } pCx->isTable = 0; }else{ - rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, BTREE_WRCSR, + rc = sqlite3BtreeCursor(pCx->pBtx, MASTER_ROOT, BTREE_WRCSR, 0, pCx->uc.pCursor); pCx->isTable = 1; } @@ -81094,7 +81792,8 @@ case OP_SeekGT: { /* jump, in3 */ if( pC->isTable ){ /* The BTREE_SEEK_EQ flag is only set on index cursors */ - assert( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ)==0 ); + assert( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ)==0 + || CORRUPT_DB ); /* The input value in P3 might be of any type: integer, real, string, ** blob, or NULL. 
But it needs to be an integer before we can do @@ -81296,10 +81995,9 @@ case OP_Found: { /* jump, in3 */ int ii; VdbeCursor *pC; int res; - char *pFree; + UnpackedRecord *pFree; UnpackedRecord *pIdxKey; UnpackedRecord r; - char aTempRec[ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*4 + 7]; #ifdef SQLITE_TEST if( pOp->opcode!=OP_NoConflict ) sqlite3_found_count++; @@ -81316,7 +82014,6 @@ case OP_Found: { /* jump, in3 */ assert( pC->eCurType==CURTYPE_BTREE ); assert( pC->uc.pCursor!=0 ); assert( pC->isTable==0 ); - pFree = 0; if( pOp->p4.i>0 ){ r.pKeyInfo = pC->pKeyInfo; r.nField = (u16)pOp->p4.i; @@ -81329,10 +82026,9 @@ case OP_Found: { /* jump, in3 */ } #endif pIdxKey = &r; + pFree = 0; }else{ - pIdxKey = sqlite3VdbeAllocUnpackedRecord( - pC->pKeyInfo, aTempRec, sizeof(aTempRec), &pFree - ); + pFree = pIdxKey = sqlite3VdbeAllocUnpackedRecord(pC->pKeyInfo); if( pIdxKey==0 ) goto no_mem; assert( pIn3->flags & MEM_Blob ); (void)ExpandBlob(pIn3); @@ -81352,7 +82048,7 @@ case OP_Found: { /* jump, in3 */ } } rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, pIdxKey, 0, 0, &res); - sqlite3DbFree(db, pFree); + if( pFree ) sqlite3DbFree(db, pFree); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } @@ -81579,7 +82275,7 @@ case OP_NewRowid: { /* out2 */ sqlite3VdbeMemIntegerify(pMem); assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */ if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){ - rc = SQLITE_FULL; /* IMP: R-12275-61338 */ + rc = SQLITE_FULL; /* IMP: R-17817-00630 */ goto abort_due_to_error; } if( vu.i+1 ){ @@ -81631,15 +82327,10 @@ case OP_NewRowid: { /* out2 */ ** then rowid is stored for subsequent return by the ** sqlite3_last_insert_rowid() function (otherwise it is unmodified). ** -** If the OPFLAG_USESEEKRESULT flag of P5 is set and if the result of -** the last seek operation (OP_NotExists or OP_SeekRowid) was a success, -** then this -** operation will not attempt to find the appropriate row before doing -** the insert but will instead overwrite the row that the cursor is -** currently pointing to. Presumably, the prior OP_NotExists or -** OP_SeekRowid opcode -** has already positioned the cursor correctly. This is an optimization -** that boosts performance by avoiding redundant seeks. +** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might +** run faster by avoiding an unnecessary seek on cursor P1. However, +** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior +** seeks on the cursor or if the most recent seek used a key equal to P3. ** ** If the OPFLAG_ISUPDATE flag is set, then this opcode is part of an ** UPDATE operation. Otherwise (if the flag is clear) then this opcode @@ -81684,7 +82375,7 @@ case OP_InsertInt: { assert( pC!=0 ); assert( pC->eCurType==CURTYPE_BTREE ); assert( pC->uc.pCursor!=0 ); - assert( pC->isTable ); + assert( (pOp->p5 & OPFLAG_ISNOOP) || pC->isTable ); assert( pOp->p4type==P4_TABLE || pOp->p4type>=P4_STATIC ); REGISTER_TRACE(pOp->p2, pData); @@ -81700,14 +82391,13 @@ case OP_InsertInt: { } if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){ - assert( pC->isTable ); assert( pC->iDb>=0 ); zDb = db->aDb[pC->iDb].zDbSName; pTab = pOp->p4.pTab; - assert( HasRowid(pTab) ); + assert( (pOp->p5 & OPFLAG_ISNOOP) || HasRowid(pTab) ); op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT); }else{ - pTab = 0; /* Not needed. Silence a comiler warning. */ + pTab = 0; /* Not needed. Silence a compiler warning. */ zDb = 0; /* Not needed. Silence a compiler warning. 
*/ } @@ -81719,10 +82409,11 @@ case OP_InsertInt: { ){ sqlite3VdbePreUpdateHook(p, pC, SQLITE_INSERT, zDb, pTab, x.nKey, pOp->p2); } + if( pOp->p5 & OPFLAG_ISNOOP ) break; #endif if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; - if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = x.nKey; + if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey; if( pData->flags & MEM_Null ){ x.pData = 0; x.nData = 0; @@ -81739,7 +82430,7 @@ case OP_InsertInt: { } x.pKey = 0; rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, - (pOp->p5 & OPFLAG_APPEND)!=0, seekResult + (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), seekResult ); pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; @@ -81831,8 +82522,11 @@ case OP_Delete: { #ifdef SQLITE_ENABLE_PREUPDATE_HOOK /* Invoke the pre-update-hook if required. */ - if( db->xPreUpdateCallback && pOp->p4.pTab && HasRowid(pTab) ){ - assert( !(opflags & OPFLAG_ISUPDATE) || (aMem[pOp->p3].flags & MEM_Int) ); + if( db->xPreUpdateCallback && pOp->p4.pTab ){ + assert( !(opflags & OPFLAG_ISUPDATE) + || HasRowid(pTab)==0 + || (aMem[pOp->p3].flags & MEM_Int) + ); sqlite3VdbePreUpdateHook(p, pC, (opflags & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_DELETE, zDb, pTab, pC->movetoTarget, @@ -81863,6 +82557,7 @@ case OP_Delete: { rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5); pC->cacheStatus = CACHE_STALE; + pC->seekResult = 0; if( rc ) goto abort_due_to_error; /* Invoke the update-hook if required. */ @@ -81949,50 +82644,51 @@ case OP_SorterData: { break; } -/* Opcode: RowData P1 P2 * * * +/* Opcode: RowData P1 P2 P3 * * ** Synopsis: r[P2]=data ** -** Write into register P2 the complete row data for cursor P1. +** Write into register P2 the complete row content for the row at +** which cursor P1 is currently pointing. ** There is no interpretation of the data. ** It is just copied onto the P2 register exactly as ** it is found in the database file. ** +** If cursor P1 is an index, then the content is the key of the row. +** If cursor P2 is a table, then the content extracted is the data. +** ** If the P1 cursor must be pointing to a valid row (not a NULL row) ** of a real table, not a pseudo-table. -*/ -/* Opcode: RowKey P1 P2 * * * -** Synopsis: r[P2]=key ** -** Write into register P2 the complete row key for cursor P1. -** There is no interpretation of the data. -** The key is copied onto the P2 register exactly as -** it is found in the database file. +** If P3!=0 then this opcode is allowed to make an ephermeral pointer +** into the database page. That means that the content of the output +** register will be invalidated as soon as the cursor moves - including +** moves caused by other cursors that "save" the the current cursors +** position in order that they can write to the same table. If P3==0 +** then a copy of the data is made into memory. P3!=0 is faster, but +** P3==0 is safer. ** -** If the P1 cursor must be pointing to a valid row (not a NULL row) -** of a real table, not a pseudo-table. +** If P3!=0 then the content of the P2 register is unsuitable for use +** in OP_Result and any OP_Result will invalidate the P2 register content. +** The P2 register content is invalidated by opcodes like OP_Function or +** by any use of another cursor pointing to the same table. 
*/ -case OP_RowKey: case OP_RowData: { VdbeCursor *pC; BtCursor *pCrsr; u32 n; - pOut = &aMem[pOp->p2]; - memAboutToChange(p, pOut); + pOut = out2Prerelease(p, pOp); - /* Note that RowKey and RowData are really exactly the same instruction */ assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->eCurType==CURTYPE_BTREE ); assert( isSorter(pC)==0 ); - assert( pC->isTable || pOp->opcode!=OP_RowData ); - assert( pC->isTable==0 || pOp->opcode==OP_RowData ); assert( pC->nullRow==0 ); assert( pC->uc.pCursor!=0 ); pCrsr = pC->uc.pCursor; - /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or + /* The OP_RowData opcodes always follow OP_NotExists or ** OP_SeekRowid or OP_Rewind/Op_Next with no intervening instructions ** that might invalidate the cursor. ** If this where not the case, on of the following assert()s @@ -82012,18 +82708,9 @@ case OP_RowData: { goto too_big; } testcase( n==0 ); - if( sqlite3VdbeMemClearAndResize(pOut, MAX(n,32)) ){ - goto no_mem; - } - pOut->n = n; - MemSetTypeFlag(pOut, MEM_Blob); - if( pC->isTable==0 ){ - rc = sqlite3BtreeKey(pCrsr, 0, n, pOut->z); - }else{ - rc = sqlite3BtreeData(pCrsr, 0, n, pOut->z); - } + rc = sqlite3VdbeMemFromBtree(pCrsr, 0, n, pOut); if( rc ) goto abort_due_to_error; - pOut->enc = SQLITE_UTF8; /* In case the blob is ever cast to text */ + if( !pOp->p3 ) Deephemeralize(pOut); UPDATE_MAX_BLOBSIZE(pOut); REGISTER_TRACE(pOp->p2, pOut); break; @@ -82112,6 +82799,13 @@ case OP_NullRow: { ** This opcode leaves the cursor configured to move in reverse order, ** from the end toward the beginning. In other words, the cursor is ** configured to use Prev, not Next. +** +** If P3 is -1, then the cursor is positioned at the end of the btree +** for the purpose of appending a new entry onto the btree. In that +** case P2 must be 0. It is assumed that the cursor is used only for +** appending and so if the cursor is valid, then the cursor must already +** be pointing at the end of the btree and so no changes are made to +** the cursor. */ case OP_Last: { /* jump */ VdbeCursor *pC; @@ -82125,23 +82819,36 @@ case OP_Last: { /* jump */ pCrsr = pC->uc.pCursor; res = 0; assert( pCrsr!=0 ); - rc = sqlite3BtreeLast(pCrsr, &res); - pC->nullRow = (u8)res; - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; pC->seekResult = pOp->p3; #ifdef SQLITE_DEBUG pC->seekOp = OP_Last; #endif - if( rc ) goto abort_due_to_error; - if( pOp->p2>0 ){ - VdbeBranchTaken(res!=0,2); - if( res ) goto jump_to_p2; + if( pOp->p3==0 || !sqlite3BtreeCursorIsValidNN(pCrsr) ){ + rc = sqlite3BtreeLast(pCrsr, &res); + pC->nullRow = (u8)res; + pC->deferredMoveto = 0; + pC->cacheStatus = CACHE_STALE; + if( rc ) goto abort_due_to_error; + if( pOp->p2>0 ){ + VdbeBranchTaken(res!=0,2); + if( res ) goto jump_to_p2; + } + }else{ + assert( pOp->p2==0 ); } break; } +/* Opcode: SorterSort P1 P2 * * * +** +** After all records have been inserted into the Sorter object +** identified by P1, invoke this opcode to actually do the sorting. +** Jump to P2 if there are no records to be sorted. +** +** This opcode is an alias for OP_Sort and OP_Rewind that is used +** for Sorter objects. +*/ /* Opcode: Sort P1 P2 * * * ** ** This opcode does exactly the same thing as OP_Rewind except that @@ -82269,6 +82976,13 @@ case OP_Rewind: { /* jump */ ** This opcode works just like Prev except that if cursor P1 is not ** open it behaves a no-op. 
*/ +/* Opcode: SorterNext P1 P2 * * P5 +** +** This opcode works just like OP_Next except that P1 must be a +** sorter object for which the OP_SorterSort opcode has been +** invoked. This opcode advances the cursor to the next sorted +** record, or jumps to P2 if there are no more sorted records. +*/ case OP_SorterNext: { /* jump */ VdbeCursor *pC; int res; @@ -82325,27 +83039,41 @@ case OP_Next: /* jump */ goto check_for_interrupt; } -/* Opcode: IdxInsert P1 P2 P3 * P5 +/* Opcode: IdxInsert P1 P2 P3 P4 P5 ** Synopsis: key=r[P2] ** ** Register P2 holds an SQL index key made using the ** MakeRecord instructions. This opcode writes that key ** into the index P1. Data for the entry is nil. ** -** P3 is a flag that provides a hint to the b-tree layer that this -** insert is likely to be an append. +** If P4 is not zero, then it is the number of values in the unpacked +** key of reg(P2). In that case, P3 is the index of the first register +** for the unpacked key. The availability of the unpacked key can sometimes +** be an optimization. +** +** If P5 has the OPFLAG_APPEND bit set, that is a hint to the b-tree layer +** that this insert is likely to be an append. ** ** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is ** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear, ** then the change counter is unchanged. ** -** If P5 has the OPFLAG_USESEEKRESULT bit set, then the cursor must have -** just done a seek to the spot where the new entry is to be inserted. -** This flag avoids doing an extra seek. +** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might +** run faster by avoiding an unnecessary seek on cursor P1. However, +** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior +** seeks on the cursor or if the most recent seek used a key equivalent +** to P2. ** ** This instruction only works for indices. The equivalent instruction ** for tables is OP_Insert. */ +/* Opcode: SorterInsert P1 P2 * * * +** Synopsis: key=r[P2] +** +** Register P2 holds an SQL index key made using the +** MakeRecord instructions. This opcode writes that key +** into the sorter P1. Data for the entry is nil. +*/ case OP_SorterInsert: /* in2 */ case OP_IdxInsert: { /* in2 */ VdbeCursor *pC; @@ -82367,7 +83095,10 @@ case OP_IdxInsert: { /* in2 */ }else{ x.nKey = pIn2->n; x.pKey = pIn2->z; - rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, pOp->p3, + x.aMem = aMem + pOp->p3; + x.nMem = (u16)pOp->p4.i; + rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, + (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), ((pOp->p5 & OPFLAG_USESEEKRESULT) ? 
pC->seekResult : 0) ); assert( pC->deferredMoveto==0 ); @@ -82411,6 +83142,7 @@ case OP_IdxDelete: { } assert( pC->deferredMoveto==0 ); pC->cacheStatus = CACHE_STALE; + pC->seekResult = 0; break; } @@ -82488,7 +83220,6 @@ case OP_IdxRowid: { /* out2 */ }else{ pOut = out2Prerelease(p, pOp); pOut->u.i = rowid; - pOut->flags = MEM_Int; } }else{ assert( pOp->opcode==OP_IdxRowid ); @@ -82780,7 +83511,7 @@ case OP_ParseSchema: { assert( iDb>=0 && iDbnDb ); assert( DbHasProperty(db, iDb, DB_SchemaLoaded) ); /* Used to be a conditional */ { - zMaster = SCHEMA_TABLE(iDb); + zMaster = MASTER_NAME; initData.db = db; initData.iDb = pOp->p1; initData.pzErrMsg = &p->zErrMsg; @@ -83130,7 +83861,7 @@ case OP_Program: { /* jump */ p->nFrame++; pFrame->pParent = p->pFrame; - pFrame->lastRowid = lastRowid; + pFrame->lastRowid = db->lastRowid; pFrame->nChange = p->nChange; pFrame->nDbChange = p->db->nChange; assert( pFrame->pAuxData==0 ); @@ -83291,29 +84022,42 @@ case OP_IfPos: { /* jump, in1 */ ** Otherwise, r[P2] is set to the sum of r[P1] and r[P3]. */ case OP_OffsetLimit: { /* in1, out2, in3 */ + i64 x; pIn1 = &aMem[pOp->p1]; pIn3 = &aMem[pOp->p3]; pOut = out2Prerelease(p, pOp); assert( pIn1->flags & MEM_Int ); assert( pIn3->flags & MEM_Int ); - pOut->u.i = pIn1->u.i<=0 ? -1 : pIn1->u.i+(pIn3->u.i>0?pIn3->u.i:0); + x = pIn1->u.i; + if( x<=0 || sqlite3AddInt64(&x, pIn3->u.i>0?pIn3->u.i:0) ){ + /* If the LIMIT is less than or equal to zero, loop forever. This + ** is documented. But also, if the LIMIT+OFFSET exceeds 2^63 then + ** also loop forever. This is undocumented. In fact, one could argue + ** that the loop should terminate. But assuming 1 billion iterations + ** per second (far exceeding the capabilities of any current hardware) + ** it would take nearly 300 years to actually reach the limit. So + ** looping forever is a reasonable approximation. */ + pOut->u.i = -1; + }else{ + pOut->u.i = x; + } break; } -/* Opcode: IfNotZero P1 P2 P3 * * -** Synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 +/* Opcode: IfNotZero P1 P2 * * * +** Synopsis: if r[P1]!=0 then r[P1]--, goto P2 ** ** Register P1 must contain an integer. If the content of register P1 is -** initially nonzero, then subtract P3 from the value in register P1 and -** jump to P2. If register P1 is initially zero, leave it unchanged -** and fall through. +** initially greater than zero, then decrement the value in register P1. +** If it is non-zero (negative or positive) and then also jump to P2. +** If register P1 is initially zero, leave it unchanged and fall through. */ case OP_IfNotZero: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags&MEM_Int ); VdbeBranchTaken(pIn1->u.i<0, 2); if( pIn1->u.i ){ - pIn1->u.i -= pOp->p3; + if( pIn1->u.i>0 ) pIn1->u.i--; goto jump_to_p2; } break; @@ -83322,13 +84066,13 @@ case OP_IfNotZero: { /* jump, in1 */ /* Opcode: DecrJumpZero P1 P2 * * * ** Synopsis: if (--r[P1])==0 goto P2 ** -** Register P1 must hold an integer. Decrement the value in register P1 -** then jump to P2 if the new value is exactly zero. +** Register P1 must hold an integer. Decrement the value in P1 +** and jump to P2 if the new value is exactly zero. */ case OP_DecrJumpZero: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags&MEM_Int ); - pIn1->u.i--; + if( pIn1->u.i>SMALLEST_INT64 ) pIn1->u.i--; VdbeBranchTaken(pIn1->u.i==0, 2); if( pIn1->u.i==0 ) goto jump_to_p2; break; @@ -83574,7 +84318,7 @@ case OP_JournalMode: { /* out2 */ ** file. 
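A quick check of the "nearly 300 years" figure in the OP_OffsetLimit comment above: the saturation point is 2^63 ≈ 9.22e18 combined LIMIT+OFFSET iterations, and at the assumed 10^9 iterations per second that is about 9.22e9 seconds, roughly 292 years — so returning -1 (loop "forever") on overflow is indeed a harmless approximation.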
An EXCLUSIVE lock may still be held on the database file ** after a successful return. */ - rc = sqlite3PagerCloseWal(pPager); + rc = sqlite3PagerCloseWal(pPager, db); if( rc==SQLITE_OK ){ sqlite3PagerSetJournalMode(pPager, eNew); } @@ -84058,7 +84802,7 @@ case OP_VUpdate: { sqlite3VtabImportErrmsg(p, pVtab); if( rc==SQLITE_OK && pOp->p1 ){ assert( nArg>1 && apArg[0] && (apArg[0]->flags&MEM_Null) ); - db->lastRowid = lastRowid = rowid; + db->lastRowid = rowid; } if( (rc&0xff)==SQLITE_CONSTRAINT && pOp->p4.pVtab->bConstraint ){ if( pOp->p5==OE_Ignore ){ @@ -84294,7 +85038,6 @@ default: { /* This is really OP_Noop and OP_Explain */ ** release the mutexes on btrees that were acquired at the ** top. */ vdbe_return: - db->lastRowid = lastRowid; testcase( nVmStep>0 ); p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep; sqlite3VdbeLeave(p); @@ -84358,10 +85101,9 @@ default: { /* This is really OP_Noop and OP_Explain */ */ typedef struct Incrblob Incrblob; struct Incrblob { - int flags; /* Copy of "flags" passed to sqlite3_blob_open() */ int nByte; /* Size of open blob, in bytes */ int iOffset; /* Byte offset of blob in cursor data */ - int iCol; /* Table column this handle is open on */ + u16 iCol; /* Table column this handle is open on */ BtCursor *pCsr; /* Cursor pointing at blob row */ sqlite3_stmt *pStmt; /* Statement holding cursor open */ sqlite3 *db; /* The associated database */ @@ -84392,17 +85134,27 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){ char *zErr = 0; /* Error message */ Vdbe *v = (Vdbe *)p->pStmt; - /* Set the value of the SQL statements only variable to integer iRow. - ** This is done directly instead of using sqlite3_bind_int64() to avoid - ** triggering asserts related to mutexes. + /* Set the value of register r[1] in the SQL statement to integer iRow. + ** This is done directly as a performance optimization */ - assert( v->aVar[0].flags&MEM_Int ); - v->aVar[0].u.i = iRow; + v->aMem[1].flags = MEM_Int; + v->aMem[1].u.i = iRow; - rc = sqlite3_step(p->pStmt); + /* If the statement has been run before (and is paused at the OP_ResultRow) + ** then back it up to the point where it does the OP_SeekRowid. This could + ** have been down with an extra OP_Goto, but simply setting the program + ** counter is faster. */ + if( v->pc>3 ){ + v->pc = 3; + rc = sqlite3VdbeExec(v); + }else{ + rc = sqlite3_step(p->pStmt); + } if( rc==SQLITE_ROW ){ VdbeCursor *pC = v->apCsr[0]; - u32 type = pC->aType[p->iCol]; + u32 type = pC->nHdrParsed>p->iCol ? pC->aType[p->iCol] : 0; + testcase( pC->nHdrParsed==p->iCol ); + testcase( pC->nHdrParsed==p->iCol+1 ); if( type<12 ){ zErr = sqlite3MPrintf(p->db, "cannot open value of type %s", type==0?"null": type==7?"real": "integer" @@ -84447,7 +85199,7 @@ SQLITE_API int sqlite3_blob_open( const char *zTable, /* The table containing the blob */ const char *zColumn, /* The column containing the blob */ sqlite_int64 iRow, /* The row containing the glob */ - int flags, /* True -> read/write access, false -> read-only */ + int wrFlag, /* True -> read/write access, false -> read-only */ sqlite3_blob **ppBlob /* Handle for accessing the blob returned here */ ){ int nAttempt = 0; @@ -84469,7 +85221,7 @@ SQLITE_API int sqlite3_blob_open( return SQLITE_MISUSE_BKPT; } #endif - flags = !!flags; /* flags = (flags ? 1 : 0); */ + wrFlag = !!wrFlag; /* wrFlag = (wrFlag ? 
1 : 0); */ sqlite3_mutex_enter(db->mutex); @@ -84529,9 +85281,8 @@ SQLITE_API int sqlite3_blob_open( /* If the value is being opened for writing, check that the ** column is not indexed, and that it is not part of a foreign key. - ** It is against the rules to open a column to which either of these - ** descriptions applies for writing. */ - if( flags ){ + */ + if( wrFlag ){ const char *zFault = 0; Index *pIdx; #ifndef SQLITE_OMIT_FOREIGN_KEY @@ -84592,19 +85343,17 @@ SQLITE_API int sqlite3_blob_open( static const VdbeOpList openBlob[] = { {OP_TableLock, 0, 0, 0}, /* 0: Acquire a read or write lock */ {OP_OpenRead, 0, 0, 0}, /* 1: Open a cursor */ - {OP_Variable, 1, 1, 0}, /* 2: Move ?1 into reg[1] */ - {OP_NotExists, 0, 7, 1}, /* 3: Seek the cursor */ - {OP_Column, 0, 0, 1}, /* 4 */ - {OP_ResultRow, 1, 0, 0}, /* 5 */ - {OP_Goto, 0, 2, 0}, /* 6 */ - {OP_Close, 0, 0, 0}, /* 7 */ - {OP_Halt, 0, 0, 0}, /* 8 */ + /* blobSeekToRow() will initialize r[1] to the desired rowid */ + {OP_NotExists, 0, 5, 1}, /* 2: Seek the cursor to rowid=r[1] */ + {OP_Column, 0, 0, 1}, /* 3 */ + {OP_ResultRow, 1, 0, 0}, /* 4 */ + {OP_Halt, 0, 0, 0}, /* 5 */ }; Vdbe *v = (Vdbe *)pBlob->pStmt; int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); VdbeOp *aOp; - sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, flags, + sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, wrFlag, pTab->pSchema->schema_cookie, pTab->pSchema->iGeneration); sqlite3VdbeChangeP5(v, 1); @@ -84621,7 +85370,7 @@ SQLITE_API int sqlite3_blob_open( #else aOp[0].p1 = iDb; aOp[0].p2 = pTab->tnum; - aOp[0].p3 = flags; + aOp[0].p3 = wrFlag; sqlite3VdbeChangeP4(v, 1, pTab->zName, P4_TRANSIENT); } if( db->mallocFailed==0 ){ @@ -84629,7 +85378,7 @@ SQLITE_API int sqlite3_blob_open( /* Remove either the OP_OpenWrite or OpenRead. Set the P2 ** parameter of the other to pTab->tnum. 
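The flags→wrFlag rename above is internal only; the public incremental-BLOB interface is unchanged. A minimal usage sketch, assuming an open handle db, a valid rowid iRow, and a hypothetical table t1 with a BLOB column content (writing still requires that the column is neither indexed nor part of a foreign key, as checked above):

    sqlite3_blob *pBlob = 0;
    int rc = sqlite3_blob_open(db, "main", "t1", "content", iRow,
                               1 /* wrFlag: read/write */, &pBlob);
    if( rc==SQLITE_OK ){
      rc = sqlite3_blob_write(pBlob, "hello", 5, 0);  /* overwrite first 5 bytes */
      sqlite3_blob_close(pBlob);
    }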
*/ - if( flags ) aOp[1].opcode = OP_OpenWrite; + if( wrFlag ) aOp[1].opcode = OP_OpenWrite; aOp[1].p2 = pTab->tnum; aOp[1].p3 = iDb; @@ -84642,23 +85391,21 @@ SQLITE_API int sqlite3_blob_open( */ aOp[1].p4type = P4_INT32; aOp[1].p4.i = pTab->nCol+1; - aOp[4].p2 = pTab->nCol; + aOp[3].p2 = pTab->nCol; - pParse->nVar = 1; + pParse->nVar = 0; pParse->nMem = 1; pParse->nTab = 1; sqlite3VdbeMakeReady(v, pParse); } } - pBlob->flags = flags; pBlob->iCol = iCol; pBlob->db = db; sqlite3BtreeLeaveAll(db); if( db->mallocFailed ){ goto blob_open_out; } - sqlite3_bind_int64(pBlob->pStmt, 1, iRow); rc = blobSeekToRow(pBlob, iRow, &zErr); } while( (++nAttempt)pKeyInfo && pCsr->pBt==0 ); + assert( pCsr->pKeyInfo && pCsr->pBtx==0 ); assert( pCsr->eCurType==CURTYPE_SORTER ); szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nField-1)*sizeof(CollSeq*); sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask); @@ -86170,12 +86917,8 @@ static int vdbeSorterOpenTempFile( */ static int vdbeSortAllocUnpacked(SortSubtask *pTask){ if( pTask->pUnpacked==0 ){ - char *pFree; - pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord( - pTask->pSorter->pKeyInfo, 0, 0, &pFree - ); - assert( pTask->pUnpacked==(UnpackedRecord*)pFree ); - if( pFree==0 ) return SQLITE_NOMEM_BKPT; + pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pTask->pSorter->pKeyInfo); + if( pTask->pUnpacked==0 ) return SQLITE_NOMEM_BKPT; pTask->pUnpacked->nField = pTask->pSorter->pKeyInfo->nField; pTask->pUnpacked->errCode = 0; } @@ -87576,9 +88319,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterCompare( r2 = pSorter->pUnpacked; pKeyInfo = pCsr->pKeyInfo; if( r2==0 ){ - char *p; - r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo,0,0,&p); - assert( pSorter->pUnpacked==(UnpackedRecord*)p ); + r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo); if( r2==0 ) return SQLITE_NOMEM_BKPT; r2->nField = nKeyCol; } @@ -88201,8 +88942,6 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){ ** table and column. 
*/ /* #include "sqliteInt.h" */ -/* #include */ -/* #include */ /* ** Walk the expression tree pExpr and increase the aggregate function @@ -88586,6 +89325,10 @@ static int lookupName( sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); return WRC_Abort; } + if( sqlite3ExprVectorSize(pOrig)!=1 ){ + sqlite3ErrorMsg(pParse, "row value misused"); + return WRC_Abort; + } resolveAlias(pParse, pEList, j, pExpr, "", nSubquery); cnt = 1; pMatch = 0; @@ -88962,6 +89705,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ notValid(pParse, pNC, "parameters", NC_IsCheck|NC_PartIdx|NC_IdxExpr); break; } + case TK_BETWEEN: case TK_EQ: case TK_NE: case TK_LT: @@ -88972,10 +89716,17 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ case TK_ISNOT: { int nLeft, nRight; if( pParse->db->mallocFailed ) break; - assert( pExpr->pRight!=0 ); assert( pExpr->pLeft!=0 ); nLeft = sqlite3ExprVectorSize(pExpr->pLeft); - nRight = sqlite3ExprVectorSize(pExpr->pRight); + if( pExpr->op==TK_BETWEEN ){ + nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[0].pExpr); + if( nRight==nLeft ){ + nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[1].pExpr); + } + }else{ + assert( pExpr->pRight!=0 ); + nRight = sqlite3ExprVectorSize(pExpr->pRight); + } if( nLeft!=nRight ){ testcase( pExpr->op==TK_EQ ); testcase( pExpr->op==TK_NE ); @@ -88985,6 +89736,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ testcase( pExpr->op==TK_GE ); testcase( pExpr->op==TK_IS ); testcase( pExpr->op==TK_ISNOT ); + testcase( pExpr->op==TK_BETWEEN ); sqlite3ErrorMsg(pParse, "row value misused"); } break; @@ -89948,7 +90700,7 @@ static char comparisonAffinity(Expr *pExpr){ aff = sqlite3CompareAffinity(pExpr->pRight, aff); }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){ aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff); - }else if( NEVER(aff==0) ){ + }else if( aff==0 ){ aff = SQLITE_AFF_BLOB; } return aff; @@ -90131,9 +90883,10 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField( assert( pVector->flags & EP_xIsSelect ); /* The TK_SELECT_COLUMN Expr node: ** - ** pLeft: pVector containing TK_SELECT + ** pLeft: pVector containing TK_SELECT. Not deleted. ** pRight: not used. But recursively deleted. ** iColumn: Index of a column in pVector + ** iTable: 0 or the number of columns on the LHS of an assignment ** pLeft->iTable: First in an array of register holding result, or 0 ** if the result is not yet computed. ** @@ -90144,7 +90897,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField( ** with the same pLeft pointer to the pVector, but only one of them ** will own the pVector. 
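The TK_BETWEEN case added to resolveExprStep() above extends the existing row-value width check to BETWEEN, so mismatched vector sizes are reported at name-resolution time. An illustrative sketch, assuming a hypothetical table t with columns a and b and db/pStmt in scope:

    /* Equal-width row values are accepted: */
    rc = sqlite3_prepare_v2(db,
        "SELECT * FROM t WHERE (a,b) BETWEEN (1,2) AND (3,4)", -1, &pStmt, 0);
    /* A width mismatch such as "(a,b) BETWEEN (1,2,3) AND (4,5,6)" now fails
    ** to prepare with the error message "row value misused".                 */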
*/ - pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0, 0); + pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0); if( pRet ){ pRet->iColumn = iField; pRet->pLeft = pVector; @@ -90244,7 +90997,10 @@ static void codeVectorCompare( u8 opx = op; int addrDone = sqlite3VdbeMakeLabel(v); - assert( nLeft==sqlite3ExprVectorSize(pRight) ); + if( nLeft!=sqlite3ExprVectorSize(pRight) ){ + sqlite3ErrorMsg(pParse, "row value misused"); + return; + } assert( pExpr->op==TK_EQ || pExpr->op==TK_NE || pExpr->op==TK_IS || pExpr->op==TK_ISNOT || pExpr->op==TK_LT || pExpr->op==TK_GT @@ -90536,15 +91292,19 @@ SQLITE_PRIVATE Expr *sqlite3PExpr( Parse *pParse, /* Parsing context */ int op, /* Expression opcode */ Expr *pLeft, /* Left operand */ - Expr *pRight, /* Right operand */ - const Token *pToken /* Argument token */ + Expr *pRight /* Right operand */ ){ Expr *p; if( op==TK_AND && pParse->nErr==0 ){ /* Take advantage of short-circuit false optimization for AND */ p = sqlite3ExprAnd(pParse->db, pLeft, pRight); }else{ - p = sqlite3ExprAlloc(pParse->db, op & TKFLG_MASK, pToken, 1); + p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr)); + if( p ){ + memset(p, 0, sizeof(Expr)); + p->op = op & TKFLG_MASK; + p->iAgg = -1; + } sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight); } if( p ) { @@ -90647,7 +91407,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token * ** variable number. ** ** Wildcards of the form "?nnn" are assigned the number "nnn". We make -** sure "nnn" is not too be to avoid a denial of service attack when +** sure "nnn" is not too big to avoid a denial of service attack when ** the SQL statement comes from an external source. ** ** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number @@ -90658,6 +91418,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token * SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n){ sqlite3 *db = pParse->db; const char *z; + ynVar x; if( pExpr==0 ) return; assert( !ExprHasProperty(pExpr, EP_IntValue|EP_Reduced|EP_TokenOnly) ); @@ -90668,15 +91429,20 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n if( z[1]==0 ){ /* Wildcard of the form "?". Assign the next variable number */ assert( z[0]=='?' ); - pExpr->iColumn = (ynVar)(++pParse->nVar); + x = (ynVar)(++pParse->nVar); }else{ - ynVar x; + int doAdd = 0; if( z[0]=='?' ){ /* Wildcard of the form "?nnn". Convert "nnn" to an integer and ** use it as the variable number */ i64 i; - int bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8); - x = (ynVar)i; + int bOk; + if( n==2 ){ /*OPTIMIZATION-IF-TRUE*/ + i = z[1]-'0'; /* The common case of ?N for a single digit N */ + bOk = 1; + }else{ + bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8); + } testcase( i==0 ); testcase( i==1 ); testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 ); @@ -90686,40 +91452,30 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]); return; } - if( i>pParse->nVar ){ - pParse->nVar = (int)i; + x = (ynVar)i; + if( x>pParse->nVar ){ + pParse->nVar = (int)x; + doAdd = 1; + }else if( sqlite3VListNumToName(pParse->pVList, x)==0 ){ + doAdd = 1; } }else{ /* Wildcards like ":aaa", "$aaa" or "@aaa". 
Reuse the same variable ** number as the prior appearance of the same name, or if the name ** has never appeared before, reuse the same variable number */ - ynVar i; - for(i=x=0; inzVar; i++){ - if( pParse->azVar[i] && strcmp(pParse->azVar[i],z)==0 ){ - x = (ynVar)i+1; - break; - } - } - if( x==0 ) x = (ynVar)(++pParse->nVar); - } - pExpr->iColumn = x; - if( x>pParse->nzVar ){ - char **a; - a = sqlite3DbRealloc(db, pParse->azVar, x*sizeof(a[0])); - if( a==0 ){ - assert( db->mallocFailed ); /* Error reported through mallocFailed */ - return; + x = (ynVar)sqlite3VListNameToNum(pParse->pVList, z, n); + if( x==0 ){ + x = (ynVar)(++pParse->nVar); + doAdd = 1; } - pParse->azVar = a; - memset(&a[pParse->nzVar], 0, (x-pParse->nzVar)*sizeof(a[0])); - pParse->nzVar = x; } - if( pParse->azVar[x-1]==0 ){ - pParse->azVar[x-1] = sqlite3DbStrNDup(db, z, n); + if( doAdd ){ + pParse->pVList = sqlite3VListAdd(db, pParse->pVList, z, n, x); } - } - if( pParse->nVar>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ + } + pExpr->iColumn = x; + if( x>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ sqlite3ErrorMsg(pParse, "too many SQL variables"); } } @@ -90808,7 +91564,7 @@ static int dupedExprStructSize(Expr *p, int flags){ assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ assert( EXPR_FULLSIZE<=0xfff ); assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 ); - if( 0==flags ){ + if( 0==flags || p->op==TK_SELECT_COLUMN ){ nSize = EXPR_FULLSIZE; }else{ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); @@ -90951,6 +91707,8 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){ if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){ if( pNew->op==TK_SELECT_COLUMN ){ pNew->pLeft = p->pLeft; + assert( p->iColumn==0 || p->pRight==0 ); + assert( p->pRight==0 || p->pRight==p->pLeft ); }else{ pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); } @@ -91013,6 +91771,7 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags) ExprList *pNew; struct ExprList_item *pItem, *pOldItem; int i; + Expr *pPriorSelectCol = 0; assert( db!=0 ); if( p==0 ) return 0; pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew) ); @@ -91027,7 +91786,24 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags) pOldItem = p->a; for(i=0; inExpr; i++, pItem++, pOldItem++){ Expr *pOldExpr = pOldItem->pExpr; + Expr *pNewExpr; pItem->pExpr = sqlite3ExprDup(db, pOldExpr, flags); + if( pOldExpr + && pOldExpr->op==TK_SELECT_COLUMN + && (pNewExpr = pItem->pExpr)!=0 + ){ + assert( pNewExpr->iColumn==0 || i>0 ); + if( pNewExpr->iColumn==0 ){ + assert( pOldExpr->pLeft==pOldExpr->pRight ); + pPriorSelectCol = pNewExpr->pLeft = pNewExpr->pRight; + }else{ + assert( i>0 ); + assert( pItem[-1].pExpr!=0 ); + assert( pNewExpr->iColumn==pItem[-1].pExpr->iColumn+1 ); + assert( pPriorSelectCol==pItem[-1].pExpr->pLeft ); + pNewExpr->pLeft = pPriorSelectCol; + } + } pItem->zName = sqlite3DbStrDup(db, pOldItem->zName); pItem->zSpan = sqlite3DbStrDup(db, pOldItem->zSpan); pItem->sortOrder = pOldItem->sortOrder; @@ -91078,7 +91854,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ } pTab = pNewItem->pTab = pOldItem->pTab; if( pTab ){ - pTab->nRef++; + pTab->nTabRef++; } pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags); pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags); @@ -91111,33 +91887,41 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ } return pNew; } -SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int 
flags){ - Select *pNew, *pPrior; +SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){ + Select *pRet = 0; + Select *pNext = 0; + Select **pp = &pRet; + Select *p; + assert( db!=0 ); - if( p==0 ) return 0; - pNew = sqlite3DbMallocRawNN(db, sizeof(*p) ); - if( pNew==0 ) return 0; - pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags); - pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags); - pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags); - pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags); - pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags); - pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags); - pNew->op = p->op; - pNew->pPrior = pPrior = sqlite3SelectDup(db, p->pPrior, flags); - if( pPrior ) pPrior->pNext = pNew; - pNew->pNext = 0; - pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); - pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags); - pNew->iLimit = 0; - pNew->iOffset = 0; - pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; - pNew->addrOpenEphm[0] = -1; - pNew->addrOpenEphm[1] = -1; - pNew->nSelectRow = p->nSelectRow; - pNew->pWith = withDup(db, p->pWith); - sqlite3SelectSetName(pNew, p->zSelName); - return pNew; + for(p=pDup; p; p=p->pPrior){ + Select *pNew = sqlite3DbMallocRawNN(db, sizeof(*p) ); + if( pNew==0 ) break; + pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags); + pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags); + pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags); + pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags); + pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags); + pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags); + pNew->op = p->op; + pNew->pNext = pNext; + pNew->pPrior = 0; + pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); + pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags); + pNew->iLimit = 0; + pNew->iOffset = 0; + pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; + pNew->addrOpenEphm[0] = -1; + pNew->addrOpenEphm[1] = -1; + pNew->nSelectRow = p->nSelectRow; + pNew->pWith = withDup(db, p->pWith); + sqlite3SelectSetName(pNew, p->zSelName); + *pp = pNew; + pp = &pNew->pPrior; + pNext = pNew; + } + + return pRet; } #else SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ @@ -91202,7 +91986,7 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListAppend( ** Or: (a,b,c) = (SELECT x,y,z FROM ....) ** ** For each term of the vector assignment, append new entries to the -** expression list pList. In the case of a subquery on the LHS, append +** expression list pList. In the case of a subquery on the RHS, append ** TK_SELECT_COLUMN expressions. */ SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector( @@ -91219,13 +92003,19 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector( ** exit prior to this routine being invoked */ if( NEVER(pColumns==0) ) goto vector_append_error; if( pExpr==0 ) goto vector_append_error; - n = sqlite3ExprVectorSize(pExpr); - if( pColumns->nId!=n ){ + + /* If the RHS is a vector, then we can immediately check to see that + ** the size of the RHS and LHS match. But if the RHS is a SELECT, + ** wildcards ("*") in the result set of the SELECT must be expanded before + ** we can do the size check, so defer the size check until code generation. 
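The deferred width check described above matters for vector assignments whose right-hand side is a SELECT, i.e. the "(a,b,c) = (SELECT x,y,z FROM ....)" form mentioned in the comment. A small sketch, with hypothetical tables t1 and t2:

    /* The LHS/RHS widths are compared only at code generation, because a
    ** "SELECT *" on the RHS is not expanded until then. */
    char *zErrMsg = 0;
    rc = sqlite3_exec(db,
        "UPDATE t1 SET (a,b) = (SELECT x,y FROM t2 WHERE t2.id = t1.id)",
        0, 0, &zErrMsg);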
+ */ + if( pExpr->op!=TK_SELECT && pColumns->nId!=(n=sqlite3ExprVectorSize(pExpr)) ){ sqlite3ErrorMsg(pParse, "%d columns assigned %d values", pColumns->nId, n); goto vector_append_error; } - for(i=0; inId; i++){ Expr *pSubExpr = sqlite3ExprForVectorField(pParse, pExpr, i); pList = sqlite3ExprListAppend(pParse, pList, pSubExpr); if( pList ){ @@ -91234,11 +92024,20 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector( pColumns->a[i].zName = 0; } } + if( pExpr->op==TK_SELECT ){ if( pList && pList->a[iFirst].pExpr ){ - assert( pList->a[iFirst].pExpr->op==TK_SELECT_COLUMN ); - pList->a[iFirst].pExpr->pRight = pExpr; + Expr *pFirst = pList->a[iFirst].pExpr; + assert( pFirst->op==TK_SELECT_COLUMN ); + + /* Store the SELECT statement in pRight so it will be deleted when + ** sqlite3ExprListDelete() is called */ + pFirst->pRight = pExpr; pExpr = 0; + + /* Remember the size of the LHS in iTable so that we can check that + ** the RHS and LHS sizes match during code generation. */ + pFirst->iTable = pColumns->nId; } } @@ -92068,6 +92867,28 @@ SQLITE_PRIVATE void sqlite3SubselectError(Parse *pParse, int nActual, int nExpec } #endif +/* +** Expression pExpr is a vector that has been used in a context where +** it is not permitted. If pExpr is a sub-select vector, this routine +** loads the Parse object with a message of the form: +** +** "sub-select returns N columns - expected 1" +** +** Or, if it is a regular scalar vector: +** +** "row value misused" +*/ +SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){ +#ifndef SQLITE_OMIT_SUBQUERY + if( pExpr->flags & EP_xIsSelect ){ + sqlite3SubselectError(pParse, pExpr->x.pSelect->pEList->nExpr, 1); + }else +#endif + { + sqlite3ErrorMsg(pParse, "row value misused"); + } +} + /* ** Generate code for scalar subqueries used as a subquery expression, EXISTS, ** or IN operators. Examples: @@ -92255,7 +93076,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect( }else{ sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1); sqlite3ExprCacheAffinityChange(pParse, r3, 1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, pExpr->iTable, r2); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pExpr->iTable, r2, r3, 1); } } } @@ -92350,11 +93171,7 @@ SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse *pParse, Expr *pIn){ return 1; } }else if( nVector!=1 ){ - if( (pIn->pLeft->flags & EP_xIsSelect) ){ - sqlite3SubselectError(pParse, nVector, 1); - }else{ - sqlite3ErrorMsg(pParse, "row value misused"); - } + sqlite3VectorErrorMsg(pParse, pIn->pLeft); return 1; } return 0; @@ -92659,22 +93476,22 @@ static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){ const char *z = pExpr->u.zToken; assert( z!=0 ); c = sqlite3DecOrHexToI64(z, &value); - if( c==0 || (c==2 && negFlag) ){ - if( negFlag ){ value = c==2 ? SMALLEST_INT64 : -value; } - sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64); - }else{ + if( c==1 || (c==2 && !negFlag) || (negFlag && value==SMALLEST_INT64)){ #ifdef SQLITE_OMIT_FLOATING_POINT sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? "-" : "", z); #else #ifndef SQLITE_OMIT_HEX_INTEGER if( sqlite3_strnicmp(z,"0x",2)==0 ){ - sqlite3ErrorMsg(pParse, "hex literal too big: %s", z); + sqlite3ErrorMsg(pParse, "hex literal too big: %s%s", negFlag?"-":"",z); }else #endif { codeReal(v, z, negFlag, iMem); } #endif + }else{ + if( negFlag ){ value = c==2 ? 
SMALLEST_INT64 : -value; } + sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64); } } } @@ -93013,7 +93830,7 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){ iResult = pParse->nMem+1; pParse->nMem += nResult; for(i=0; ix.pList->a[i].pExpr, i+iResult); + sqlite3ExprCodeFactorable(pParse, p->x.pList->a[i].pExpr, i+iResult); } } } @@ -93125,9 +93942,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) assert( pExpr->u.zToken[0]!=0 ); sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target); if( pExpr->u.zToken[1]!=0 ){ - assert( pExpr->u.zToken[0]=='?' - || strcmp(pExpr->u.zToken, pParse->azVar[pExpr->iColumn-1])==0 ); - sqlite3VdbeChangeP4(v, -1, pParse->azVar[pExpr->iColumn-1], P4_STATIC); + const char *z = sqlite3VListNumToName(pParse->pVList, pExpr->iColumn); + assert( pExpr->u.zToken[0]=='?' || strcmp(pExpr->u.zToken, z)==0 ); + pParse->pVList[0] = 0; /* Indicate VList may no longer be enlarged */ + sqlite3VdbeAppendP4(v, (char*)z, P4_STATIC); } return target; } @@ -93277,6 +94095,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) u8 enc = ENC(db); /* The text encoding used by this database */ CollSeq *pColl = 0; /* A collating sequence */ + if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){ + /* SQL functions can be expensive. So try to move constant functions + ** out of the inner loop, even if that means an extra OP_Copy. */ + return sqlite3ExprCodeAtInit(pParse, pExpr, -1); + } assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); if( ExprHasProperty(pExpr, EP_TokenOnly) ){ pFarg = 0; @@ -93325,6 +94148,22 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) return sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target); } +#ifdef SQLITE_DEBUG + /* The AFFINITY() function evaluates to a string that describes + ** the type affinity of the argument. This is used for testing of + ** the SQLite type logic. + */ + if( pDef->funcFlags & SQLITE_FUNC_AFFINITY ){ + const char *azAff[] = { "blob", "text", "numeric", "integer", "real" }; + char aff; + assert( nFarg==1 ); + aff = sqlite3ExprAffinity(pFarg->a[0].pExpr); + sqlite3VdbeLoadString(v, target, + aff ? azAff[aff-SQLITE_AFF_BLOB] : "none"); + return target; + } +#endif + for(i=0; ia[i].pExpr) ){ testcase( i==31 ); @@ -93413,9 +94252,17 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) break; } case TK_SELECT_COLUMN: { + int n; if( pExpr->pLeft->iTable==0 ){ pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft, 0, 0); } + assert( pExpr->iTable==0 || pExpr->pLeft->op==TK_SELECT ); + if( pExpr->iTable + && pExpr->iTable!=(n = sqlite3ExprVectorSize(pExpr->pLeft)) + ){ + sqlite3ErrorMsg(pParse, "%d columns assigned %d values", + pExpr->iTable, n); + } return pExpr->pLeft->iTable + pExpr->iColumn; } case TK_IN: { @@ -93633,24 +94480,40 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) /* ** Factor out the code of the given expression to initialization time. +** +** If regDest>=0 then the result is always stored in that register and the +** result is not reusable. If regDest<0 then this routine is free to +** store the value whereever it wants. The register where the expression +** is stored is returned. When regDest<0, two identical expressions will +** code to the same register. 
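For the VList-based parameter handling used above (TK_VARIABLE code generation now looks names up with sqlite3VListNumToName()), the externally visible numbering rules are the ones described earlier in this patch: "?" takes the next unused number, "?NNN" takes exactly NNN, and a named parameter reuses its prior number or takes the next unused one. A small sketch of the resulting public-API behaviour (db/pStmt in scope, error handling omitted):

    rc = sqlite3_prepare_v2(db, "SELECT ?, ?5, :who", -1, &pStmt, 0);
    /* "?" -> 1, "?5" -> 5, ":who" -> 6 (next unused after the largest so far) */
    assert( sqlite3_bind_parameter_count(pStmt)==6 );
    assert( sqlite3_bind_parameter_index(pStmt, ":who")==6 );
    sqlite3_bind_text(pStmt, sqlite3_bind_parameter_index(pStmt, ":who"),
                      "alice", -1, SQLITE_STATIC);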
*/ -SQLITE_PRIVATE void sqlite3ExprCodeAtInit( +SQLITE_PRIVATE int sqlite3ExprCodeAtInit( Parse *pParse, /* Parsing context */ Expr *pExpr, /* The expression to code when the VDBE initializes */ - int regDest, /* Store the value in this register */ - u8 reusable /* True if this expression is reusable */ + int regDest /* Store the value in this register */ ){ ExprList *p; assert( ConstFactorOk(pParse) ); p = pParse->pConstExpr; + if( regDest<0 && p ){ + struct ExprList_item *pItem; + int i; + for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){ + if( pItem->reusable && sqlite3ExprCompare(pItem->pExpr,pExpr,-1)==0 ){ + return pItem->u.iConstExprReg; + } + } + } pExpr = sqlite3ExprDup(pParse->db, pExpr, 0); p = sqlite3ExprListAppend(pParse, p, pExpr); if( p ){ struct ExprList_item *pItem = &p->a[p->nExpr-1]; + pItem->reusable = regDest<0; + if( regDest<0 ) regDest = ++pParse->nMem; pItem->u.iConstExprReg = regDest; - pItem->reusable = reusable; } pParse->pConstExpr = p; + return regDest; } /* @@ -93673,19 +94536,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ && pExpr->op!=TK_REGISTER && sqlite3ExprIsConstantNotJoin(pExpr) ){ - ExprList *p = pParse->pConstExpr; - int i; *pReg = 0; - if( p ){ - struct ExprList_item *pItem; - for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){ - if( pItem->reusable && sqlite3ExprCompare(pItem->pExpr,pExpr,-1)==0 ){ - return pItem->u.iConstExprReg; - } - } - } - r2 = ++pParse->nMem; - sqlite3ExprCodeAtInit(pParse, pExpr, r2, 1); + r2 = sqlite3ExprCodeAtInit(pParse, pExpr, -1); }else{ int r1 = sqlite3GetTempReg(pParse); r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1); @@ -93739,7 +94591,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){ */ SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){ if( pParse->okConstFactor && sqlite3ExprIsConstant(pExpr) ){ - sqlite3ExprCodeAtInit(pParse, pExpr, target, 0); + sqlite3ExprCodeAtInit(pParse, pExpr, target); }else{ sqlite3ExprCode(pParse, pExpr, target); } @@ -93803,10 +94655,15 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList( if( !ConstFactorOk(pParse) ) flags &= ~SQLITE_ECEL_FACTOR; for(pItem=pList->a, i=0; ipExpr; - if( (flags & SQLITE_ECEL_REF)!=0 && (j = pList->a[i].u.x.iOrderByCol)>0 ){ - sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i); + if( (flags & SQLITE_ECEL_REF)!=0 && (j = pItem->u.x.iOrderByCol)>0 ){ + if( flags & SQLITE_ECEL_OMITREF ){ + i--; + n--; + }else{ + sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i); + } }else if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){ - sqlite3ExprCodeAtInit(pParse, pExpr, target+i, 0); + sqlite3ExprCodeAtInit(pParse, pExpr, target+i); }else{ int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i); if( inReg!=target+i ){ @@ -93879,6 +94736,11 @@ static void exprCodeBetween( if( xJump ){ xJump(pParse, &exprAnd, dest, jumpIfNull); }else{ + /* Mark the expression is being from the ON or USING clause of a join + ** so that the sqlite3ExprCodeTarget() routine will not attempt to move + ** it into the Parse.pConstExpr list. We should use a new bit for this, + ** for clarity, but we are out of bits in the Expr.flags field so we + ** have to reuse the EP_FromJoin bit. Bummer. 
*/ exprX.flags |= EP_FromJoin; sqlite3ExprCodeTarget(pParse, &exprAnd, dest); } @@ -94317,11 +95179,10 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr *pE1, Expr *pE2, int iTab){ ){ return 1; } - if( pE2->op==TK_NOTNULL - && sqlite3ExprCompare(pE1->pLeft, pE2->pLeft, iTab)==0 - && (pE1->op!=TK_ISNULL && pE1->op!=TK_IS) - ){ - return 1; + if( pE2->op==TK_NOTNULL && pE1->op!=TK_ISNULL && pE1->op!=TK_IS ){ + Expr *pX = sqlite3ExprSkipCollate(pE1->pLeft); + testcase( pX!=pE1->pLeft ); + if( sqlite3ExprCompare(pX, pE2->pLeft, iTab)==0 ) return 1; } return 0; } @@ -95243,7 +96104,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( sqlite3NestedParse(pParse, "UPDATE \"%w\".%s SET " "sql = sqlite_rename_parent(sql, %Q, %Q) " - "WHERE %s;", zDb, SCHEMA_TABLE(iDb), zTabName, zName, zWhere); + "WHERE %s;", zDb, MASTER_NAME, zTabName, zName, zWhere); sqlite3DbFree(db, zWhere); } } @@ -95267,7 +96128,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( "ELSE name END " "WHERE tbl_name=%Q COLLATE nocase AND " "(type='table' OR type='index' OR type='trigger');", - zDb, SCHEMA_TABLE(iDb), zName, zName, zName, + zDb, MASTER_NAME, zName, zName, zName, #ifndef SQLITE_OMIT_TRIGGER zName, #endif @@ -95428,7 +96289,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ "UPDATE \"%w\".%s SET " "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " "WHERE type = 'table' AND name = %Q", - zDb, SCHEMA_TABLE(iDb), pNew->addColOffset, zCol, pNew->addColOffset+1, + zDb, MASTER_NAME, pNew->addColOffset, zCol, pNew->addColOffset+1, zTab ); sqlite3DbFree(db, zCol); @@ -95512,7 +96373,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table)); if( !pNew ) goto exit_begin_add_column; pParse->pNewTable = pNew; - pNew->nRef = 1; + pNew->nTabRef = 1; pNew->nCol = pTab->nCol; assert( pNew->nCol>0 ); nAlloc = (((pNew->nCol-1)/8)*8)+8; @@ -95532,7 +96393,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ } pNew->pSchema = db->aDb[iDb].pSchema; pNew->addColOffset = pTab->addColOffset; - pNew->nRef = 1; + pNew->nTabRef = 1; /* Begin a transaction and increment the schema cookie. */ sqlite3BeginWriteOperation(pParse, 0, iDb); @@ -96347,6 +97208,12 @@ static const FuncDef statPushFuncdef = { ** The content to returned is determined by the parameter J ** which is one of the STAT_GET_xxxx values defined above. ** +** The stat_get(P,J) function is not available to generic SQL. It is +** inserted as part of a manually constructed bytecode program. (See +** the callStatGet() routine below.) It is guaranteed that the P +** parameter will always be a poiner to a Stat4Accum object, never a +** NULL. 
+** ** If neither STAT3 nor STAT4 are enabled, then J is always ** STAT_GET_STAT1 and is hence omitted and this routine becomes ** a one-parameter function, stat_get(P), that always returns the @@ -97165,7 +98032,7 @@ static void initAvgEq(Index *pIdx){ } } - if( nDist100>nSum100 ){ + if( nDist100>nSum100 && sumEqp, sqlite3_column_blob(pStmt, 4), pSample->n); + if( pSample->n ){ + memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n); + } pIdx->nSample++; } rc = sqlite3_finalize(pStmt); @@ -97577,6 +98446,7 @@ static void attachFunc( rc = sqlite3BtreeOpen(pVfs, zPath, db, &aNew->pBt, 0, flags); sqlite3_free( zPath ); db->nDb++; + db->skipBtreeMutex = 0; if( rc==SQLITE_CONSTRAINT ){ rc = SQLITE_ERROR; zErrDyn = sqlite3MPrintf(db, "database is already attached"); @@ -97765,6 +98635,7 @@ static void codeAttach( sqlite3* db = pParse->db; int regArgs; + if( pParse->nErr ) goto attach_end; memset(&sName, 0, sizeof(NameContext)); sName.pParse = pParse; @@ -98318,10 +99189,10 @@ SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext *pContext){ ** codeTableLocks() functions. */ struct TableLock { - int iDb; /* The database containing the table to be locked */ - int iTab; /* The root page of the table to be locked */ - u8 isWriteLock; /* True for write lock. False for a read lock */ - const char *zName; /* Name of the table */ + int iDb; /* The database containing the table to be locked */ + int iTab; /* The root page of the table to be locked */ + u8 isWriteLock; /* True for write lock. False for a read lock */ + const char *zLockName; /* Name of the table */ }; /* @@ -98347,6 +99218,8 @@ SQLITE_PRIVATE void sqlite3TableLock( TableLock *p; assert( iDb>=0 ); + if( iDb==1 ) return; + if( !sqlite3BtreeSharable(pParse->db->aDb[iDb].pBt) ) return; for(i=0; inTableLock; i++){ p = &pToplevel->aTableLock[i]; if( p->iDb==iDb && p->iTab==iTab ){ @@ -98363,7 +99236,7 @@ SQLITE_PRIVATE void sqlite3TableLock( p->iDb = iDb; p->iTab = iTab; p->isWriteLock = isWriteLock; - p->zName = zName; + p->zLockName = zName; }else{ pToplevel->nTableLock = 0; sqlite3OomFault(pToplevel->db); @@ -98385,7 +99258,7 @@ static void codeTableLocks(Parse *pParse){ TableLock *p = &pParse->aTableLock[i]; int p1 = p->iDb; sqlite3VdbeAddOp4(pVdbe, OP_TableLock, p1, p->iTab, p->isWriteLock, - p->zName, P4_STATIC); + p->zLockName, P4_STATIC); } } #else @@ -98594,15 +99467,22 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha return 0; } #endif - for(i=OMIT_TEMPDB; inDb; i++){ - int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ - if( zDatabase==0 || sqlite3StrICmp(zDatabase, db->aDb[j].zDbSName)==0 ){ - assert( sqlite3SchemaMutexHeld(db, j, 0) ); - p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName); - if( p ) break; + while(1){ + for(i=OMIT_TEMPDB; inDb; i++){ + int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ + if( zDatabase==0 || sqlite3StrICmp(zDatabase, db->aDb[j].zDbSName)==0 ){ + assert( sqlite3SchemaMutexHeld(db, j, 0) ); + p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName); + if( p ) return p; + } } + /* Not found. If the name we were looking for was temp.sqlite_master + ** then change the name to sqlite_temp_master and try again. 
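The retry loop added to sqlite3FindTable() above means "temp.sqlite_master" is now accepted as an alias for sqlite_temp_master. Illustrative query on any open connection (db/pStmt in scope):

    rc = sqlite3_prepare_v2(db,
        "SELECT name FROM temp.sqlite_master WHERE type='table'", -1, &pStmt, 0);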
*/ + if( sqlite3StrICmp(zName, MASTER_NAME)!=0 ) break; + if( sqlite3_stricmp(zDatabase, db->aDb[1].zDbSName)!=0 ) break; + zName = TEMP_MASTER_NAME; } - return p; + return 0; } /* @@ -98638,6 +99518,9 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( ** CREATE, then check to see if it is the name of an virtual table that ** can be an eponymous virtual table. */ Module *pMod = (Module*)sqlite3HashFind(&pParse->db->aModule, zName); + if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){ + pMod = sqlite3PragmaVtabRegister(pParse->db, zName); + } if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){ return pMod->pEpoTab; } @@ -98920,7 +99803,7 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ /* Do not delete the table until the reference count reaches zero. */ if( !pTable ) return; - if( ((!db || db->pnBytesFreed==0) && (--pTable->nRef)>0) ) return; + if( ((!db || db->pnBytesFreed==0) && (--pTable->nTabRef)>0) ) return; deleteTable(db, pTable); } @@ -98974,7 +99857,7 @@ SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){ */ SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *p, int iDb){ Vdbe *v = sqlite3GetVdbe(p); - sqlite3TableLock(p, iDb, MASTER_ROOT, 1, SCHEMA_TABLE(iDb)); + sqlite3TableLock(p, iDb, MASTER_ROOT, 1, MASTER_NAME); sqlite3VdbeAddOp4Int(v, OP_OpenWrite, 0, MASTER_ROOT, iDb, 5); if( p->nTab==0 ){ p->nTab = 1; @@ -98992,7 +99875,10 @@ SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *db, const char *zName){ if( zName ){ Db *pDb; for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){ - if( 0==sqlite3StrICmp(pDb->zDbSName, zName) ) break; + if( 0==sqlite3_stricmp(pDb->zDbSName, zName) ) break; + /* "main" is always an acceptable alias for the primary database + ** even if it has been renamed using SQLITE_DBCONFIG_MAINDBNAME. 
*/ + if( i==0 && 0==sqlite3_stricmp("main", zName) ) break; } } return i; @@ -99211,7 +100097,7 @@ SQLITE_PRIVATE void sqlite3StartTable( pTable->zName = zName; pTable->iPKey = -1; pTable->pSchema = db->aDb[iDb].pSchema; - pTable->nRef = 1; + pTable->nTabRef = 1; pTable->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); assert( pParse->pNewTable==0 ); pParse->pNewTable = pTable; @@ -100277,7 +101163,7 @@ SQLITE_PRIVATE void sqlite3EndTable( "UPDATE %Q.%s " "SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q " "WHERE rowid=#%d", - db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), + db->aDb[iDb].zDbSName, MASTER_NAME, zType, p->zName, p->zName, @@ -100614,7 +101500,7 @@ static void destroyRootPage(Parse *pParse, int iTable, int iDb){ */ sqlite3NestedParse(pParse, "UPDATE %Q.%s SET rootpage=%d WHERE #%d AND rootpage=#%d", - pParse->db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), iTable, r1, r1); + pParse->db->aDb[iDb].zDbSName, MASTER_NAME, iTable, r1, r1); #endif sqlite3ReleaseTempReg(pParse, r1); } @@ -100757,7 +101643,7 @@ SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, in */ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'", - pDb->zDbSName, SCHEMA_TABLE(iDb), pTab->zName); + pDb->zDbSName, MASTER_NAME, pTab->zName); if( !isView && !IsVirtual(pTab) ){ destroyTable(pParse, pTab); } @@ -101104,7 +101990,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ } sqlite3VdbeAddOp3(v, OP_SorterData, iSorter, regRecord, iIdx); sqlite3VdbeAddOp3(v, OP_Last, iIdx, 0, -1); - sqlite3VdbeAddOp3(v, OP_IdxInsert, iIdx, regRecord, 0); + sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdx, regRecord); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2); VdbeCoverage(v); @@ -101649,7 +102535,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( */ sqlite3NestedParse(pParse, "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);", - db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), + db->aDb[iDb].zDbSName, MASTER_NAME, pIndex->zName, pTab->zName, iMem, @@ -101801,7 +102687,7 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists sqlite3BeginWriteOperation(pParse, 1, iDb); sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE name=%Q AND type='index'", - db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), pIndex->zName + db->aDb[iDb].zDbSName, MASTER_NAME, pIndex->zName ); sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName); sqlite3ChangeCookie(pParse, iDb); @@ -101944,7 +102830,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge( /* Allocate additional space if needed */ if( (u32)pSrc->nSrc+nExtra>pSrc->nAlloc ){ SrcList *pNew; - int nAlloc = pSrc->nSrc+nExtra; + int nAlloc = pSrc->nSrc*2+nExtra; int nGot; pNew = sqlite3DbRealloc(db, pSrc, sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) ); @@ -102022,9 +102908,12 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( pList = sqlite3DbMallocRawNN(db, sizeof(SrcList) ); if( pList==0 ) return 0; pList->nAlloc = 1; - pList->nSrc = 0; + pList->nSrc = 1; + memset(&pList->a[0], 0, sizeof(pList->a[0])); + pList->a[0].iCursor = -1; + }else{ + pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc); } - pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc); if( db->mallocFailed ){ sqlite3SrcListDelete(db, pList); return 0; @@ -103239,7 +104128,7 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ sqlite3DeleteTable(pParse->db, pItem->pTab); pItem->pTab = pTab; if( pTab ){ - 
pTab->nRef++; + pTab->nTabRef++; } if( sqlite3IndexedByLookup(pParse, pItem) ){ pTab = 0; @@ -103367,7 +104256,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( ** ); */ - pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); + pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0); if( pSelectRowid == 0 ) goto limit_where_cleanup; pEList = sqlite3ExprListAppend(pParse, 0, pSelectRowid); if( pEList == 0 ) goto limit_where_cleanup; @@ -103386,8 +104275,8 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( if( pSelect == 0 ) return 0; /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */ - pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); - pInClause = pWhereRowid ? sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0, 0) : 0; + pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0); + pInClause = pWhereRowid ? sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0) : 0; sqlite3PExprAddSelect(pParse, pInClause, pSelect); return pInClause; @@ -103652,7 +104541,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( nKey = 0; /* Zero tells OP_Found to use a composite key */ sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey, sqlite3IndexAffinityStr(pParse->db, pPk), nPk); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iEphCur, iKey); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEphCur, iKey, iPk, nPk); }else{ /* Add the rowid of the row to be deleted to the RowSet */ nKey = 1; /* OP_Seek always uses a single rowid */ @@ -103698,7 +104587,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( } }else if( pPk ){ addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_RowKey, iEphCur, iKey); + sqlite3VdbeAddOp2(v, OP_RowData, iEphCur, iKey); assert( nKey==0 ); /* OP_Found will use a composite key */ }else{ addrLoop = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, 0, iKey); @@ -103722,12 +104611,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( #endif { int count = (pParse->nested==0); /* True to count changes */ - int iIdxNoSeek = -1; - if( bComplex==0 && aiCurOnePass[1]!=iDataCur ){ - iIdxNoSeek = aiCurOnePass[1]; - } sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, - iKey, nKey, count, OE_Default, eOnePass, iIdxNoSeek); + iKey, nKey, count, OE_Default, eOnePass, aiCurOnePass[1]); } /* End of the loop over all rowids/primary-keys. */ @@ -103741,14 +104626,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( sqlite3VdbeGoto(v, addrLoop); sqlite3VdbeJumpHere(v, addrLoop); } - - /* Close the cursors open on the table and its indexes. */ - if( !isView && !IsVirtual(pTab) ){ - if( !pPk ) sqlite3VdbeAddOp1(v, OP_Close, iDataCur); - for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ - sqlite3VdbeAddOp1(v, OP_Close, iIdxCur + i); - } - } } /* End non-truncate path */ /* Update the sqlite_sequence table by storing the content of the @@ -103815,15 +104692,17 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( ** ** If eMode is ONEPASS_MULTI, then this call is being made as part ** of a ONEPASS delete that affects multiple rows. In this case, if -** iIdxNoSeek is a valid cursor number (>=0), then its position should -** be preserved following the delete operation. Or, if iIdxNoSeek is not -** a valid cursor number, the position of iDataCur should be preserved -** instead. +** iIdxNoSeek is a valid cursor number (>=0) and is not the same as +** iDataCur, then its position should be preserved following the delete +** operation. Or, if iIdxNoSeek is not a valid cursor number, the +** position of iDataCur should be preserved instead. 
** ** iIdxNoSeek: -** If iIdxNoSeek is a valid cursor number (>=0), then it identifies an -** index cursor (from within array of cursors starting at iIdxCur) that -** already points to the index entry to be deleted. +** If iIdxNoSeek is a valid cursor number (>=0) not equal to iDataCur, +** then it identifies an index cursor (from within array of cursors +** starting at iIdxCur) that already points to the index entry to be deleted. +** Except, this optimization is disabled if there are BEFORE triggers since +** the trigger body might have moved the cursor. */ SQLITE_PRIVATE void sqlite3GenerateRowDelete( Parse *pParse, /* Parsing context */ @@ -103894,13 +104773,18 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete( /* If any BEFORE triggers were coded, then seek the cursor to the ** row to be deleted again. It may be that the BEFORE triggers moved - ** the cursor or of already deleted the row that the cursor was + ** the cursor or already deleted the row that the cursor was ** pointing to. + ** + ** Also disable the iIdxNoSeek optimization since the BEFORE trigger + ** may have moved that cursor. */ if( addrStart=0 ); + iIdxNoSeek = -1; } /* Do FK processing. This call checks that any FK constraints that @@ -103923,11 +104807,13 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete( u8 p5 = 0; sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,iIdxNoSeek); sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0)); - sqlite3VdbeChangeP4(v, -1, (char*)pTab, P4_TABLE); + if( pParse->nested==0 ){ + sqlite3VdbeAppendP4(v, (char*)pTab, P4_TABLE); + } if( eMode!=ONEPASS_OFF ){ sqlite3VdbeChangeP5(v, OPFLAG_AUXDELETE); } - if( iIdxNoSeek>=0 ){ + if( iIdxNoSeek>=0 && iIdxNoSeek!=iDataCur ){ sqlite3VdbeAddOp1(v, OP_Delete, iIdxNoSeek); } if( eMode==ONEPASS_MULTI ) p5 |= OPFLAG_SAVEPOSITION; @@ -104081,6 +104967,10 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey( } if( regOut ){ sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut); + if( pIdx->pTable->pSelect ){ + const char *zAff = sqlite3IndexAffinityStr(pParse->db, pIdx); + sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT); + } } sqlite3ReleaseTempRange(pParse, regBase, nCol); return regBase; @@ -104302,23 +105192,28 @@ static void instrFunc( if( typeHaystack==SQLITE_NULL || typeNeedle==SQLITE_NULL ) return; nHaystack = sqlite3_value_bytes(argv[0]); nNeedle = sqlite3_value_bytes(argv[1]); - if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){ - zHaystack = sqlite3_value_blob(argv[0]); - zNeedle = sqlite3_value_blob(argv[1]); - isText = 0; - }else{ - zHaystack = sqlite3_value_text(argv[0]); - zNeedle = sqlite3_value_text(argv[1]); - isText = 1; - } - while( nNeedle<=nHaystack && memcmp(zHaystack, zNeedle, nNeedle)!=0 ){ - N++; - do{ - nHaystack--; - zHaystack++; - }while( isText && (zHaystack[0]&0xc0)==0x80 ); + if( nNeedle>0 ){ + if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){ + zHaystack = sqlite3_value_blob(argv[0]); + zNeedle = sqlite3_value_blob(argv[1]); + assert( zNeedle!=0 ); + assert( zHaystack!=0 || nHaystack==0 ); + isText = 0; + }else{ + zHaystack = sqlite3_value_text(argv[0]); + zNeedle = sqlite3_value_text(argv[1]); + isText = 1; + if( zHaystack==0 || zNeedle==0 ) return; + } + while( nNeedle<=nHaystack && memcmp(zHaystack, zNeedle, nNeedle)!=0 ){ + N++; + do{ + nHaystack--; + zHaystack++; + }while( isText && (zHaystack[0]&0xc0)==0x80 ); + } + if( nNeedle>nHaystack ) N = 0; } - if( nNeedle>nHaystack ) N = 0; sqlite3_result_int(context, N); } @@ -104698,9 +105593,19 @@ static const struct compareInfo 
likeInfoNorm = { '%', '_', 0, 1 }; static const struct compareInfo likeInfoAlt = { '%', '_', 0, 0 }; /* -** Compare two UTF-8 strings for equality where the first string can -** potentially be a "glob" or "like" expression. Return true (1) if they -** are the same and false (0) if they are different. +** Possible error returns from patternMatch() +*/ +#define SQLITE_MATCH 0 +#define SQLITE_NOMATCH 1 +#define SQLITE_NOWILDCARDMATCH 2 + +/* +** Compare two UTF-8 strings for equality where the first string is +** a GLOB or LIKE expression. Return values: +** +** SQLITE_MATCH: Match +** SQLITE_NOMATCH: No match +** SQLITE_NOWILDCARDMATCH: No match in spite of having * or % wildcards. ** ** Globbing rules: ** @@ -104751,30 +105656,31 @@ static int patternCompare( ** single character of the input string for each "?" skipped */ while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){ if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){ - return 0; + return SQLITE_NOWILDCARDMATCH; } } if( c==0 ){ - return 1; /* "*" at the end of the pattern matches */ + return SQLITE_MATCH; /* "*" at the end of the pattern matches */ }else if( c==matchOther ){ if( pInfo->matchSet==0 ){ c = sqlite3Utf8Read(&zPattern); - if( c==0 ) return 0; + if( c==0 ) return SQLITE_NOWILDCARDMATCH; }else{ /* "[...]" immediately follows the "*". We have to do a slow ** recursive search in this case, but it is an unusual case. */ assert( matchOther<0x80 ); /* '[' is a single-byte character */ - while( *zString - && patternCompare(&zPattern[-1],zString,pInfo,matchOther)==0 ){ + while( *zString ){ + int bMatch = patternCompare(&zPattern[-1],zString,pInfo,matchOther); + if( bMatch!=SQLITE_NOMATCH ) return bMatch; SQLITE_SKIP_UTF8(zString); } - return *zString!=0; + return SQLITE_NOWILDCARDMATCH; } } /* At this point variable c contains the first character of the ** pattern string past the "*". Search in the input string for the - ** first matching character and recursively contine the match from + ** first matching character and recursively continue the match from ** that point. 
** ** For a case-insensitive search, set variable cx to be the same as @@ -104783,6 +105689,7 @@ static int patternCompare( */ if( c<=0x80 ){ u32 cx; + int bMatch; if( noCase ){ cx = sqlite3Toupper(c); c = sqlite3Tolower(c); @@ -104791,27 +105698,30 @@ static int patternCompare( } while( (c2 = *(zString++))!=0 ){ if( c2!=c && c2!=cx ) continue; - if( patternCompare(zPattern,zString,pInfo,matchOther) ) return 1; + bMatch = patternCompare(zPattern,zString,pInfo,matchOther); + if( bMatch!=SQLITE_NOMATCH ) return bMatch; } }else{ + int bMatch; while( (c2 = Utf8Read(zString))!=0 ){ if( c2!=c ) continue; - if( patternCompare(zPattern,zString,pInfo,matchOther) ) return 1; + bMatch = patternCompare(zPattern,zString,pInfo,matchOther); + if( bMatch!=SQLITE_NOMATCH ) return bMatch; } } - return 0; + return SQLITE_NOWILDCARDMATCH; } if( c==matchOther ){ if( pInfo->matchSet==0 ){ c = sqlite3Utf8Read(&zPattern); - if( c==0 ) return 0; + if( c==0 ) return SQLITE_NOMATCH; zEscaped = zPattern; }else{ u32 prior_c = 0; int seen = 0; int invert = 0; c = sqlite3Utf8Read(&zString); - if( c==0 ) return 0; + if( c==0 ) return SQLITE_NOMATCH; c2 = sqlite3Utf8Read(&zPattern); if( c2=='^' ){ invert = 1; @@ -104835,7 +105745,7 @@ static int patternCompare( c2 = sqlite3Utf8Read(&zPattern); } if( c2==0 || (seen ^ invert)==0 ){ - return 0; + return SQLITE_NOMATCH; } continue; } @@ -104846,23 +105756,25 @@ static int patternCompare( continue; } if( c==matchOne && zPattern!=zEscaped && c2!=0 ) continue; - return 0; + return SQLITE_NOMATCH; } - return *zString==0; + return *zString==0 ? SQLITE_MATCH : SQLITE_NOMATCH; } /* -** The sqlite3_strglob() interface. +** The sqlite3_strglob() interface. Return 0 on a match (like strcmp()) and +** non-zero if there is no match. */ SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){ - return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '[')==0; + return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '['); } /* -** The sqlite3_strlike() interface. +** The sqlite3_strlike() interface. Return 0 on a match and non-zero for +** a miss - like strcmp(). 
*/ SQLITE_API int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){ - return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc)==0; + return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc); } /* @@ -104943,7 +105855,7 @@ static void likeFunc( #ifdef SQLITE_TEST sqlite3_like_count++; #endif - sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)); + sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)==SQLITE_MATCH); } } @@ -105714,7 +106626,7 @@ static void groupConcatStep( zSep = ","; nSep = 1; } - if( nSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep); + if( zSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep); } zVal = (char*)sqlite3_value_text(argv[0]); nVal = sqlite3_value_bytes(argv[0]); @@ -105855,6 +106767,9 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ FUNCTION2(unlikely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), FUNCTION2(likelihood, 2, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), FUNCTION2(likely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), +#ifdef SQLITE_DEBUG + FUNCTION2(affinity, 1, 0, 0, noopFunc, SQLITE_FUNC_AFFINITY), +#endif FUNCTION(ltrim, 1, 1, 0, trimFunc ), FUNCTION(ltrim, 2, 1, 0, trimFunc ), FUNCTION(rtrim, 1, 2, 0, trimFunc ), @@ -106177,7 +107092,7 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex( } for(pIdx=pParent->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) ){ + if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) && pIdx->pPartIdxWhere==0 ){ /* pIdx is a UNIQUE index (or a PRIMARY KEY) and has the right number ** of columns. If each indexed column corresponds to a foreign key ** column of pFKey, then this index is a winner. */ @@ -106536,7 +107451,7 @@ static void fkScanChildren( assert( iCol>=0 ); zCol = pFKey->pFrom->aCol[iCol].zName; pRight = sqlite3Expr(db, TK_ID, zCol); - pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0); + pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight); pWhere = sqlite3ExprAnd(db, pWhere, pEq); } @@ -106558,7 +107473,7 @@ static void fkScanChildren( if( HasRowid(pTab) ){ pLeft = exprTableRegister(pParse, pTab, regData, -1); pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, -1); - pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight, 0); + pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight); }else{ Expr *pEq, *pAll = 0; Index *pPk = sqlite3PrimaryKeyIndex(pTab); @@ -106568,10 +107483,10 @@ static void fkScanChildren( assert( iCol>=0 ); pLeft = exprTableRegister(pParse, pTab, regData, iCol); pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, iCol); - pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0); + pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight); pAll = sqlite3ExprAnd(db, pAll, pEq); } - pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0, 0); + pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0); } pWhere = sqlite3ExprAnd(db, pWhere, pNe); } @@ -106959,7 +107874,7 @@ SQLITE_PRIVATE void sqlite3FkCheck( struct SrcList_item *pItem = pSrc->a; pItem->pTab = pFKey->pFrom; pItem->zName = pFKey->pFrom->zName; - pItem->pTab->nRef++; + pItem->pTab->nTabRef++; pItem->iCursor = pParse->nTab++; if( regNew!=0 ){ @@ -107157,10 +108072,9 @@ static Trigger *fkActionTrigger( pEq = sqlite3PExpr(pParse, TK_EQ, sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tOld, 0), - sqlite3ExprAlloc(db, TK_ID, &tToCol, 0) - , 0), + sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)), sqlite3ExprAlloc(db, TK_ID, &tFromCol, 0) - , 0); + ); pWhere = sqlite3ExprAnd(db, pWhere, pEq); /* For ON UPDATE, construct the next term of the WHEN clause. 
@@ -107172,13 +108086,11 @@ static Trigger *fkActionTrigger( pEq = sqlite3PExpr(pParse, TK_IS, sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tOld, 0), - sqlite3ExprAlloc(db, TK_ID, &tToCol, 0), - 0), + sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)), sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tNew, 0), - sqlite3ExprAlloc(db, TK_ID, &tToCol, 0), - 0), - 0); + sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)) + ); pWhen = sqlite3ExprAnd(db, pWhen, pEq); } @@ -107187,8 +108099,7 @@ static Trigger *fkActionTrigger( if( action==OE_Cascade ){ pNew = sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tNew, 0), - sqlite3ExprAlloc(db, TK_ID, &tToCol, 0) - , 0); + sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)); }else if( action==OE_SetDflt ){ Expr *pDflt = pFKey->pFrom->aCol[iFromCol].pDflt; if( pDflt ){ @@ -107244,7 +108155,7 @@ static Trigger *fkActionTrigger( pStep->pExprList = sqlite3ExprListDup(db, pList, EXPRDUP_REDUCE); pStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); if( pWhen ){ - pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0, 0); + pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0); pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); } } @@ -107848,7 +108759,7 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3 *db; /* The main database structure */ Table *pTab; /* The table to insert into. aka TABLE */ char *zTab; /* Name of the table into which we are inserting */ - int i, j, idx; /* Loop counters */ + int i, j; /* Loop counters */ Vdbe *v; /* Generate code into this virtual machine */ Index *pIdx; /* For looping over indices of the table */ int nColumn; /* Number of columns in the data */ @@ -108155,8 +109066,10 @@ SQLITE_PRIVATE void sqlite3Insert( if( aRegIdx==0 ){ goto insert_cleanup; } - for(i=0; ipIndex; ipNext, i++){ + assert( pIdx ); aRegIdx[i] = ++pParse->nMem; + pParse->nMem += pIdx->nColumn; } } @@ -108358,12 +109271,26 @@ SQLITE_PRIVATE void sqlite3Insert( #endif { int isReplace; /* Set to true if constraints may cause a replace */ + int bUseSeek; /* True to use OPFLAG_SEEKRESULT */ sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur, regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace, 0 ); sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0); + + /* Set the OPFLAG_USESEEKRESULT flag if either (a) there are no REPLACE + ** constraints or (b) there are no triggers and this table is not a + ** parent table in a foreign key constraint. It is safe to set the + ** flag in the second case as if any REPLACE constraint is hit, an + ** OP_Delete or OP_IdxDelete instruction will be executed on each + ** cursor that is disturbed. And these instructions both clear the + ** VdbeCursor.seekResult variable, disabling the OPFLAG_USESEEKRESULT + ** functionality. 
*/ + bUseSeek = (isReplace==0 || (pTrigger==0 && + ((db->flags & SQLITE_ForeignKeys)==0 || sqlite3FkReferences(pTab)==0) + )); sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur, - regIns, aRegIdx, 0, appendFlag, isReplace==0); + regIns, aRegIdx, 0, appendFlag, bUseSeek + ); } } @@ -108392,14 +109319,6 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3VdbeJumpHere(v, addrInsTop); } - if( !IsVirtual(pTab) && !isView ){ - /* Close all tables opened */ - if( iDataCurpIndex; pIdx; pIdx=pIdx->pNext, idx++){ - sqlite3VdbeAddOp1(v, OP_Close, idx+iIdxCur); - } - } - insert_end: /* Update the sqlite_sequence table by storing the content of the ** maximum rowid counter values recorded while inserting into @@ -108606,7 +109525,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int ipkBottom = 0; /* Bottom of the rowid change constraint check */ u8 isUpdate; /* True if this is an UPDATE operation */ u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */ - int regRowid = -1; /* Register holding ROWID value */ isUpdate = regOldData!=0; db = pParse->db; @@ -108661,8 +109579,9 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( case OE_Fail: { char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName, pTab->aCol[i].zName); - sqlite3VdbeAddOp4(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError, - regNewData+1+i, zMsg, P4_DYNAMIC); + sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError, + regNewData+1+i); + sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC); sqlite3VdbeChangeP5(v, P5_ConstraintNotNull); VdbeCoverage(v); break; @@ -108726,7 +109645,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } if( isUpdate ){ - /* pkChng!=0 does not mean that the rowid has change, only that + /* pkChng!=0 does not mean that the rowid has changed, only that ** it might have changed. Skip the conflict logic below if the rowid ** is unchanged. */ sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRowidOk, regOldData); @@ -108804,7 +109723,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** OP_Insert replace the existing entry than it is to delete the ** existing entry and then insert a new one. */ sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, OPFLAG_ISNOOP); - sqlite3VdbeChangeP4(v, -1, (char *)pTab, P4_TABLE); + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ if( pTab->pIndex ){ @@ -108861,7 +109780,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( /* Create a record for this index entry as it should appear after ** the insert or update. Store that record in the aRegIdx[ix] register */ - regIdx = sqlite3GetTempRange(pParse, pIdx->nColumn); + regIdx = aRegIdx[ix]+1; for(i=0; inColumn; i++){ int iField = pIdx->aiColumn[i]; int x; @@ -108872,9 +109791,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( VdbeComment((v, "%s column %d", pIdx->zName, i)); }else{ if( iField==XN_ROWID || iField==pTab->iPKey ){ - if( regRowid==regIdx+i ) continue; /* ROWID already in regIdx+i */ x = regNewData; - regRowid = pIdx->pPartIdxWhere ? 
-1 : regIdx+i; }else{ x = iField + regNewData + 1; } @@ -108884,7 +109801,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]); VdbeComment((v, "for %s", pIdx->zName)); - sqlite3ExprCacheAffinityChange(pParse, regIdx, pIdx->nColumn); /* In an UPDATE operation, if this index is the PRIMARY KEY index ** of a WITHOUT ROWID table and there has been no change the @@ -108898,7 +109814,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( /* Find out what action to take in case there is a uniqueness conflict */ onError = pIdx->onError; if( onError==OE_None ){ - sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn); sqlite3VdbeResolveLabel(v, addrUniqueOk); continue; /* pIdx is not a UNIQUE index */ } @@ -108907,7 +109822,26 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( }else if( onError==OE_Default ){ onError = OE_Abort; } - + + /* Collision detection may be omitted if all of the following are true: + ** (1) The conflict resolution algorithm is REPLACE + ** (2) The table is a WITHOUT ROWID table + ** (3) There are no secondary indexes on the table + ** (4) No delete triggers need to be fired if there is a conflict + ** (5) No FK constraint counters need to be updated if a conflict occurs. + */ + if( (ix==0 && pIdx->pNext==0) /* Condition 3 */ + && pPk==pIdx /* Condition 2 */ + && onError==OE_Replace /* Condition 1 */ + && ( 0==(db->flags&SQLITE_RecTriggers) || /* Condition 4 */ + 0==sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0)) + && ( 0==(db->flags&SQLITE_ForeignKeys) || /* Condition 5 */ + (0==pTab->pFKey && 0==sqlite3FkReferences(pTab))) + ){ + sqlite3VdbeResolveLabel(v, addrUniqueOk); + continue; + } + /* Check to see if the new index entry will be unique */ sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk, regIdx, pIdx->nKeyCol); VdbeCoverage(v); @@ -108991,13 +109925,12 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, regR, nPkField, 0, OE_Replace, - (pIdx==pPk ? ONEPASS_SINGLE : ONEPASS_OFF), -1); + (pIdx==pPk ? ONEPASS_SINGLE : ONEPASS_OFF), iThisCur); seenReplace = 1; break; } } sqlite3VdbeResolveLabel(v, addrUniqueOk); - sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn); if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField); } if( ipkTop ){ @@ -109009,6 +109942,25 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( VdbeModuleComment((v, "END: GenCnstCks(%d)", seenReplace)); } +#ifdef SQLITE_ENABLE_NULL_TRIM +/* +** Change the P5 operand on the last opcode (which should be an OP_MakeRecord) +** to be the number of columns in table pTab that must not be NULL-trimmed. +** +** Or if no columns of pTab may be NULL-trimmed, leave P5 at zero. +*/ +SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe *v, Table *pTab){ + u16 i; + + /* Records with omitted columns are only allowed for schema format + ** version 2 and later (SQLite version 3.1.4, 2005-02-20). */ + if( pTab->pSchema->file_format<2 ) return; + + for(i=pTab->nCol; i>1 && pTab->aCol[i-1].pDflt==0; i--){} + sqlite3VdbeChangeP5(v, i); +} +#endif + /* ** This routine generates code to finish the INSERT or UPDATE operation ** that was started by a prior call to sqlite3GenerateConstraintChecks. @@ -109025,7 +109977,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( int iIdxCur, /* First index cursor */ int regNewData, /* Range of content */ int *aRegIdx, /* Register used by each index. 
0 for unused indices */ - int isUpdate, /* True for UPDATE, False for INSERT */ + int update_flags, /* True for UPDATE, False for INSERT */ int appendBias, /* True if this is likely to be an append */ int useSeekResult /* True to set the USESEEKRESULT flag on OP_[Idx]Insert */ ){ @@ -109037,6 +109989,11 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( int i; /* Loop counter */ u8 bAffinityDone = 0; /* True if OP_Affinity has been run already */ + assert( update_flags==0 + || update_flags==OPFLAG_ISUPDATE + || update_flags==(OPFLAG_ISUPDATE|OPFLAG_SAVEPOSITION) + ); + v = sqlite3GetVdbe(pParse); assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ @@ -109047,26 +110004,39 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( sqlite3VdbeAddOp2(v, OP_IsNull, aRegIdx[i], sqlite3VdbeCurrentAddr(v)+2); VdbeCoverage(v); } - sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i]); - pik_flags = 0; - if( useSeekResult ) pik_flags = OPFLAG_USESEEKRESULT; + pik_flags = (useSeekResult ? OPFLAG_USESEEKRESULT : 0); if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ assert( pParse->nested==0 ); pik_flags |= OPFLAG_NCHANGE; + pik_flags |= (update_flags & OPFLAG_SAVEPOSITION); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( update_flags==0 ){ + sqlite3VdbeAddOp4(v, OP_InsertInt, + iIdxCur+i, aRegIdx[i], 0, (char*)pTab, P4_TABLE + ); + sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP); + } +#endif } + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i], + aRegIdx[i]+1, + pIdx->uniqNotNull ? pIdx->nKeyCol: pIdx->nColumn); sqlite3VdbeChangeP5(v, pik_flags); } if( !HasRowid(pTab) ) return; regData = regNewData + 1; regRec = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_MakeRecord, regData, pTab->nCol, regRec); - if( !bAffinityDone ) sqlite3TableAffinity(v, pTab, 0); - sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol); + sqlite3SetMakeRecordP5(v, pTab); + if( !bAffinityDone ){ + sqlite3TableAffinity(v, pTab, 0); + sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol); + } if( pParse->nested ){ pik_flags = 0; }else{ pik_flags = OPFLAG_NCHANGE; - pik_flags |= (isUpdate?OPFLAG_ISUPDATE:OPFLAG_LASTROWID); + pik_flags |= (update_flags?update_flags:OPFLAG_LASTROWID); } if( appendBias ){ pik_flags |= OPFLAG_APPEND; @@ -109076,7 +110046,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( } sqlite3VdbeAddOp3(v, OP_Insert, iDataCur, regRec, regNewData); if( !pParse->nested ){ - sqlite3VdbeChangeP4(v, -1, (char *)pTab, P4_TABLE); + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); } sqlite3VdbeChangeP5(v, pik_flags); } @@ -109459,6 +110429,7 @@ static int xferOptimization( sqlite3VdbeJumpHere(v, addr1); } if( HasRowid(pSrc) ){ + u8 insFlags; sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead); emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); if( pDest->iPKey>=0 ){ @@ -109474,10 +110445,17 @@ static int xferOptimization( addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); assert( (pDest->tabFlags & TF_Autoincrement)==0 ); } - sqlite3VdbeAddOp2(v, OP_RowData, iSrc, regData); + sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + if( db->flags & SQLITE_Vacuum ){ + sqlite3VdbeAddOp3(v, OP_Last, iDest, 0, -1); + insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID| + OPFLAG_APPEND|OPFLAG_USESEEKRESULT; + }else{ + insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND; + } sqlite3VdbeAddOp4(v, OP_Insert, iDest, regData, regRowid, (char*)pDest, P4_TABLE); - sqlite3VdbeChangeP5(v, OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND); + sqlite3VdbeChangeP5(v, insFlags); sqlite3VdbeAddOp2(v, 
OP_Next, iSrc, addr1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); @@ -109499,7 +110477,7 @@ static int xferOptimization( sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR); VdbeComment((v, "%s", pDestIdx->zName)); addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_RowKey, iSrc, regData); + sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); if( db->flags & SQLITE_Vacuum ){ /* This INSERT command is part of a VACUUM operation, which guarantees ** that the destination table is empty. If all indexed columns use @@ -109529,8 +110507,8 @@ static int xferOptimization( if( !HasRowid(pSrc) && pDestIdx->idxType==2 ){ idxInsFlags |= OPFLAG_NCHANGE; } - sqlite3VdbeAddOp3(v, OP_IdxInsert, iDest, regData, 1); - sqlite3VdbeChangeP5(v, idxInsFlags); + sqlite3VdbeAddOp2(v, OP_IdxInsert, iDest, regData); + sqlite3VdbeChangeP5(v, idxInsFlags|OPFLAG_APPEND); sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addr1); sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); @@ -110284,7 +111262,6 @@ typedef int (*sqlite3_loadext_entry)( /************** End of sqlite3ext.h ******************************************/ /************** Continuing where we left off in loadext.c ********************/ /* #include "sqliteInt.h" */ -/* #include */ #ifndef SQLITE_OMIT_LOAD_EXTENSION /* @@ -111093,6 +112070,8 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){ ** ../tool/mkpragmatab.tcl. To update the set of pragmas, edit ** that script and rerun it. */ + +/* The various pragma types */ #define PragTyp_HEADER_VALUE 0 #define PragTyp_AUTO_VACUUM 1 #define PragTyp_FLAG 2 @@ -111136,419 +112115,560 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){ #define PragTyp_REKEY 40 #define PragTyp_LOCK_STATUS 41 #define PragTyp_PARSER_TRACE 42 -#define PragFlag_NeedSchema 0x01 -#define PragFlag_ReadOnly 0x02 -static const struct sPragmaNames { - const char *const zName; /* Name of pragma */ - u8 ePragTyp; /* PragTyp_XXX value */ - u8 mPragFlag; /* Zero or more PragFlag_XXX values */ - u32 iArg; /* Extra argument */ -} aPragmaNames[] = { + +/* Property flags associated with various pragma. 
*/ +#define PragFlg_NeedSchema 0x01 /* Force schema load before running */ +#define PragFlg_NoColumns 0x02 /* OP_ResultRow called with zero columns */ +#define PragFlg_NoColumns1 0x04 /* zero columns if RHS argument is present */ +#define PragFlg_ReadOnly 0x08 /* Read-only HEADER_VALUE */ +#define PragFlg_Result0 0x10 /* Acts as query when no argument */ +#define PragFlg_Result1 0x20 /* Acts as query when has one argument */ +#define PragFlg_SchemaOpt 0x40 /* Schema restricts name search if present */ +#define PragFlg_SchemaReq 0x80 /* Schema required - "main" is default */ + +/* Names of columns for pragmas that return multi-column result +** or that return single-column results where the name of the +** result column is different from the name of the pragma +*/ +static const char *const pragCName[] = { + /* 0 */ "cache_size", /* Used by: default_cache_size */ + /* 1 */ "cid", /* Used by: table_info */ + /* 2 */ "name", + /* 3 */ "type", + /* 4 */ "notnull", + /* 5 */ "dflt_value", + /* 6 */ "pk", + /* 7 */ "table", /* Used by: stats */ + /* 8 */ "index", + /* 9 */ "width", + /* 10 */ "height", + /* 11 */ "seqno", /* Used by: index_info */ + /* 12 */ "cid", + /* 13 */ "name", + /* 14 */ "seqno", /* Used by: index_xinfo */ + /* 15 */ "cid", + /* 16 */ "name", + /* 17 */ "desc", + /* 18 */ "coll", + /* 19 */ "key", + /* 20 */ "seq", /* Used by: index_list */ + /* 21 */ "name", + /* 22 */ "unique", + /* 23 */ "origin", + /* 24 */ "partial", + /* 25 */ "seq", /* Used by: database_list */ + /* 26 */ "name", + /* 27 */ "file", + /* 28 */ "seq", /* Used by: collation_list */ + /* 29 */ "name", + /* 30 */ "id", /* Used by: foreign_key_list */ + /* 31 */ "seq", + /* 32 */ "table", + /* 33 */ "from", + /* 34 */ "to", + /* 35 */ "on_update", + /* 36 */ "on_delete", + /* 37 */ "match", + /* 38 */ "table", /* Used by: foreign_key_check */ + /* 39 */ "rowid", + /* 40 */ "parent", + /* 41 */ "fkid", + /* 42 */ "busy", /* Used by: wal_checkpoint */ + /* 43 */ "log", + /* 44 */ "checkpointed", + /* 45 */ "timeout", /* Used by: busy_timeout */ + /* 46 */ "database", /* Used by: lock_status */ + /* 47 */ "status", +}; + +/* Definitions of all built-in pragmas */ +typedef struct PragmaName { + const char *const zName; /* Name of pragma */ + u8 ePragTyp; /* PragTyp_XXX value */ + u8 mPragFlg; /* Zero or more PragFlg_XXX values */ + u8 iPragCName; /* Start of column names in pragCName[] */ + u8 nPragCName; /* Num of col names. 
0 means use pragma name */ + u32 iArg; /* Extra argument */ +} PragmaName; +static const PragmaName aPragmaName[] = { #if defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD) - { /* zName: */ "activate_extensions", - /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "activate_extensions", + /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "application_id", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ 0, - /* iArg: */ BTREE_APPLICATION_ID }, + {/* zName: */ "application_id", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_APPLICATION_ID }, #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) - { /* zName: */ "auto_vacuum", - /* ePragTyp: */ PragTyp_AUTO_VACUUM, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "auto_vacuum", + /* ePragTyp: */ PragTyp_AUTO_VACUUM, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_AUTOMATIC_INDEX) - { /* zName: */ "automatic_index", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_AutoIndex }, -#endif -#endif - { /* zName: */ "busy_timeout", - /* ePragTyp: */ PragTyp_BUSY_TIMEOUT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "automatic_index", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_AutoIndex }, +#endif +#endif + {/* zName: */ "busy_timeout", + /* ePragTyp: */ PragTyp_BUSY_TIMEOUT, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 45, 1, + /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "cache_size", - /* ePragTyp: */ PragTyp_CACHE_SIZE, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "cache_size", + /* ePragTyp: */ PragTyp_CACHE_SIZE, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "cache_spill", - /* ePragTyp: */ PragTyp_CACHE_SPILL, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif - { /* zName: */ "case_sensitive_like", - /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "cell_size_check", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_CellSizeCk }, + {/* zName: */ "cache_spill", + /* ePragTyp: */ PragTyp_CACHE_SPILL, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif + {/* zName: */ "case_sensitive_like", + /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE, + /* ePragFlg: */ PragFlg_NoColumns, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "cell_size_check", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_CellSizeCk }, #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "checkpoint_fullfsync", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_CkptFullFSync }, + {/* zName: */ "checkpoint_fullfsync", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, 
+ /* iArg: */ SQLITE_CkptFullFSync }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "collation_list", - /* ePragTyp: */ PragTyp_COLLATION_LIST, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "collation_list", + /* ePragTyp: */ PragTyp_COLLATION_LIST, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 28, 2, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) - { /* zName: */ "compile_options", - /* ePragTyp: */ PragTyp_COMPILE_OPTIONS, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "compile_options", + /* ePragTyp: */ PragTyp_COMPILE_OPTIONS, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "count_changes", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_CountRows }, + {/* zName: */ "count_changes", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_CountRows }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN - { /* zName: */ "data_store_directory", - /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "data_store_directory", + /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY, + /* ePragFlg: */ PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "data_version", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ PragFlag_ReadOnly, - /* iArg: */ BTREE_DATA_VERSION }, + {/* zName: */ "data_version", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_DATA_VERSION }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "database_list", - /* ePragTyp: */ PragTyp_DATABASE_LIST, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "database_list", + /* ePragTyp: */ PragTyp_DATABASE_LIST, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0, + /* ColNames: */ 25, 3, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) - { /* zName: */ "default_cache_size", - /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "default_cache_size", + /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 1, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - { /* zName: */ "defer_foreign_keys", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_DeferFKs }, + {/* zName: */ "defer_foreign_keys", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_DeferFKs }, #endif #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "empty_result_callbacks", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_NullCallback }, + {/* zName: */ "empty_result_callbacks", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_NullCallback }, #endif #if !defined(SQLITE_OMIT_UTF16) - { /* zName: */ "encoding", - /* ePragTyp: */ PragTyp_ENCODING, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "encoding", 
+ /* ePragTyp: */ PragTyp_ENCODING, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - { /* zName: */ "foreign_key_check", - /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "foreign_key_check", + /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK, + /* ePragFlg: */ PragFlg_NeedSchema, + /* ColNames: */ 38, 4, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FOREIGN_KEY) - { /* zName: */ "foreign_key_list", - /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "foreign_key_list", + /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 30, 8, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - { /* zName: */ "foreign_keys", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ForeignKeys }, + {/* zName: */ "foreign_keys", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ForeignKeys }, #endif #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "freelist_count", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ PragFlag_ReadOnly, - /* iArg: */ BTREE_FREE_PAGE_COUNT }, + {/* zName: */ "freelist_count", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_FREE_PAGE_COUNT }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "full_column_names", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_FullColNames }, - { /* zName: */ "fullfsync", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_FullFSync }, + {/* zName: */ "full_column_names", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_FullColNames }, + {/* zName: */ "fullfsync", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_FullFSync }, #endif #if defined(SQLITE_HAS_CODEC) - { /* zName: */ "hexkey", - /* ePragTyp: */ PragTyp_HEXKEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "hexrekey", - /* ePragTyp: */ PragTyp_HEXKEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "hexkey", + /* ePragTyp: */ PragTyp_HEXKEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "hexrekey", + /* ePragTyp: */ PragTyp_HEXKEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_CHECK) - { /* zName: */ "ignore_check_constraints", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_IgnoreChecks }, + {/* zName: */ "ignore_check_constraints", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_IgnoreChecks }, #endif #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) - { /* zName: */ "incremental_vacuum", - /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "incremental_vacuum", + /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM, + /* 
ePragFlg: */ PragFlg_NeedSchema|PragFlg_NoColumns, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "index_info", - /* ePragTyp: */ PragTyp_INDEX_INFO, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "index_list", - /* ePragTyp: */ PragTyp_INDEX_LIST, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "index_xinfo", - /* ePragTyp: */ PragTyp_INDEX_INFO, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 1 }, + {/* zName: */ "index_info", + /* ePragTyp: */ PragTyp_INDEX_INFO, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 11, 3, + /* iArg: */ 0 }, + {/* zName: */ "index_list", + /* ePragTyp: */ PragTyp_INDEX_LIST, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 20, 5, + /* iArg: */ 0 }, + {/* zName: */ "index_xinfo", + /* ePragTyp: */ PragTyp_INDEX_INFO, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 14, 6, + /* iArg: */ 1 }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) - { /* zName: */ "integrity_check", - /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "integrity_check", + /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, + /* ePragFlg: */ PragFlg_NeedSchema, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "journal_mode", - /* ePragTyp: */ PragTyp_JOURNAL_MODE, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "journal_size_limit", - /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "journal_mode", + /* ePragTyp: */ PragTyp_JOURNAL_MODE, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "journal_size_limit", + /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if defined(SQLITE_HAS_CODEC) - { /* zName: */ "key", - /* ePragTyp: */ PragTyp_KEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "key", + /* ePragTyp: */ PragTyp_KEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "legacy_file_format", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_LegacyFileFmt }, + {/* zName: */ "legacy_file_format", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_LegacyFileFmt }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE - { /* zName: */ "lock_proxy_file", - /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "lock_proxy_file", + /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE, + /* ePragFlg: */ PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - { /* zName: */ "lock_status", - /* ePragTyp: */ PragTyp_LOCK_STATUS, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "lock_status", + /* ePragTyp: */ PragTyp_LOCK_STATUS, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 46, 2, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "locking_mode", - /* ePragTyp: */ PragTyp_LOCKING_MODE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* 
zName: */ "max_page_count", - /* ePragTyp: */ PragTyp_PAGE_COUNT, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "mmap_size", - /* ePragTyp: */ PragTyp_MMAP_SIZE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "page_count", - /* ePragTyp: */ PragTyp_PAGE_COUNT, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "page_size", - /* ePragTyp: */ PragTyp_PAGE_SIZE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "locking_mode", + /* ePragTyp: */ PragTyp_LOCKING_MODE, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "max_page_count", + /* ePragTyp: */ PragTyp_PAGE_COUNT, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "mmap_size", + /* ePragTyp: */ PragTyp_MMAP_SIZE, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "page_count", + /* ePragTyp: */ PragTyp_PAGE_COUNT, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "page_size", + /* ePragTyp: */ PragTyp_PAGE_SIZE, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_PARSER_TRACE) - { /* zName: */ "parser_trace", - /* ePragTyp: */ PragTyp_PARSER_TRACE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "parser_trace", + /* ePragTyp: */ PragTyp_PARSER_TRACE, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "query_only", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_QueryOnly }, + {/* zName: */ "query_only", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_QueryOnly }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) - { /* zName: */ "quick_check", - /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "quick_check", + /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, + /* ePragFlg: */ PragFlg_NeedSchema, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "read_uncommitted", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ReadUncommitted }, - { /* zName: */ "recursive_triggers", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_RecTriggers }, + {/* zName: */ "read_uncommitted", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ReadUncommitted }, + {/* zName: */ "recursive_triggers", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_RecTriggers }, #endif #if defined(SQLITE_HAS_CODEC) - { /* zName: */ "rekey", - /* ePragTyp: */ PragTyp_REKEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "rekey", + /* ePragTyp: */ PragTyp_REKEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "reverse_unordered_selects", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ReverseOrder }, + {/* zName: */ "reverse_unordered_selects", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ 
PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ReverseOrder }, #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "schema_version", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ 0, - /* iArg: */ BTREE_SCHEMA_VERSION }, + {/* zName: */ "schema_version", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_SCHEMA_VERSION }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "secure_delete", - /* ePragTyp: */ PragTyp_SECURE_DELETE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "secure_delete", + /* ePragTyp: */ PragTyp_SECURE_DELETE, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "short_column_names", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ShortColNames }, -#endif - { /* zName: */ "shrink_memory", - /* ePragTyp: */ PragTyp_SHRINK_MEMORY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "soft_heap_limit", - /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, + {/* zName: */ "short_column_names", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ShortColNames }, +#endif + {/* zName: */ "shrink_memory", + /* ePragTyp: */ PragTyp_SHRINK_MEMORY, + /* ePragFlg: */ PragFlg_NoColumns, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "soft_heap_limit", + /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if defined(SQLITE_DEBUG) - { /* zName: */ "sql_trace", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_SqlTrace }, + {/* zName: */ "sql_trace", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_SqlTrace }, #endif #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "stats", - /* ePragTyp: */ PragTyp_STATS, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "stats", + /* ePragTyp: */ PragTyp_STATS, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 7, 4, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "synchronous", - /* ePragTyp: */ PragTyp_SYNCHRONOUS, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "synchronous", + /* ePragTyp: */ PragTyp_SYNCHRONOUS, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "table_info", - /* ePragTyp: */ PragTyp_TABLE_INFO, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "table_info", + /* ePragTyp: */ PragTyp_TABLE_INFO, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 1, 6, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "temp_store", - /* ePragTyp: */ PragTyp_TEMP_STORE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "temp_store_directory", - /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif - { /* zName: */ "threads", - /* ePragTyp: */ PragTyp_THREADS, - /* ePragFlag: */ 0, - /* iArg: */ 0 
}, + {/* zName: */ "temp_store", + /* ePragTyp: */ PragTyp_TEMP_STORE, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "temp_store_directory", + /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY, + /* ePragFlg: */ PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif + {/* zName: */ "threads", + /* ePragTyp: */ PragTyp_THREADS, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "user_version", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ 0, - /* iArg: */ BTREE_USER_VERSION }, + {/* zName: */ "user_version", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_USER_VERSION }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if defined(SQLITE_DEBUG) - { /* zName: */ "vdbe_addoptrace", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeAddopTrace }, - { /* zName: */ "vdbe_debug", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace }, - { /* zName: */ "vdbe_eqp", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeEQP }, - { /* zName: */ "vdbe_listing", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeListing }, - { /* zName: */ "vdbe_trace", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeTrace }, + {/* zName: */ "vdbe_addoptrace", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_VdbeAddopTrace }, + {/* zName: */ "vdbe_debug", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace }, + {/* zName: */ "vdbe_eqp", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_VdbeEQP }, + {/* zName: */ "vdbe_listing", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_VdbeListing }, + {/* zName: */ "vdbe_trace", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_VdbeTrace }, #endif #endif #if !defined(SQLITE_OMIT_WAL) - { /* zName: */ "wal_autocheckpoint", - /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "wal_checkpoint", - /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, + {/* zName: */ "wal_autocheckpoint", + /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "wal_checkpoint", + /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, + /* ePragFlg: */ PragFlg_NeedSchema, + /* ColNames: */ 42, 3, + /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "writable_schema", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode }, + {/* zName: */ "writable_schema", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode }, #endif }; /* Number of pragmas: 60 on by default, 73 total. 
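The new "ColNames" pair added to every entry above is easy to misread: the two numbers are an offset and a count into one shared, flat array of column-name strings (the removed per-pragma azCol[] arrays further down reappear there), and a count of zero means the pragma reports a single result column named after the pragma itself. The sketch below is illustrative only; the identifiers DemoPragma and azColName are invented for the example and the offsets do not match the real table.

/* Minimal standalone sketch of the (offset, count) column-name scheme.
** Not the amalgamation's own declarations. */
#include <stdio.h>

static const char *azColName[] = {
  "cid", "name", "type", "notnull", "dflt_value", "pk",  /* 0..5: table_info */
  "table", "index", "width", "height"                    /* 6..9: stats      */
};

typedef struct DemoPragma {
  const char *zName;    /* pragma name */
  unsigned char iCol;   /* first entry in azColName[] */
  unsigned char nCol;   /* number of entries; 0 => single column named zName */
} DemoPragma;

static void printColumns(const DemoPragma *p){
  int i;
  printf("%s:", p->zName);
  if( p->nCol==0 ){
    printf(" %s", p->zName);                 /* fall back to the pragma name */
  }else{
    for(i=0; i<p->nCol; i++) printf(" %s", azColName[p->iCol+i]);
  }
  printf("\n");
}

int main(void){
  static const DemoPragma aDemo[] = {
    { "table_info", 0, 6 },
    { "stats",      6, 4 },
    { "cache_size", 0, 0 },   /* reports one column called "cache_size" */
  };
  int i;
  for(i=0; i<(int)(sizeof(aDemo)/sizeof(aDemo[0])); i++) printColumns(&aDemo[i]);
  return 0;
}

Sharing one flat name array keeps every table entry the same fixed size, which is what lets the table stay a plain sorted array that the new pragmaLocate() lookup can binary-search.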
*/ @@ -111689,29 +112809,29 @@ static int changeTempStorage(Parse *pParse, const char *zStorageType){ #endif /* SQLITE_PAGER_PRAGMAS */ /* -** Set the names of the first N columns to the values in azCol[] +** Set result column names for a pragma. */ -static void setAllColumnNames( - Vdbe *v, /* The query under construction */ - int N, /* Number of columns */ - const char **azCol /* Names of columns */ +static void setPragmaResultColumnNames( + Vdbe *v, /* The query under construction */ + const PragmaName *pPragma /* The pragma */ ){ - int i; - sqlite3VdbeSetNumCols(v, N); - for(i=0; inPragCName; + sqlite3VdbeSetNumCols(v, n==0 ? 1 : n); + if( n==0 ){ + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, pPragma->zName, SQLITE_STATIC); + }else{ + int i, j; + for(i=0, j=pPragma->iPragCName; iupr ? 0 : &aPragmaName[mid]; +} + /* ** Process a pragma statement. ** @@ -111831,12 +112969,11 @@ SQLITE_PRIVATE void sqlite3Pragma( Token *pId; /* Pointer to token */ char *aFcntl[4]; /* Argument to SQLITE_FCNTL_PRAGMA */ int iDb; /* Database index for */ - int lwr, upr, mid = 0; /* Binary search bounds */ int rc; /* return value form SQLITE_FCNTL_PRAGMA */ sqlite3 *db = pParse->db; /* The database connection */ Db *pDb; /* The specific database being pragmaed */ Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */ - const struct sPragmaNames *pPragma; + const PragmaName *pPragma; /* The pragma */ if( v==0 ) return; sqlite3VdbeRunOnlyOnce(v); @@ -111891,7 +113028,9 @@ SQLITE_PRIVATE void sqlite3Pragma( db->busyHandler.nBusy = 0; rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl); if( rc==SQLITE_OK ){ - returnSingleText(v, "result", aFcntl[0]); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, aFcntl[0], SQLITE_TRANSIENT); + returnSingleText(v, aFcntl[0]); sqlite3_free(aFcntl[0]); goto pragma_out; } @@ -111906,26 +113045,21 @@ SQLITE_PRIVATE void sqlite3Pragma( } /* Locate the pragma in the lookup table */ - lwr = 0; - upr = ArraySize(aPragmaNames)-1; - while( lwr<=upr ){ - mid = (lwr+upr)/2; - rc = sqlite3_stricmp(zLeft, aPragmaNames[mid].zName); - if( rc==0 ) break; - if( rc<0 ){ - upr = mid - 1; - }else{ - lwr = mid + 1; - } - } - if( lwr>upr ) goto pragma_out; - pPragma = &aPragmaNames[mid]; + pPragma = pragmaLocate(zLeft); + if( pPragma==0 ) goto pragma_out; /* Make sure the database schema is loaded if the pragma requires that */ - if( (pPragma->mPragFlag & PragFlag_NeedSchema)!=0 ){ + if( (pPragma->mPragFlg & PragFlg_NeedSchema)!=0 ){ if( sqlite3ReadSchema(pParse) ) goto pragma_out; } + /* Register the result column names for pragmas that return results */ + if( (pPragma->mPragFlg & PragFlg_NoColumns)==0 + && ((pPragma->mPragFlg & PragFlg_NoColumns1)==0 || zRight==0) + ){ + setPragmaResultColumnNames(v, pPragma); + } + /* Jump to the appropriate pragma handler */ switch( pPragma->ePragTyp ){ @@ -111962,7 +113096,6 @@ SQLITE_PRIVATE void sqlite3Pragma( VdbeOp *aOp; sqlite3VdbeUsesBtree(v, iDb); if( !zRight ){ - setOneColumnName(v, "cache_size"); pParse->nMem += 2; sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(getCacheSize)); aOp = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize, iLn); @@ -111997,7 +113130,7 @@ SQLITE_PRIVATE void sqlite3Pragma( assert( pBt!=0 ); if( !zRight ){ int size = ALWAYS(pBt) ? 
sqlite3BtreeGetPageSize(pBt) : 0; - returnSingleInt(v, "page_size", size); + returnSingleInt(v, size); }else{ /* Malloc may fail when setting the page-size, as there is an internal ** buffer that the pager module resizes using sqlite3_realloc(). @@ -112032,7 +113165,7 @@ SQLITE_PRIVATE void sqlite3Pragma( } } b = sqlite3BtreeSecureDelete(pBt, b); - returnSingleInt(v, "secure_delete", b); + returnSingleInt(v, b); break; } @@ -112064,8 +113197,6 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3AbsInt32(sqlite3Atoi(zRight))); } sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT); break; } @@ -112111,7 +113242,7 @@ SQLITE_PRIVATE void sqlite3Pragma( if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){ zRet = "exclusive"; } - returnSingleText(v, "locking_mode", zRet); + returnSingleText(v, zRet); break; } @@ -112124,7 +113255,6 @@ SQLITE_PRIVATE void sqlite3Pragma( int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */ int ii; /* Loop counter */ - setOneColumnName(v, "journal_mode"); if( zRight==0 ){ /* If there is no "=MODE" part of the pragma, do a query for the ** current mode */ @@ -112170,7 +113300,7 @@ SQLITE_PRIVATE void sqlite3Pragma( if( iLimit<-1 ) iLimit = -1; } iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit); - returnSingleInt(v, "journal_size_limit", iLimit); + returnSingleInt(v, iLimit); break; } @@ -112188,7 +113318,7 @@ SQLITE_PRIVATE void sqlite3Pragma( Btree *pBt = pDb->pBt; assert( pBt!=0 ); if( !zRight ){ - returnSingleInt(v, "auto_vacuum", sqlite3BtreeGetAutoVacuum(pBt)); + returnSingleInt(v, sqlite3BtreeGetAutoVacuum(pBt)); }else{ int eAuto = getAutoVacuum(zRight); assert( eAuto>=0 && eAuto<=2 ); @@ -112267,7 +113397,7 @@ SQLITE_PRIVATE void sqlite3Pragma( case PragTyp_CACHE_SIZE: { assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( !zRight ){ - returnSingleInt(v, "cache_size", pDb->pSchema->cache_size); + returnSingleInt(v, pDb->pSchema->cache_size); }else{ int size = sqlite3Atoi(zRight); pDb->pSchema->cache_size = size; @@ -112301,7 +113431,7 @@ SQLITE_PRIVATE void sqlite3Pragma( case PragTyp_CACHE_SPILL: { assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( !zRight ){ - returnSingleInt(v, "cache_spill", + returnSingleInt(v, (db->flags & SQLITE_CacheSpill)==0 ? 
0 : sqlite3BtreeSetSpillSize(pDb->pBt,0)); }else{ @@ -112355,7 +113485,7 @@ SQLITE_PRIVATE void sqlite3Pragma( rc = SQLITE_OK; #endif if( rc==SQLITE_OK ){ - returnSingleInt(v, "mmap_size", sz); + returnSingleInt(v, sz); }else if( rc!=SQLITE_NOTFOUND ){ pParse->nErr++; pParse->rc = rc; @@ -112376,7 +113506,7 @@ SQLITE_PRIVATE void sqlite3Pragma( */ case PragTyp_TEMP_STORE: { if( !zRight ){ - returnSingleInt(v, "temp_store", db->temp_store); + returnSingleInt(v, db->temp_store); }else{ changeTempStorage(pParse, zRight); } @@ -112395,7 +113525,7 @@ SQLITE_PRIVATE void sqlite3Pragma( */ case PragTyp_TEMP_STORE_DIRECTORY: { if( !zRight ){ - returnSingleText(v, "temp_store_directory", sqlite3_temp_directory); + returnSingleText(v, sqlite3_temp_directory); }else{ #ifndef SQLITE_OMIT_WSD if( zRight[0] ){ @@ -112439,7 +113569,7 @@ SQLITE_PRIVATE void sqlite3Pragma( */ case PragTyp_DATA_STORE_DIRECTORY: { if( !zRight ){ - returnSingleText(v, "data_store_directory", sqlite3_data_directory); + returnSingleText(v, sqlite3_data_directory); }else{ #ifndef SQLITE_OMIT_WSD if( zRight[0] ){ @@ -112478,7 +113608,7 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3_file *pFile = sqlite3PagerFile(pPager); sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE, &proxy_file_path); - returnSingleText(v, "lock_proxy_file", proxy_file_path); + returnSingleText(v, proxy_file_path); }else{ Pager *pPager = sqlite3BtreePager(pDb->pBt); sqlite3_file *pFile = sqlite3PagerFile(pPager); @@ -112510,7 +113640,7 @@ SQLITE_PRIVATE void sqlite3Pragma( */ case PragTyp_SYNCHRONOUS: { if( !zRight ){ - returnSingleInt(v, "synchronous", pDb->safety_level-1); + returnSingleInt(v, pDb->safety_level-1); }else{ if( !db->autoCommit ){ sqlite3ErrorMsg(pParse, @@ -112530,7 +113660,8 @@ SQLITE_PRIVATE void sqlite3Pragma( #ifndef SQLITE_OMIT_FLAG_PRAGMAS case PragTyp_FLAG: { if( zRight==0 ){ - returnSingleInt(v, pPragma->zName, (db->flags & pPragma->iArg)!=0 ); + setPragmaResultColumnNames(v, pPragma); + returnSingleInt(v, (db->flags & pPragma->iArg)!=0 ); }else{ int mask = pPragma->iArg; /* Mask of bits to set or clear. 
*/ if( db->autoCommit==0 ){ @@ -112580,16 +113711,12 @@ SQLITE_PRIVATE void sqlite3Pragma( Table *pTab; pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb); if( pTab ){ - static const char *azCol[] = { - "cid", "name", "type", "notnull", "dflt_value", "pk" - }; int i, k; int nHidden = 0; Column *pCol; Index *pPk = sqlite3PrimaryKeyIndex(pTab); pParse->nMem = 6; sqlite3CodeVerifySchema(pParse, iDb); - setAllColumnNames(v, 6, azCol); assert( 6==ArraySize(azCol) ); sqlite3ViewGetColumnNames(pParse, pTab); for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ if( IsHiddenColumn(pCol) ){ @@ -112618,13 +113745,10 @@ SQLITE_PRIVATE void sqlite3Pragma( break; case PragTyp_STATS: { - static const char *azCol[] = { "table", "index", "width", "height" }; Index *pIdx; HashElem *i; - v = sqlite3GetVdbe(pParse); pParse->nMem = 4; sqlite3CodeVerifySchema(pParse, iDb); - setAllColumnNames(v, 4, azCol); assert( 4==ArraySize(azCol) ); for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){ Table *pTab = sqliteHashData(i); sqlite3VdbeMultiLoad(v, 1, "ssii", @@ -112649,9 +113773,6 @@ SQLITE_PRIVATE void sqlite3Pragma( Table *pTab; pIdx = sqlite3FindIndex(db, zRight, zDb); if( pIdx ){ - static const char *azCol[] = { - "seqno", "cid", "name", "desc", "coll", "key" - }; int i; int mx; if( pPragma->iArg ){ @@ -112665,8 +113786,7 @@ SQLITE_PRIVATE void sqlite3Pragma( } pTab = pIdx->pTable; sqlite3CodeVerifySchema(pParse, iDb); - assert( pParse->nMem<=ArraySize(azCol) ); - setAllColumnNames(v, pParse->nMem, azCol); + assert( pParse->nMem<=pPragma->nPragCName ); for(i=0; iaiColumn[i]; sqlite3VdbeMultiLoad(v, 1, "iis", i, cnum, @@ -112689,13 +113809,8 @@ SQLITE_PRIVATE void sqlite3Pragma( int i; pTab = sqlite3FindTable(db, zRight, zDb); if( pTab ){ - static const char *azCol[] = { - "seq", "name", "unique", "origin", "partial" - }; - v = sqlite3GetVdbe(pParse); pParse->nMem = 5; sqlite3CodeVerifySchema(pParse, iDb); - setAllColumnNames(v, 5, azCol); assert( 5==ArraySize(azCol) ); for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){ const char *azOrigin[] = { "c", "u", "pk" }; sqlite3VdbeMultiLoad(v, 1, "isisi", @@ -112711,10 +113826,8 @@ SQLITE_PRIVATE void sqlite3Pragma( break; case PragTyp_DATABASE_LIST: { - static const char *azCol[] = { "seq", "name", "file" }; int i; pParse->nMem = 3; - setAllColumnNames(v, 3, azCol); assert( 3==ArraySize(azCol) ); for(i=0; inDb; i++){ if( db->aDb[i].pBt==0 ) continue; assert( db->aDb[i].zDbSName!=0 ); @@ -112728,11 +113841,9 @@ SQLITE_PRIVATE void sqlite3Pragma( break; case PragTyp_COLLATION_LIST: { - static const char *azCol[] = { "seq", "name" }; int i = 0; HashElem *p; pParse->nMem = 2; - setAllColumnNames(v, 2, azCol); assert( 2==ArraySize(azCol) ); for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){ CollSeq *pColl = (CollSeq *)sqliteHashData(p); sqlite3VdbeMultiLoad(v, 1, "is", i++, pColl->zName); @@ -112748,17 +113859,11 @@ SQLITE_PRIVATE void sqlite3Pragma( Table *pTab; pTab = sqlite3FindTable(db, zRight, zDb); if( pTab ){ - v = sqlite3GetVdbe(pParse); pFK = pTab->pFKey; if( pFK ){ - static const char *azCol[] = { - "id", "seq", "table", "from", "to", "on_update", "on_delete", - "match" - }; int i = 0; pParse->nMem = 8; sqlite3CodeVerifySchema(pParse, iDb); - setAllColumnNames(v, 8, azCol); assert( 8==ArraySize(azCol) ); while(pFK){ int j; for(j=0; jnCol; j++){ @@ -112799,14 +113904,11 @@ SQLITE_PRIVATE void sqlite3Pragma( int addrTop; /* Top of a loop checking foreign keys */ int addrOk; /* Jump here if the key is OK */ int *aiCols; /* 
child to parent column mapping */ - static const char *azCol[] = { "table", "rowid", "parent", "fkid" }; regResult = pParse->nMem+1; pParse->nMem += 4; regKey = ++pParse->nMem; regRow = ++pParse->nMem; - v = sqlite3GetVdbe(pParse); - setAllColumnNames(v, 4, azCol); assert( 4==ArraySize(azCol) ); sqlite3CodeVerifySchema(pParse, iDb); k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash); while( k ){ @@ -112945,7 +114047,6 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Initialize the VDBE program */ pParse->nMem = 6; - setOneColumnName(v, "integrity_check"); /* Set the maximum error count */ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; @@ -113197,7 +114298,7 @@ SQLITE_PRIVATE void sqlite3Pragma( assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 ); assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE ); assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE ); - returnSingleText(v, "encoding", encnames[ENC(pParse->db)].zName); + returnSingleText(v, encnames[ENC(pParse->db)].zName); }else{ /* "PRAGMA encoding = XXX" */ /* Only change the value of sqlite.enc if the database handle is not ** initialized. If the main database exists, the new sqlite.enc value @@ -113260,7 +114361,7 @@ SQLITE_PRIVATE void sqlite3Pragma( case PragTyp_HEADER_VALUE: { int iCookie = pPragma->iArg; /* Which cookie to read or write */ sqlite3VdbeUsesBtree(v, iDb); - if( zRight && (pPragma->mPragFlag & PragFlag_ReadOnly)==0 ){ + if( zRight && (pPragma->mPragFlg & PragFlg_ReadOnly)==0 ){ /* Write the specified cookie value */ static const VdbeOpList setCookie[] = { { OP_Transaction, 0, 1, 0}, /* 0 */ @@ -113288,8 +114389,6 @@ SQLITE_PRIVATE void sqlite3Pragma( aOp[0].p1 = iDb; aOp[1].p1 = iDb; aOp[1].p3 = iCookie; - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT); sqlite3VdbeReusable(v); } } @@ -113307,7 +114406,6 @@ SQLITE_PRIVATE void sqlite3Pragma( int i = 0; const char *zOpt; pParse->nMem = 1; - setOneColumnName(v, "compile_option"); while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){ sqlite3VdbeLoadString(v, 1, zOpt); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); @@ -113324,7 +114422,6 @@ SQLITE_PRIVATE void sqlite3Pragma( ** Checkpoint the database. */ case PragTyp_WAL_CHECKPOINT: { - static const char *azCol[] = { "busy", "log", "checkpointed" }; int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED); int eMode = SQLITE_CHECKPOINT_PASSIVE; if( zRight ){ @@ -113336,7 +114433,6 @@ SQLITE_PRIVATE void sqlite3Pragma( eMode = SQLITE_CHECKPOINT_TRUNCATE; } } - setAllColumnNames(v, 3, azCol); assert( 3==ArraySize(azCol) ); pParse->nMem = 3; sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); @@ -113355,7 +114451,7 @@ SQLITE_PRIVATE void sqlite3Pragma( if( zRight ){ sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight)); } - returnSingleInt(v, "wal_autocheckpoint", + returnSingleInt(v, db->xWalCallback==sqlite3WalDefaultHook ? 
SQLITE_PTR_TO_INT(db->pWalArg) : 0); } @@ -113388,7 +114484,7 @@ SQLITE_PRIVATE void sqlite3Pragma( if( zRight ){ sqlite3_busy_timeout(db, sqlite3Atoi(zRight)); } - returnSingleInt(v, "timeout", db->busyTimeout); + returnSingleInt(v, db->busyTimeout); break; } @@ -113408,7 +114504,7 @@ SQLITE_PRIVATE void sqlite3Pragma( if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){ sqlite3_soft_heap_limit64(N); } - returnSingleInt(v, "soft_heap_limit", sqlite3_soft_heap_limit64(-1)); + returnSingleInt(v, sqlite3_soft_heap_limit64(-1)); break; } @@ -113427,8 +114523,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ){ sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff)); } - returnSingleInt(v, "threads", - sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1)); + returnSingleInt(v, sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1)); break; } @@ -113440,9 +114535,7 @@ SQLITE_PRIVATE void sqlite3Pragma( static const char *const azLockName[] = { "unlocked", "shared", "reserved", "pending", "exclusive" }; - static const char *azCol[] = { "database", "status" }; int i; - setAllColumnNames(v, 2, azCol); assert( 2==ArraySize(azCol) ); pParse->nMem = 2; for(i=0; inDb; i++){ Btree *pBt; @@ -113508,10 +114601,325 @@ SQLITE_PRIVATE void sqlite3Pragma( } /* End of the PRAGMA switch */ + /* The following block is a no-op unless SQLITE_DEBUG is defined. Its only + ** purpose is to execute assert() statements to verify that if the + ** PragFlg_NoColumns1 flag is set and the caller specified an argument + ** to the PRAGMA, the implementation has not added any OP_ResultRow + ** instructions to the VM. */ + if( (pPragma->mPragFlg & PragFlg_NoColumns1) && zRight ){ + sqlite3VdbeVerifyNoResultRow(v); + } + pragma_out: sqlite3DbFree(db, zLeft); sqlite3DbFree(db, zRight); } +#ifndef SQLITE_OMIT_VIRTUALTABLE +/***************************************************************************** +** Implementation of an eponymous virtual table that runs a pragma. +** +*/ +typedef struct PragmaVtab PragmaVtab; +typedef struct PragmaVtabCursor PragmaVtabCursor; +struct PragmaVtab { + sqlite3_vtab base; /* Base class. Must be first */ + sqlite3 *db; /* The database connection to which it belongs */ + const PragmaName *pName; /* Name of the pragma */ + u8 nHidden; /* Number of hidden columns */ + u8 iHidden; /* Index of the first hidden column */ +}; +struct PragmaVtabCursor { + sqlite3_vtab_cursor base; /* Base class. Must be first */ + sqlite3_stmt *pPragma; /* The pragma statement to run */ + sqlite_int64 iRowid; /* Current rowid */ + char *azArg[2]; /* Value of the argument and schema */ +}; + +/* +** Pragma virtual table module xConnect method. 
+*/ +static int pragmaVtabConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + const PragmaName *pPragma = (const PragmaName*)pAux; + PragmaVtab *pTab = 0; + int rc; + int i, j; + char cSep = '('; + StrAccum acc; + char zBuf[200]; + + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); + sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0); + sqlite3StrAccumAppendAll(&acc, "CREATE TABLE x"); + for(i=0, j=pPragma->iPragCName; inPragCName; i++, j++){ + sqlite3XPrintf(&acc, "%c\"%s\"", cSep, pragCName[j]); + cSep = ','; + } + if( i==0 ){ + sqlite3XPrintf(&acc, "(\"%s\"", pPragma->zName); + cSep = ','; + i++; + } + j = 0; + if( pPragma->mPragFlg & PragFlg_Result1 ){ + sqlite3StrAccumAppendAll(&acc, ",arg HIDDEN"); + j++; + } + if( pPragma->mPragFlg & (PragFlg_SchemaOpt|PragFlg_SchemaReq) ){ + sqlite3StrAccumAppendAll(&acc, ",schema HIDDEN"); + j++; + } + sqlite3StrAccumAppend(&acc, ")", 1); + sqlite3StrAccumFinish(&acc); + assert( strlen(zBuf) < sizeof(zBuf)-1 ); + rc = sqlite3_declare_vtab(db, zBuf); + if( rc==SQLITE_OK ){ + pTab = (PragmaVtab*)sqlite3_malloc(sizeof(PragmaVtab)); + if( pTab==0 ){ + rc = SQLITE_NOMEM; + }else{ + memset(pTab, 0, sizeof(PragmaVtab)); + pTab->pName = pPragma; + pTab->db = db; + pTab->iHidden = i; + pTab->nHidden = j; + } + }else{ + *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + } + + *ppVtab = (sqlite3_vtab*)pTab; + return rc; +} + +/* +** Pragma virtual table module xDisconnect method. +*/ +static int pragmaVtabDisconnect(sqlite3_vtab *pVtab){ + PragmaVtab *pTab = (PragmaVtab*)pVtab; + sqlite3_free(pTab); + return SQLITE_OK; +} + +/* Figure out the best index to use to search a pragma virtual table. +** +** There are not really any index choices. But we want to encourage the +** query planner to give == constraints on as many hidden parameters as +** possible, and especially on the first hidden parameter. So return a +** high cost if hidden parameters are unconstrained. 
+*/ +static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ + PragmaVtab *pTab = (PragmaVtab*)tab; + const struct sqlite3_index_constraint *pConstraint; + int i, j; + int seen[2]; + + pIdxInfo->estimatedCost = (double)1; + if( pTab->nHidden==0 ){ return SQLITE_OK; } + pConstraint = pIdxInfo->aConstraint; + seen[0] = 0; + seen[1] = 0; + for(i=0; inConstraint; i++, pConstraint++){ + if( pConstraint->usable==0 ) continue; + if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; + if( pConstraint->iColumn < pTab->iHidden ) continue; + j = pConstraint->iColumn - pTab->iHidden; + assert( j < 2 ); + seen[j] = i+1; + } + if( seen[0]==0 ){ + pIdxInfo->estimatedCost = (double)2147483647; + pIdxInfo->estimatedRows = 2147483647; + return SQLITE_OK; + } + j = seen[0]-1; + pIdxInfo->aConstraintUsage[j].argvIndex = 1; + pIdxInfo->aConstraintUsage[j].omit = 1; + if( seen[1]==0 ) return SQLITE_OK; + pIdxInfo->estimatedCost = (double)20; + pIdxInfo->estimatedRows = 20; + j = seen[1]-1; + pIdxInfo->aConstraintUsage[j].argvIndex = 2; + pIdxInfo->aConstraintUsage[j].omit = 1; + return SQLITE_OK; +} + +/* Create a new cursor for the pragma virtual table */ +static int pragmaVtabOpen(sqlite3_vtab *pVtab, sqlite3_vtab_cursor **ppCursor){ + PragmaVtabCursor *pCsr; + pCsr = (PragmaVtabCursor*)sqlite3_malloc(sizeof(*pCsr)); + if( pCsr==0 ) return SQLITE_NOMEM; + memset(pCsr, 0, sizeof(PragmaVtabCursor)); + pCsr->base.pVtab = pVtab; + *ppCursor = &pCsr->base; + return SQLITE_OK; +} + +/* Clear all content from pragma virtual table cursor. */ +static void pragmaVtabCursorClear(PragmaVtabCursor *pCsr){ + int i; + sqlite3_finalize(pCsr->pPragma); + pCsr->pPragma = 0; + for(i=0; iazArg); i++){ + sqlite3_free(pCsr->azArg[i]); + pCsr->azArg[i] = 0; + } +} + +/* Close a pragma virtual table cursor */ +static int pragmaVtabClose(sqlite3_vtab_cursor *cur){ + PragmaVtabCursor *pCsr = (PragmaVtabCursor*)cur; + pragmaVtabCursorClear(pCsr); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* Advance the pragma virtual table cursor to the next row */ +static int pragmaVtabNext(sqlite3_vtab_cursor *pVtabCursor){ + PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor; + int rc = SQLITE_OK; + + /* Increment the xRowid value */ + pCsr->iRowid++; + assert( pCsr->pPragma ); + if( SQLITE_ROW!=sqlite3_step(pCsr->pPragma) ){ + rc = sqlite3_finalize(pCsr->pPragma); + pCsr->pPragma = 0; + pragmaVtabCursorClear(pCsr); + } + return rc; +} + +/* +** Pragma virtual table module xFilter method. +*/ +static int pragmaVtabFilter( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor; + PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab); + int rc; + int i, j; + StrAccum acc; + char *zSql; + + UNUSED_PARAMETER(idxNum); + UNUSED_PARAMETER(idxStr); + pragmaVtabCursorClear(pCsr); + j = (pTab->pName->mPragFlg & PragFlg_Result1)!=0 ? 
0 : 1; + for(i=0; iazArg) ); + pCsr->azArg[j] = sqlite3_mprintf("%s", sqlite3_value_text(argv[i])); + if( pCsr->azArg[j]==0 ){ + return SQLITE_NOMEM; + } + } + sqlite3StrAccumInit(&acc, 0, 0, 0, pTab->db->aLimit[SQLITE_LIMIT_SQL_LENGTH]); + sqlite3StrAccumAppendAll(&acc, "PRAGMA "); + if( pCsr->azArg[1] ){ + sqlite3XPrintf(&acc, "%Q.", pCsr->azArg[1]); + } + sqlite3StrAccumAppendAll(&acc, pTab->pName->zName); + if( pCsr->azArg[0] ){ + sqlite3XPrintf(&acc, "=%Q", pCsr->azArg[0]); + } + zSql = sqlite3StrAccumFinish(&acc); + if( zSql==0 ) return SQLITE_NOMEM; + rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pPragma, 0); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ){ + pTab->base.zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pTab->db)); + return rc; + } + return pragmaVtabNext(pVtabCursor); +} + +/* +** Pragma virtual table module xEof method. +*/ +static int pragmaVtabEof(sqlite3_vtab_cursor *pVtabCursor){ + PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor; + return (pCsr->pPragma==0); +} + +/* The xColumn method simply returns the corresponding column from +** the PRAGMA. +*/ +static int pragmaVtabColumn( + sqlite3_vtab_cursor *pVtabCursor, + sqlite3_context *ctx, + int i +){ + PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor; + PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab); + if( iiHidden ){ + sqlite3_result_value(ctx, sqlite3_column_value(pCsr->pPragma, i)); + }else{ + sqlite3_result_text(ctx, pCsr->azArg[i-pTab->iHidden],-1,SQLITE_TRANSIENT); + } + return SQLITE_OK; +} + +/* +** Pragma virtual table module xRowid method. +*/ +static int pragmaVtabRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *p){ + PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor; + *p = pCsr->iRowid; + return SQLITE_OK; +} + +/* The pragma virtual table object */ +static const sqlite3_module pragmaVtabModule = { + 0, /* iVersion */ + 0, /* xCreate - create a table */ + pragmaVtabConnect, /* xConnect - connect to an existing table */ + pragmaVtabBestIndex, /* xBestIndex - Determine search strategy */ + pragmaVtabDisconnect, /* xDisconnect - Disconnect from a table */ + 0, /* xDestroy - Drop a table */ + pragmaVtabOpen, /* xOpen - open a cursor */ + pragmaVtabClose, /* xClose - close a cursor */ + pragmaVtabFilter, /* xFilter - configure scan constraints */ + pragmaVtabNext, /* xNext - advance a cursor */ + pragmaVtabEof, /* xEof */ + pragmaVtabColumn, /* xColumn - read data */ + pragmaVtabRowid, /* xRowid - read data */ + 0, /* xUpdate - write data */ + 0, /* xBegin - begin transaction */ + 0, /* xSync - sync transaction */ + 0, /* xCommit - commit transaction */ + 0, /* xRollback - rollback transaction */ + 0, /* xFindFunction - function overloading */ + 0, /* xRename - rename the table */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0 /* xRollbackTo */ +}; + +/* +** Check to see if zTabName is really the name of a pragma. If it is, +** then register an eponymous virtual table for that pragma and return +** a pointer to the Module object for the new virtual table. 
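The registration function that follows only exposes pragmas carrying PragFlg_Result0 or PragFlg_Result1, and it strips a "pragma_" prefix before looking the name up, so each eligible pragma becomes an eponymous table-valued function named pragma_<name>. The snippet below is a usage sketch, not part of the diff, and assumes an SQLite build that already contains this feature; the database and table names are made up for the example.

/* Usage sketch: read PRAGMA table_info through the eponymous virtual table. */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT)", 0, 0, 0);
  /* The argument lands in the hidden "arg" column that pragmaVtabFilter()
  ** folds back into "PRAGMA table_info(t1)". */
  sqlite3_prepare_v2(db,
      "SELECT cid, name, type FROM pragma_table_info('t1')", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%d %s %s\n",
        sqlite3_column_int(pStmt, 0),
        (const char*)sqlite3_column_text(pStmt, 1),
        (const char*)sqlite3_column_text(pStmt, 2));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}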
+*/ +SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3 *db, const char *zName){ + const PragmaName *pName; + assert( sqlite3_strnicmp(zName, "pragma_", 7)==0 ); + pName = pragmaLocate(zName+7); + if( pName==0 ) return 0; + if( (pName->mPragFlg & (PragFlg_Result0|PragFlg_Result1))==0 ) return 0; + assert( sqlite3HashFind(&db->aModule, zName)==0 ); + return sqlite3VtabCreateModule(db, zName, &pragmaVtabModule, (void*)pName, 0); +} + +#endif /* SQLITE_OMIT_VIRTUALTABLE */ #endif /* SQLITE_OMIT_PRAGMA */ @@ -114708,7 +116116,7 @@ static void addWhereTerm( pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft); pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight); - pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2, 0); + pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2); if( pEq && isOuterJoin ){ ExprSetProperty(pEq, EP_FromJoin); assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) ); @@ -114895,7 +116303,7 @@ static void pushOntoSorter( int iLimit; /* LIMIT counter */ assert( bSeq==0 || bSeq==1 ); - assert( nData==1 || regData==regOrigData ); + assert( nData==1 || regData==regOrigData || regOrigData==0 ); if( nPrefixReg ){ assert( nPrefixReg==nExpr+bSeq ); regBase = regData - nExpr - bSeq; @@ -114907,11 +116315,11 @@ static void pushOntoSorter( iLimit = pSelect->iOffset ? pSelect->iOffset+1 : pSelect->iLimit; pSort->labelDone = sqlite3VdbeMakeLabel(v); sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, regOrigData, - SQLITE_ECEL_DUP|SQLITE_ECEL_REF); + SQLITE_ECEL_DUP | (regOrigData? SQLITE_ECEL_REF : 0)); if( bSeq ){ sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr); } - if( nPrefixReg==0 ){ + if( nPrefixReg==0 && nData>0 ){ sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+bSeq, nData); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regRecord); @@ -114961,7 +116369,8 @@ static void pushOntoSorter( }else{ op = OP_IdxInsert; } - sqlite3VdbeAddOp2(v, op, pSort->iECursor, regRecord); + sqlite3VdbeAddOp4Int(v, op, pSort->iECursor, regRecord, + regBase+nOBSat, nBase-nOBSat); if( iLimit ){ int addr; int r1 = 0; @@ -114969,7 +116378,7 @@ static void pushOntoSorter( ** register is initialized with value of LIMIT+OFFSET.) After the sorter ** fills up, delete the least entry in the sorter after each insert. ** Thus we never hold more than the LIMIT+OFFSET rows in memory at once */ - addr = sqlite3VdbeAddOp3(v, OP_IfNotZero, iLimit, 0, 1); VdbeCoverage(v); + addr = sqlite3VdbeAddOp1(v, OP_IfNotZero, iLimit); VdbeCoverage(v); sqlite3VdbeAddOp1(v, OP_Last, pSort->iECursor); if( pSort->bOrderedInnerLoop ){ r1 = ++pParse->nMem; @@ -115029,7 +116438,8 @@ static void codeDistinct( r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iTab, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r1, iMem, N); + sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); sqlite3ReleaseTempReg(pParse, r1); } @@ -115040,7 +116450,7 @@ static void codeDistinct( ** If srcTab is negative, then the pEList expressions ** are evaluated in order to get the data for this row. If srcTab is ** zero or more, then data is pulled from srcTab and pEList is used only -** to get number columns and the datatype for each column. +** to get the number of columns and the collation sequence for each column. 
*/ static void selectInnerLoop( Parse *pParse, /* The parser context */ @@ -115055,13 +116465,20 @@ static void selectInnerLoop( ){ Vdbe *v = pParse->pVdbe; int i; - int hasDistinct; /* True if the DISTINCT keyword is present */ - int regResult; /* Start of memory holding result set */ + int hasDistinct; /* True if the DISTINCT keyword is present */ int eDest = pDest->eDest; /* How to dispose of results */ int iParm = pDest->iSDParm; /* First argument to disposal method */ int nResultCol; /* Number of result columns */ int nPrefixReg = 0; /* Number of extra registers before regResult */ + /* Usually, regResult is the first cell in an array of memory cells + ** containing the current result row. In this case regOrig is set to the + ** same value. However, if the results are being sent to the sorter, the + ** values for any expressions that are also part of the sort-key are omitted + ** from this array. In this case regOrig is set to zero. */ + int regResult; /* Start of memory holding current results */ + int regOrig; /* Start of memory holding full result (or 0) */ + assert( v ); assert( pEList!=0 ); hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP; @@ -115092,7 +116509,7 @@ static void selectInnerLoop( pParse->nMem += nResultCol; } pDest->nSdst = nResultCol; - regResult = pDest->iSdst; + regOrig = regResult = pDest->iSdst; if( srcTab>=0 ){ for(i=0; ipOrderBy), set the associated + ** iOrderByCol value to one more than the index of the ORDER BY + ** expression within the sort-key that pushOntoSorter() will generate. + ** This allows the pEList field to be omitted from the sorted record, + ** saving space and CPU cycles. */ + ecelFlags |= (SQLITE_ECEL_OMITREF|SQLITE_ECEL_REF); + for(i=pSort->nOBSat; ipOrderBy->nExpr; i++){ + int j; + if( (j = pSort->pOrderBy->a[i].u.x.iOrderByCol)>0 ){ + pEList->a[j-1].u.x.iOrderByCol = i+1-pSort->nOBSat; + } + } + regOrig = 0; + assert( eDest==SRT_Set || eDest==SRT_Mem + || eDest==SRT_Coroutine || eDest==SRT_Output ); + } + nResultCol = sqlite3ExprCodeExprList(pParse,pEList,regResult,0,ecelFlags); } /* If the DISTINCT keyword was present on the SELECT statement @@ -115182,7 +116617,7 @@ static void selectInnerLoop( int r1; r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol); sqlite3ReleaseTempReg(pParse, r1); break; } @@ -115219,7 +116654,7 @@ static void selectInnerLoop( int addr = sqlite3VdbeCurrentAddr(v) + 4; sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm+1, r1,regResult,nResultCol); assert( pSort==0 ); } #endif @@ -115248,14 +116683,14 @@ static void selectInnerLoop( ** does not matter. 
But there might be a LIMIT clause, in which ** case the order does matter */ pushOntoSorter( - pParse, pSort, p, regResult, regResult, nResultCol, nPrefixReg); + pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); }else{ int r1 = sqlite3GetTempReg(pParse); assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol, r1, pDest->zAffSdst, nResultCol); sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol); sqlite3ReleaseTempReg(pParse, r1); } break; @@ -115274,11 +116709,12 @@ static void selectInnerLoop( ** memory cells and break out of the scan loop. */ case SRT_Mem: { - assert( nResultCol==pDest->nSdst ); if( pSort ){ + assert( nResultCol<=pDest->nSdst ); pushOntoSorter( - pParse, pSort, p, regResult, regResult, nResultCol, nPrefixReg); + pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); }else{ + assert( nResultCol==pDest->nSdst ); assert( regResult==iParm ); /* The LIMIT clause will jump out of the loop for us */ } @@ -115291,7 +116727,7 @@ static void selectInnerLoop( testcase( eDest==SRT_Coroutine ); testcase( eDest==SRT_Output ); if( pSort ){ - pushOntoSorter(pParse, pSort, p, regResult, regResult, nResultCol, + pushOntoSorter(pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); }else if( eDest==SRT_Coroutine ){ sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); @@ -115341,7 +116777,7 @@ static void selectInnerLoop( } sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey); sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, r2, nKey+2); if( addrTest ) sqlite3VdbeJumpHere(v, addrTest); sqlite3ReleaseTempReg(pParse, r1); sqlite3ReleaseTempRange(pParse, r2, nKey+2); @@ -115576,14 +117012,13 @@ static void generateSortTail( int iParm = pDest->iSDParm; int regRow; int regRowid; + int iCol; int nKey; int iSortTab; /* Sorter cursor to read from */ int nSortData; /* Trailing values to read from sorter */ int i; int bSeq; /* True if sorter record includes seq. no. 
*/ -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS struct ExprList_item *aOutEx = p->pEList->a; -#endif assert( addrBreak<0 ); if( pSort->labelBkOut ){ @@ -115621,11 +117056,18 @@ static void generateSortTail( iSortTab = iTab; bSeq = 1; } - for(i=0; izAffSdst, nColumn); sqlite3ExprCacheAffinityChange(pParse, regRow, nColumn); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, regRowid); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, regRowid, regRow, nColumn); break; } case SRT_Mem: { @@ -116147,7 +117589,7 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){ /* The sqlite3ResultSetOfSelect() is only used n contexts where lookaside ** is disabled */ assert( db->lookaside.bDisable ); - pTab->nRef = 1; + pTab->nTabRef = 1; pTab->zName = 0; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol); @@ -116378,6 +117820,7 @@ static void generateWithRecursiveQuery( /* Process the LIMIT and OFFSET clauses, if they exist */ addrBreak = sqlite3VdbeMakeLabel(v); + p->nSelectRow = 320; /* 4 billion rows */ computeLimitRegisters(pParse, p, addrBreak); pLimit = p->pLimit; pOffset = p->pOffset; @@ -116847,7 +118290,7 @@ static int multiSelect( computeLimitRegisters(pParse, p, iBreak); sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v); r1 = sqlite3GetTempReg(pParse); - iStart = sqlite3VdbeAddOp2(v, OP_RowKey, tab1, r1); + iStart = sqlite3VdbeAddOp2(v, OP_RowData, tab1, r1); sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0); VdbeCoverage(v); sqlite3ReleaseTempReg(pParse, r1); selectInnerLoop(pParse, p, p->pEList, tab1, @@ -117014,7 +118457,8 @@ static int generateOutputSubroutine( sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, r1, pDest->zAffSdst, pIn->nSdst); sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst); - sqlite3VdbeAddOp2(v, OP_IdxInsert, pDest->iSDParm, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1, + pIn->iSdst, pIn->nSdst); sqlite3ReleaseTempReg(pParse, r1); break; } @@ -117473,8 +118917,8 @@ static int multiSelectOrderBy( #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* Forward Declarations */ -static void substExprList(sqlite3*, ExprList*, int, ExprList*); -static void substSelect(sqlite3*, Select *, int, ExprList*, int); +static void substExprList(Parse*, ExprList*, int, ExprList*); +static void substSelect(Parse*, Select *, int, ExprList*, int); /* ** Scan through the expression pExpr. Replace every reference to @@ -117490,36 +118934,46 @@ static void substSelect(sqlite3*, Select *, int, ExprList*, int); ** of the subquery rather the result set of the subquery. 
*/ static Expr *substExpr( - sqlite3 *db, /* Report malloc errors to this connection */ + Parse *pParse, /* Report errors here */ Expr *pExpr, /* Expr in which substitution occurs */ int iTable, /* Table to be substituted */ ExprList *pEList /* Substitute expressions */ ){ + sqlite3 *db = pParse->db; if( pExpr==0 ) return 0; if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){ if( pExpr->iColumn<0 ){ pExpr->op = TK_NULL; }else{ Expr *pNew; + Expr *pCopy = pEList->a[pExpr->iColumn].pExpr; assert( pEList!=0 && pExpr->iColumnnExpr ); assert( pExpr->pLeft==0 && pExpr->pRight==0 ); - pNew = sqlite3ExprDup(db, pEList->a[pExpr->iColumn].pExpr, 0); - sqlite3ExprDelete(db, pExpr); - pExpr = pNew; + if( sqlite3ExprIsVector(pCopy) ){ + sqlite3VectorErrorMsg(pParse, pCopy); + }else{ + pNew = sqlite3ExprDup(db, pCopy, 0); + if( pNew && (pExpr->flags & EP_FromJoin) ){ + pNew->iRightJoinTable = pExpr->iRightJoinTable; + pNew->flags |= EP_FromJoin; + } + sqlite3ExprDelete(db, pExpr); + pExpr = pNew; + } } }else{ - pExpr->pLeft = substExpr(db, pExpr->pLeft, iTable, pEList); - pExpr->pRight = substExpr(db, pExpr->pRight, iTable, pEList); + pExpr->pLeft = substExpr(pParse, pExpr->pLeft, iTable, pEList); + pExpr->pRight = substExpr(pParse, pExpr->pRight, iTable, pEList); if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - substSelect(db, pExpr->x.pSelect, iTable, pEList, 1); + substSelect(pParse, pExpr->x.pSelect, iTable, pEList, 1); }else{ - substExprList(db, pExpr->x.pList, iTable, pEList); + substExprList(pParse, pExpr->x.pList, iTable, pEList); } } return pExpr; } static void substExprList( - sqlite3 *db, /* Report malloc errors here */ + Parse *pParse, /* Report errors here */ ExprList *pList, /* List to scan and in which to make substitutes */ int iTable, /* Table to be substituted */ ExprList *pEList /* Substitute values */ @@ -117527,11 +118981,11 @@ static void substExprList( int i; if( pList==0 ) return; for(i=0; inExpr; i++){ - pList->a[i].pExpr = substExpr(db, pList->a[i].pExpr, iTable, pEList); + pList->a[i].pExpr = substExpr(pParse, pList->a[i].pExpr, iTable, pEList); } } static void substSelect( - sqlite3 *db, /* Report malloc errors here */ + Parse *pParse, /* Report errors here */ Select *p, /* SELECT statement in which to make substitutions */ int iTable, /* Table to be replaced */ ExprList *pEList, /* Substitute values */ @@ -117542,17 +118996,17 @@ static void substSelect( int i; if( !p ) return; do{ - substExprList(db, p->pEList, iTable, pEList); - substExprList(db, p->pGroupBy, iTable, pEList); - substExprList(db, p->pOrderBy, iTable, pEList); - p->pHaving = substExpr(db, p->pHaving, iTable, pEList); - p->pWhere = substExpr(db, p->pWhere, iTable, pEList); + substExprList(pParse, p->pEList, iTable, pEList); + substExprList(pParse, p->pGroupBy, iTable, pEList); + substExprList(pParse, p->pOrderBy, iTable, pEList); + p->pHaving = substExpr(pParse, p->pHaving, iTable, pEList); + p->pWhere = substExpr(pParse, p->pWhere, iTable, pEList); pSrc = p->pSrc; assert( pSrc!=0 ); for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - substSelect(db, pItem->pSelect, iTable, pEList, 1); + substSelect(pParse, pItem->pSelect, iTable, pEList, 1); if( pItem->fg.isTabFunc ){ - substExprList(db, pItem->u1.pFuncArg, iTable, pEList); + substExprList(pParse, pItem->u1.pFuncArg, iTable, pEList); } } }while( doPrior && (p = p->pPrior)!=0 ); @@ -117949,12 +119403,12 @@ static int flattenSubquery( */ if( ALWAYS(pSubitem->pTab!=0) ){ Table *pTabToDel = pSubitem->pTab; - if( pTabToDel->nRef==1 ){ + if( pTabToDel->nTabRef==1 
){ Parse *pToplevel = sqlite3ParseToplevel(pParse); pTabToDel->pNextZombie = pToplevel->pZombieTab; pToplevel->pZombieTab = pTabToDel; }else{ - pTabToDel->nRef--; + pTabToDel->nTabRef--; } pSubitem->pTab = 0; } @@ -118077,7 +119531,7 @@ static int flattenSubquery( }else{ pParent->pWhere = sqlite3ExprAnd(db, pWhere, pParent->pWhere); } - substSelect(db, pParent, iParent, pSub->pEList, 0); + substSelect(pParse, pParent, iParent, pSub->pEList, 0); /* The flattened query is distinct if either the inner or the ** outer query is distinct. @@ -118151,7 +119605,7 @@ static int flattenSubquery( ** terms are duplicated into the subquery. */ static int pushDownWhereTerms( - sqlite3 *db, /* The database connection (for malloc()) */ + Parse *pParse, /* Parse context (for malloc() and error reporting) */ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ Expr *pWhere, /* The WHERE clause of the outer query */ int iCursor /* Cursor number of the subquery */ @@ -118172,16 +119626,16 @@ static int pushDownWhereTerms( return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ - nChng += pushDownWhereTerms(db, pSubq, pWhere->pRight, iCursor); + nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, iCursor); pWhere = pWhere->pLeft; } if( ExprHasProperty(pWhere,EP_FromJoin) ) return 0; /* restriction 5 */ if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){ nChng++; while( pSubq ){ - pNew = sqlite3ExprDup(db, pWhere, 0); - pNew = substExpr(db, pNew, iCursor, pSubq->pEList); - pSubq->pWhere = sqlite3ExprAnd(db, pSubq->pWhere, pNew); + pNew = sqlite3ExprDup(pParse->db, pWhere, 0); + pNew = substExpr(pParse, pNew, iCursor, pSubq->pEList); + pSubq->pWhere = sqlite3ExprAnd(pParse->db, pSubq->pWhere, pNew); pSubq = pSubq->pPrior; } } @@ -118473,7 +119927,7 @@ static int withExpand( assert( pFrom->pTab==0 ); pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return WRC_Abort; - pTab->nRef = 1; + pTab->nTabRef = 1; pTab->zName = sqlite3DbStrDup(db, pCte->zName); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); @@ -118496,25 +119950,33 @@ static int withExpand( ){ pItem->pTab = pTab; pItem->fg.isRecursive = 1; - pTab->nRef++; + pTab->nTabRef++; pSel->selFlags |= SF_Recursive; } } } /* Only one recursive reference is permitted. */ - if( pTab->nRef>2 ){ + if( pTab->nTabRef>2 ){ sqlite3ErrorMsg( pParse, "multiple references to recursive table: %s", pCte->zName ); return SQLITE_ERROR; } - assert( pTab->nRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nRef==2 )); + assert( pTab->nTabRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 )); pCte->zCteErr = "circular reference: %s"; pSavedWith = pParse->pWith; pParse->pWith = pWith; - sqlite3WalkSelect(pWalker, bMayRecursive ? 
pSel->pPrior : pSel); + if( bMayRecursive ){ + Select *pPrior = pSel->pPrior; + assert( pPrior->pWith==0 ); + pPrior->pWith = pSel->pWith; + sqlite3WalkSelect(pWalker, pPrior); + pPrior->pWith = 0; + }else{ + sqlite3WalkSelect(pWalker, pSel); + } pParse->pWith = pWith; for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior); @@ -118558,10 +120020,12 @@ static int withExpand( */ static void selectPopWith(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; - With *pWith = findRightmost(p)->pWith; - if( pWith!=0 ){ - assert( pParse->pWith==pWith ); - pParse->pWith = pWith->pOuter; + if( pParse->pWith && p->pPrior==0 ){ + With *pWith = findRightmost(p)->pWith; + if( pWith!=0 ){ + assert( pParse->pWith==pWith ); + pParse->pWith = pWith->pOuter; + } } } #else @@ -118611,8 +120075,8 @@ static int selectExpander(Walker *pWalker, Select *p){ } pTabList = p->pSrc; pEList = p->pEList; - if( pWalker->xSelectCallback2==selectPopWith ){ - sqlite3WithPush(pParse, findRightmost(p)->pWith, 0); + if( p->pWith ){ + sqlite3WithPush(pParse, p->pWith, 0); } /* Make sure cursor numbers have been assigned to all entries in @@ -118642,7 +120106,7 @@ static int selectExpander(Walker *pWalker, Select *p){ if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return WRC_Abort; - pTab->nRef = 1; + pTab->nTabRef = 1; pTab->zName = sqlite3MPrintf(db, "sqlite_sq_%p", (void*)pTab); while( pSel->pPrior ){ pSel = pSel->pPrior; } sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol); @@ -118655,13 +120119,13 @@ static int selectExpander(Walker *pWalker, Select *p){ assert( pFrom->pTab==0 ); pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); if( pTab==0 ) return WRC_Abort; - if( pTab->nRef==0xffff ){ + if( pTab->nTabRef>=0xffff ){ sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535", pTab->zName); pFrom->pTab = 0; return WRC_Abort; } - pTab->nRef++; + pTab->nTabRef++; if( !IsVirtual(pTab) && cannotBeFunction(pParse, pFrom) ){ return WRC_Abort; } @@ -118811,10 +120275,10 @@ static int selectExpander(Walker *pWalker, Select *p){ if( longNames || pTabList->nSrc>1 ){ Expr *pLeft; pLeft = sqlite3Expr(db, TK_ID, zTabName); - pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); + pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); if( zSchemaName ){ pLeft = sqlite3Expr(db, TK_ID, zSchemaName); - pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr, 0); + pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr); } if( longNames ){ zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName); @@ -118899,9 +120363,7 @@ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){ sqlite3WalkSelect(&w, pSelect); } w.xSelectCallback = selectExpander; - if( (pSelect->selFlags & SF_MultiValue)==0 ){ - w.xSelectCallback2 = selectPopWith; - } + w.xSelectCallback2 = selectPopWith; sqlite3WalkSelect(&w, pSelect); } @@ -119051,8 +120513,8 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ ExprList *pList = pF->pExpr->x.pList; assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) ); - sqlite3VdbeAddOp4(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0, 0, - (void*)pF->pFunc, P4_FUNCDEF); + sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? 
pList->nExpr : 0); + sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); } } @@ -119103,8 +120565,8 @@ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){ if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem; sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ); } - sqlite3VdbeAddOp4(v, OP_AggStep0, 0, regAgg, pF->iMem, - (void*)pF->pFunc, P4_FUNCDEF); + sqlite3VdbeAddOp3(v, OP_AggStep0, 0, regAgg, pF->iMem); + sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); sqlite3VdbeChangeP5(v, (u8)nArg); sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg); sqlite3ReleaseTempRange(pParse, regAgg, nArg); @@ -119338,7 +120800,7 @@ SQLITE_PRIVATE int sqlite3Select( ** inside the subquery. This can help the subquery to run more efficiently. */ if( (pItem->fg.jointype & JT_OUTER)==0 - && pushDownWhereTerms(db, pSub, p->pWhere, pItem->iCursor) + && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor) ){ #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x100 ){ @@ -119500,7 +120962,9 @@ SQLITE_PRIVATE int sqlite3Select( /* Set the limiter. */ iEnd = sqlite3VdbeMakeLabel(v); - p->nSelectRow = 320; /* 4 billion rows */ + if( (p->selFlags & SF_FixedLimit)==0 ){ + p->nSelectRow = 320; /* 4 billion rows */ + } computeLimitRegisters(pParse, p, iEnd); if( p->iLimit==0 && sSort.addrSortIndex>=0 ){ sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen); @@ -119978,7 +121442,7 @@ SQLITE_PRIVATE int sqlite3Select( ** of output. */ resetAccumulator(pParse, &sAggInfo); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMax,0,flag,0); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMax, 0,flag,0); if( pWInfo==0 ){ sqlite3ExprListDelete(db, pDel); goto select_end; @@ -120067,8 +121531,6 @@ SQLITE_PRIVATE int sqlite3Select( ** if they are not used. 
*/ /* #include "sqliteInt.h" */ -/* #include */ -/* #include */ #ifndef SQLITE_OMIT_GET_TABLE @@ -120561,7 +122023,7 @@ SQLITE_PRIVATE void sqlite3FinishTrigger( z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n); sqlite3NestedParse(pParse, "INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", - db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), zName, + db->aDb[iDb].zDbSName, MASTER_NAME, zName, pTrig->table, z); sqlite3DbFree(db, z); sqlite3ChangeCookie(pParse, iDb); @@ -120812,7 +122274,7 @@ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){ if( (v = sqlite3GetVdbe(pParse))!=0 ){ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE name=%Q AND type='trigger'", - db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), pTrigger->zName + db->aDb[iDb].zDbSName, MASTER_NAME, pTrigger->zName ); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->zName, 0); @@ -121424,14 +122886,14 @@ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){ sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc, pCol->affinity, &pValue); if( pValue ){ - sqlite3VdbeChangeP4(v, -1, (const char *)pValue, P4_MEM); + sqlite3VdbeAppendP4(v, pValue, P4_MEM); } + } #ifndef SQLITE_OMIT_FLOATING_POINT - if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){ - sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); - } -#endif + if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){ + sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); } +#endif } /* @@ -121460,7 +122922,7 @@ SQLITE_PRIVATE void sqlite3Update( int iDataCur; /* Cursor for the canonical data btree */ int iIdxCur; /* Cursor for the first index */ sqlite3 *db; /* The database structure */ - int *aRegIdx = 0; /* One register assigned to each index to be updated */ + int *aRegIdx = 0; /* First register in array assigned to each index */ int *aXRef = 0; /* aXRef[i] is the index in pChanges->a[] of the ** an expression for the i-th column of the table. ** aXRef[i]==-1 if the i-th column is not changed. 
*/ @@ -121472,10 +122934,11 @@ SQLITE_PRIVATE void sqlite3Update( AuthContext sContext; /* The authorization context */ NameContext sNC; /* The name-context to resolve expressions in */ int iDb; /* Database containing the table being updated */ - int okOnePass; /* True for one-pass algorithm without the FIFO */ + int eOnePass; /* ONEPASS_XXX value from where.c */ int hasFK; /* True if foreign key processing is required */ int labelBreak; /* Jump here to break out of UPDATE loop */ int labelContinue; /* Jump here to continue next step of UPDATE loop */ + int flags; /* Flags for sqlite3WhereBegin() */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True when updating a view (INSTEAD OF trigger) */ @@ -121486,6 +122949,10 @@ SQLITE_PRIVATE void sqlite3Update( int iEph = 0; /* Ephemeral table holding all primary key values */ int nKey = 0; /* Number of elements in regKey for WITHOUT ROWID */ int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */ + int addrOpen = 0; /* Address of OP_OpenEphemeral */ + int iPk = 0; /* First of nPk cells holding PRIMARY KEY value */ + i16 nPk = 0; /* Number of components of the PRIMARY KEY */ + int bReplace = 0; /* True if REPLACE conflict resolution might happen */ /* Register Allocations */ int regRowCount = 0; /* A count of rows changed */ @@ -121637,12 +123104,19 @@ SQLITE_PRIVATE void sqlite3Update( int reg; if( chngKey || hasFK || pIdx->pPartIdxWhere || pIdx==pPk ){ reg = ++pParse->nMem; + pParse->nMem += pIdx->nColumn; }else{ reg = 0; for(i=0; inKeyCol; i++){ i16 iIdxCol = pIdx->aiColumn[i]; if( iIdxCol<0 || aXRef[iIdxCol]>=0 ){ reg = ++pParse->nMem; + pParse->nMem += pIdx->nColumn; + if( (onError==OE_Replace) + || (onError==OE_Default && pIdx->onError==OE_Replace) + ){ + bReplace = 1; + } break; } } @@ -121650,6 +123124,11 @@ SQLITE_PRIVATE void sqlite3Update( if( reg==0 ) aToOpen[j+1] = 0; aRegIdx[j] = reg; } + if( bReplace ){ + /* If REPLACE conflict resolution might be invoked, open cursors on all + ** indexes in case they are needed to delete records. */ + memset(aToOpen, 1, nIdx+1); + } /* Begin generating code. */ v = sqlite3GetVdbe(pParse); @@ -121702,110 +123181,130 @@ SQLITE_PRIVATE void sqlite3Update( } #endif - /* Begin the database scan - */ + /* Initialize the count of updated rows */ + if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){ + regRowCount = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); + } + if( HasRowid(pTab) ){ sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid); - pWInfo = sqlite3WhereBegin( - pParse, pTabList, pWhere, 0, 0, - WHERE_ONEPASS_DESIRED | WHERE_SEEK_TABLE, iIdxCur - ); - if( pWInfo==0 ) goto update_cleanup; - okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); - - /* Remember the rowid of every item to be updated. - */ - sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid); - if( !okOnePass ){ - sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid); - } - - /* End the database scan loop. 
- */ - sqlite3WhereEnd(pWInfo); }else{ - int iPk; /* First of nPk memory cells holding PRIMARY KEY value */ - i16 nPk; /* Number of components of the PRIMARY KEY */ - int addrOpen; /* Address of the OpenEphemeral instruction */ - assert( pPk!=0 ); nPk = pPk->nKeyCol; iPk = pParse->nMem+1; pParse->nMem += nPk; regKey = ++pParse->nMem; iEph = pParse->nTab++; + sqlite3VdbeAddOp2(v, OP_Null, 0, iPk); addrOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEph, nPk); sqlite3VdbeSetP4KeyInfo(pParse, pPk); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, - WHERE_ONEPASS_DESIRED, iIdxCur); - if( pWInfo==0 ) goto update_cleanup; - okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); + } + + /* Begin the database scan. + ** + ** Do not consider a single-pass strategy for a multi-row update if + ** there are any triggers or foreign keys to process, or rows may + ** be deleted as a result of REPLACE conflict handling. Any of these + ** things might disturb a cursor being used to scan through the table + ** or index, causing a single-pass approach to malfunction. */ + flags = WHERE_ONEPASS_DESIRED|WHERE_SEEK_UNIQ_TABLE; + if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){ + flags |= WHERE_ONEPASS_MULTIROW; + } + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, flags, iIdxCur); + if( pWInfo==0 ) goto update_cleanup; + + /* A one-pass strategy that might update more than one row may not + ** be used if any column of the index used for the scan is being + ** updated. Otherwise, if there is an index on "b", statements like + ** the following could create an infinite loop: + ** + ** UPDATE t1 SET b=b+1 WHERE b>? + ** + ** Fall back to ONEPASS_OFF if where.c has selected a ONEPASS_MULTI + ** strategy that uses an index for which one or more columns are being + ** updated. */ + eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); + if( eOnePass==ONEPASS_MULTI ){ + int iCur = aiCurOnePass[1]; + if( iCur>=0 && iCur!=iDataCur && aToOpen[iCur-iBaseCur] ){ + eOnePass = ONEPASS_OFF; + } + assert( iCur!=iDataCur || !HasRowid(pTab) ); + } + + if( HasRowid(pTab) ){ + /* Read the rowid of the current row of the WHERE scan. In ONEPASS_OFF + ** mode, write the rowid into the FIFO. In either of the one-pass modes, + ** leave it in register regOldRowid. */ + sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid); + if( eOnePass==ONEPASS_OFF ){ + sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid); + } + }else{ + /* Read the PK of the current row into an array of registers. In + ** ONEPASS_OFF mode, serialize the array into a record and store it in + ** the ephemeral table. Or, in ONEPASS_SINGLE or MULTI mode, change + ** the OP_OpenEphemeral instruction to a Noop (the ephemeral table + ** is not required) and leave the PK fields in the array of registers. 
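The comment above spells out when the multi-row one-pass strategy must be refused, using "UPDATE t1 SET b=b+1 WHERE b>?" against an index on b as the hazard case. The snippet below simply exercises that case from the public API as a sanity check that, whichever scan strategy the planner selects, each qualifying row is updated exactly once; it is an external illustration, not part of the diff, and the schema is invented for the example.

/* Illustrative check of the indexed-column UPDATE from the comment above. */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
      "CREATE TABLE t1(a INTEGER PRIMARY KEY, b INT);"
      "CREATE INDEX i1 ON t1(b);"
      "INSERT INTO t1(b) VALUES(1),(2),(3);"
      "UPDATE t1 SET b=b+1 WHERE b>0;", 0, 0, 0);
  /* Expect 2, 3, 4: every row bumped once, no re-visit through index i1. */
  sqlite3_prepare_v2(db, "SELECT b FROM t1 ORDER BY a", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%d\n", sqlite3_column_int(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}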
*/ for(i=0; iaiColumn[i]>=0 ); - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, pPk->aiColumn[i], - iPk+i); + sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur,pPk->aiColumn[i],iPk+i); } - if( okOnePass ){ + if( eOnePass ){ sqlite3VdbeChangeToNoop(v, addrOpen); nKey = nPk; regKey = iPk; }else{ sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, regKey, sqlite3IndexAffinityStr(db, pPk), nPk); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iEph, regKey); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEph, regKey, iPk, nPk); } - sqlite3WhereEnd(pWInfo); } - /* Initialize the count of updated rows - */ - if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){ - regRowCount = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); + if( eOnePass!=ONEPASS_MULTI ){ + sqlite3WhereEnd(pWInfo); } labelBreak = sqlite3VdbeMakeLabel(v); if( !isView ){ - /* - ** Open every index that needs updating. Note that if any - ** index could potentially invoke a REPLACE conflict resolution - ** action, then we need to open all indices because we might need - ** to be deleting some records. - */ - if( onError==OE_Replace ){ - memset(aToOpen, 1, nIdx+1); - }else{ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->onError==OE_Replace ){ - memset(aToOpen, 1, nIdx+1); - break; - } - } - } - if( okOnePass ){ + int addrOnce = 0; + + /* Open every index that needs updating. */ + if( eOnePass!=ONEPASS_OFF ){ if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iBaseCur] = 0; if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iBaseCur] = 0; } + + if( eOnePass==ONEPASS_MULTI && (nIdx-(aiCurOnePass[1]>=0))>0 ){ + addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); + } sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur, aToOpen, 0, 0); + if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce); } /* Top of the update loop */ - if( okOnePass ){ - if( aToOpen[iDataCur-iBaseCur] && !isView ){ + if( eOnePass!=ONEPASS_OFF ){ + if( !isView && aiCurOnePass[0]!=iDataCur && aiCurOnePass[1]!=iDataCur ){ assert( pPk ); sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey, nKey); VdbeCoverageNeverTaken(v); } - labelContinue = labelBreak; + if( eOnePass==ONEPASS_SINGLE ){ + labelContinue = labelBreak; + }else{ + labelContinue = sqlite3VdbeMakeLabel(v); + } sqlite3VdbeAddOp2(v, OP_IsNull, pPk ? regKey : regOldRowid, labelBreak); VdbeCoverageIf(v, pPk==0); VdbeCoverageIf(v, pPk!=0); }else if( pPk ){ labelContinue = sqlite3VdbeMakeLabel(v); sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v); - addrTop = sqlite3VdbeAddOp2(v, OP_RowKey, iEph, regKey); + addrTop = sqlite3VdbeAddOp2(v, OP_RowData, iEph, regKey); sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue, regKey, 0); VdbeCoverage(v); }else{ @@ -121923,7 +123422,6 @@ SQLITE_PRIVATE void sqlite3Update( if( !isView ){ int addr1 = 0; /* Address of jump instruction */ - int bReplace = 0; /* True if REPLACE conflict resolution might happen */ /* Do constraint checks. */ assert( regOldRowid>0 ); @@ -121959,14 +123457,18 @@ SQLITE_PRIVATE void sqlite3Update( assert( regNew==regNewRowid+1 ); #ifdef SQLITE_ENABLE_PREUPDATE_HOOK sqlite3VdbeAddOp3(v, OP_Delete, iDataCur, - OPFLAG_ISUPDATE | ((hasFK || chngKey || pPk!=0) ? 0 : OPFLAG_ISNOOP), + OPFLAG_ISUPDATE | ((hasFK || chngKey) ? 
0 : OPFLAG_ISNOOP), regNewRowid ); + if( eOnePass==ONEPASS_MULTI ){ + assert( hasFK==0 && chngKey==0 ); + sqlite3VdbeChangeP5(v, OPFLAG_SAVEPOSITION); + } if( !pParse->nested ){ - sqlite3VdbeChangeP4(v, -1, (char*)pTab, P4_TABLE); + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); } #else - if( hasFK || chngKey || pPk!=0 ){ + if( hasFK || chngKey ){ sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0); } #endif @@ -121979,8 +123481,11 @@ SQLITE_PRIVATE void sqlite3Update( } /* Insert the new index entries and the new record. */ - sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur, - regNewRowid, aRegIdx, 1, 0, 0); + sqlite3CompleteInsertion( + pParse, pTab, iDataCur, iIdxCur, regNewRowid, aRegIdx, + OPFLAG_ISUPDATE | (eOnePass==ONEPASS_MULTI ? OPFLAG_SAVEPOSITION : 0), + 0, 0 + ); /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to ** handle rows (possibly in other tables) that refer via a foreign key @@ -122002,8 +123507,11 @@ SQLITE_PRIVATE void sqlite3Update( /* Repeat the above with the next record to be updated, until ** all record selected by the WHERE clause have been updated. */ - if( okOnePass ){ + if( eOnePass==ONEPASS_SINGLE ){ /* Nothing to do at end-of-loop for a single-pass */ + }else if( eOnePass==ONEPASS_MULTI ){ + sqlite3VdbeResolveLabel(v, labelContinue); + sqlite3WhereEnd(pWInfo); }else if( pPk ){ sqlite3VdbeResolveLabel(v, labelContinue); sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v); @@ -122012,15 +123520,6 @@ SQLITE_PRIVATE void sqlite3Update( } sqlite3VdbeResolveLabel(v, labelBreak); - /* Close all tables */ - for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ - assert( aRegIdx ); - if( aToOpen[i+1] ){ - sqlite3VdbeAddOp2(v, OP_Close, iIdxCur+i, 0); - } - } - if( iDataCurzName = zCopy; + pMod->pModule = pModule; + pMod->pAux = pAux; + pMod->xDestroy = xDestroy; + pMod->pEpoTab = 0; + pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod); + assert( pDel==0 || pDel==pMod ); + if( pDel ){ + sqlite3OomFault(db); + sqlite3DbFree(db, pDel); + pMod = 0; + } + } + return pMod; +} + /* ** The actual function that does the work of creating a new module. 
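Not part of the patch: the UPDATE hunks above replace the old okOnePass boolean with the three-valued eOnePass and only request a multi-row one-pass scan when nothing can delete or move rows underneath the scanning cursor. A minimal C sketch of that eligibility rule, using exactly the conditions named in the diff (nested statement, triggers, foreign keys, key change, possible REPLACE):

/* Illustrative sketch only: mirrors the flag selection in sqlite3Update()
** above.  When any of these is true, the planner must fall back to the
** older two-pass plan (rowid FIFO or ephemeral PK table). */
static int onePassMultiRowAllowed(
  int isNested,     /* statement generated internally (pParse->nested) */
  int hasTriggers,  /* UPDATE triggers exist on the table */
  int hasFK,        /* foreign-key processing is required */
  int changesKey,   /* the rowid/PRIMARY KEY is being changed */
  int mayReplace    /* REPLACE conflict resolution might delete rows */
){
  return !isNested && !hasTriggers && !hasFK && !changesKey && !mayReplace;
}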
** This function implements the sqlite3_create_module() and @@ -122583,35 +124117,15 @@ static int createModule( void (*xDestroy)(void *) /* Module destructor function */ ){ int rc = SQLITE_OK; - int nName; sqlite3_mutex_enter(db->mutex); - nName = sqlite3Strlen30(zName); if( sqlite3HashFind(&db->aModule, zName) ){ rc = SQLITE_MISUSE_BKPT; }else{ - Module *pMod; - pMod = (Module *)sqlite3DbMallocRawNN(db, sizeof(Module) + nName + 1); - if( pMod ){ - Module *pDel; - char *zCopy = (char *)(&pMod[1]); - memcpy(zCopy, zName, nName+1); - pMod->zName = zCopy; - pMod->pModule = pModule; - pMod->pAux = pAux; - pMod->xDestroy = xDestroy; - pMod->pEpoTab = 0; - pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod); - assert( pDel==0 || pDel==pMod ); - if( pDel ){ - sqlite3OomFault(db); - sqlite3DbFree(db, pDel); - } - } + (void)sqlite3VtabCreateModule(db, zName, pModule, pAux, xDestroy); } rc = sqlite3ApiExit(db, rc); if( rc!=SQLITE_OK && xDestroy ) xDestroy(pAux); - sqlite3_mutex_leave(db->mutex); return rc; } @@ -122950,7 +124464,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ "UPDATE %Q.%s " "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " "WHERE rowid=#%d", - db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), + db->aDb[iDb].zDbSName, MASTER_NAME, pTab->zName, pTab->zName, zStmt, @@ -123675,7 +125189,7 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){ return 0; } pMod->pEpoTab = pTab; - pTab->nRef = 1; + pTab->nTabRef = 1; pTab->pSchema = db->aDb[0].pSchema; pTab->tabFlags |= TF_Virtual; pTab->nModuleArg = 0; @@ -124772,6 +126286,7 @@ static int codeEqualityTerm( }else{ Select *pSelect = pX->x.pSelect; sqlite3 *db = pParse->db; + u16 savedDbOptFlags = db->dbOptFlags; ExprList *pOrigRhs = pSelect->pEList; ExprList *pOrigLhs = pX->pLeft->x.pList; ExprList *pRhs = 0; /* New Select.pEList for RHS */ @@ -124815,7 +126330,9 @@ static int codeEqualityTerm( testcase( aiMap==0 ); } pSelect->pEList = pRhs; + db->dbOptFlags |= SQLITE_QueryFlattener; eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap); + db->dbOptFlags = savedDbOptFlags; testcase( aiMap!=0 && aiMap[0]!=0 ); pSelect->pEList = pOrigRhs; pLeft->x.pList = pOrigLhs; @@ -125465,7 +126982,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1); sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg, pLoop->u.vtab.idxStr, - pLoop->u.vtab.needFree ? P4_MPRINTF : P4_STATIC); + pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC); VdbeCoverage(v); pLoop->u.vtab.needFree = 0; pLevel->p1 = iCur; @@ -125498,7 +127015,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( /* Generate code that will continue to the next row if ** the IN constraint is not satisfied */ - pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0, 0); + pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0); assert( pCompare!=0 || db->mallocFailed ); if( pCompare ){ pCompare->pLeft = pTerm->pExpr->pLeft; @@ -125914,7 +127431,10 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( omitTable ){ /* pIdx is a covering index. No need to access the main table. 
*/ }else if( HasRowid(pIdx->pTable) ){ - if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE)!=0 ){ + if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE) || ( + (pWInfo->wctrlFlags & WHERE_SEEK_UNIQ_TABLE) + && (pWInfo->eOnePass==ONEPASS_SINGLE) + )){ iRowidReg = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg); sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); @@ -126097,7 +127617,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pAndExpr = sqlite3ExprAnd(db, pAndExpr, pExpr); } if( pAndExpr ){ - pAndExpr = sqlite3PExpr(pParse, TK_AND|TKFLG_DONTFOLD, 0, pAndExpr, 0); + pAndExpr = sqlite3PExpr(pParse, TK_AND|TKFLG_DONTFOLD, 0, pAndExpr); } } @@ -126170,7 +127690,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } if( iSet>=0 ){ sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid); - sqlite3VdbeAddOp3(v, OP_IdxInsert, regRowset, regRowid, 0); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, regRowset, regRowid, + r, nPk); if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); } @@ -127098,7 +128619,7 @@ static void exprAnalyzeOrTerm( } assert( pLeft!=0 ); pDup = sqlite3ExprDup(db, pLeft, 0); - pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0, 0); + pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0); if( pNew ){ int idxNew; transferJoinMarkings(pNew, pExpr); @@ -127277,6 +128798,7 @@ static void exprAnalyze( Parse *pParse = pWInfo->pParse; /* Parsing context */ sqlite3 *db = pParse->db; /* Database connection */ unsigned char eOp2; /* op2 value for LIKE/REGEXP/GLOB */ + int nLeft; /* Number of elements on left side vector */ if( db->mallocFailed ){ return; @@ -127306,6 +128828,10 @@ static void exprAnalyze( prereqAll |= x; extraRight = x-1; /* ON clause terms may not be used with an index ** on left table of a LEFT JOIN. Ticket #3015 */ + if( (prereqAll>>1)>=x ){ + sqlite3ErrorMsg(pParse, "ON clause references tables to its right"); + return; + } } pTerm->prereqAll = prereqAll; pTerm->leftCursor = -1; @@ -127396,7 +128922,7 @@ static void exprAnalyze( int idxNew; pNewExpr = sqlite3PExpr(pParse, ops[i], sqlite3ExprDup(db, pExpr->pLeft, 0), - sqlite3ExprDup(db, pList->a[i].pExpr, 0), 0); + sqlite3ExprDup(db, pList->a[i].pExpr, 0)); transferJoinMarkings(pNewExpr, pExpr); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); @@ -127481,7 +129007,7 @@ static void exprAnalyze( pNewExpr1 = sqlite3ExprDup(db, pLeft, 0); pNewExpr1 = sqlite3PExpr(pParse, TK_GE, sqlite3ExprAddCollateString(pParse,pNewExpr1,zCollSeqName), - pStr1, 0); + pStr1); transferJoinMarkings(pNewExpr1, pExpr); idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags); testcase( idxNew1==0 ); @@ -127489,7 +129015,7 @@ static void exprAnalyze( pNewExpr2 = sqlite3ExprDup(db, pLeft, 0); pNewExpr2 = sqlite3PExpr(pParse, TK_LT, sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName), - pStr2, 0); + pStr2); transferJoinMarkings(pNewExpr2, pExpr); idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags); testcase( idxNew2==0 ); @@ -127522,7 +129048,7 @@ static void exprAnalyze( if( (prereqExpr & prereqColumn)==0 ){ Expr *pNewExpr; pNewExpr = sqlite3PExpr(pParse, TK_MATCH, - 0, sqlite3ExprDup(db, pRight, 0), 0); + 0, sqlite3ExprDup(db, pRight, 0)); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); pNewTerm = &pWC->a[idxNew]; @@ -127548,20 +129074,19 @@ static void exprAnalyze( ** is not a sub-select. 
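The comment just above describes splitting a row-value equality into one virtual term per component. A standalone sketch (over plain strings rather than SQLite's Expr tree) of the rewrite the exprAnalyze() code below performs when both sides have the same number of fields:

/* Illustrative sketch only: "(a, b) = (?1, ?2)" becomes "a = ?1 AND b = ?2". */
#include <stdio.h>

static void decomposeVectorEq(const char **aLeft, const char **aRight, int n){
  int i;
  for(i=0; i<n; i++){
    printf("%s%s = %s", i ? " AND " : "", aLeft[i], aRight[i]);
  }
  printf("\n");
}

int main(void){
  const char *lhs[] = { "a", "b" };
  const char *rhs[] = { "?1", "?2" };
  decomposeVectorEq(lhs, rhs, 2);   /* prints: a = ?1 AND b = ?2 */
  return 0;
}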
*/ if( pWC->op==TK_AND && (pExpr->op==TK_EQ || pExpr->op==TK_IS) - && sqlite3ExprIsVector(pExpr->pLeft) + && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1 + && sqlite3ExprVectorSize(pExpr->pRight)==nLeft && ( (pExpr->pLeft->flags & EP_xIsSelect)==0 - || (pExpr->pRight->flags & EP_xIsSelect)==0 - )){ - int nLeft = sqlite3ExprVectorSize(pExpr->pLeft); + || (pExpr->pRight->flags & EP_xIsSelect)==0) + ){ int i; - assert( nLeft==sqlite3ExprVectorSize(pExpr->pRight) ); for(i=0; ipLeft, i); Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i); - pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight, 0); + pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight); transferJoinMarkings(pNew, pExpr); idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC); exprAnalyze(pSrc, pWC, idxNew); @@ -127613,7 +129138,7 @@ static void exprAnalyze( pNewExpr = sqlite3PExpr(pParse, TK_GT, sqlite3ExprDup(db, pLeft, 0), - sqlite3ExprAlloc(db, TK_NULL, 0, 0), 0); + sqlite3ExprAlloc(db, TK_NULL, 0, 0)); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL); @@ -127634,6 +129159,8 @@ static void exprAnalyze( /* Prevent ON clause terms of a LEFT JOIN from being used to drive ** an index for tables to the left of the join. */ + testcase( pTerm!=&pWC->a[idxTerm] ); + pTerm = &pWC->a[idxTerm]; pTerm->prereqRight |= extraRight; } @@ -127797,7 +129324,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( pColRef->iColumn = k++; pColRef->pTab = pTab; pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, - sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0); + sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0)); whereClauseInsert(pWC, pTerm, TERM_DYNAMIC); } } @@ -128004,11 +129531,13 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ WhereTerm *pTerm; /* The term being tested */ int k = pScan->k; /* Where to start scanning */ - while( pScan->iEquiv<=pScan->nEquiv ){ - iCur = pScan->aiCur[pScan->iEquiv-1]; + assert( pScan->iEquiv<=pScan->nEquiv ); + pWC = pScan->pWC; + while(1){ iColumn = pScan->aiColumn[pScan->iEquiv-1]; - if( iColumn==XN_EXPR && pScan->pIdxExpr==0 ) return 0; - while( (pWC = pScan->pWC)!=0 ){ + iCur = pScan->aiCur[pScan->iEquiv-1]; + assert( pWC!=0 ); + do{ for(pTerm=pWC->a+k; knTerm; k++, pTerm++){ if( pTerm->leftCursor==iCur && pTerm->u.leftColumn==iColumn @@ -128058,15 +129587,17 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ testcase( pTerm->eOperator & WO_IS ); continue; } + pScan->pWC = pWC; pScan->k = k+1; return pTerm; } } } - pScan->pWC = pScan->pWC->pOuter; + pWC = pWC->pOuter; k = 0; - } - pScan->pWC = pScan->pOrigWC; + }while( pWC!=0 ); + if( pScan->iEquiv>=pScan->nEquiv ) break; + pWC = pScan->pOrigWC; k = 0; pScan->iEquiv++; } @@ -128100,24 +129631,25 @@ static WhereTerm *whereScanInit( u32 opMask, /* Operator(s) to scan for */ Index *pIdx /* Must be compatible with this index */ ){ - int j = 0; - - /* memset(pScan, 0, sizeof(*pScan)); */ pScan->pOrigWC = pWC; pScan->pWC = pWC; pScan->pIdxExpr = 0; + pScan->idxaff = 0; + pScan->zCollName = 0; if( pIdx ){ - j = iColumn; + int j = iColumn; iColumn = pIdx->aiColumn[j]; - if( iColumn==XN_EXPR ) pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr; - if( iColumn==pIdx->pTable->iPKey ) iColumn = XN_ROWID; - } - if( pIdx && iColumn>=0 ){ - pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity; - pScan->zCollName = pIdx->azColl[j]; - }else{ - pScan->idxaff = 0; - pScan->zCollName = 0; + if( iColumn==XN_EXPR ){ + pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr; + pScan->zCollName = pIdx->azColl[j]; + }else if( 
iColumn==pIdx->pTable->iPKey ){ + iColumn = XN_ROWID; + }else if( iColumn>=0 ){ + pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity; + pScan->zCollName = pIdx->azColl[j]; + } + }else if( iColumn==XN_EXPR ){ + return 0; } pScan->opMask = opMask; pScan->k = 0; @@ -132713,27 +134245,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ continue; } - /* Close all of the cursors that were opened by sqlite3WhereBegin. - ** Except, do not close cursors that will be reused by the OR optimization - ** (WHERE_OR_SUBCLAUSE). And do not close the OP_OpenWrite cursors - ** created for the ONEPASS optimization. - */ - if( (pTab->tabFlags & TF_Ephemeral)==0 - && pTab->pSelect==0 - && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 - ){ - int ws = pLoop->wsFlags; - if( pWInfo->eOnePass==ONEPASS_OFF && (ws & WHERE_IDX_ONLY)==0 ){ - sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor); - } - if( (ws & WHERE_INDEXED)!=0 - && (ws & (WHERE_IPK|WHERE_AUTO_INDEX))==0 - && pLevel->iIdxCur!=pWInfo->aiCurOnePass[1] - ){ - sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur); - } - } - /* If this scan uses an index, make VDBE code substitutions to read data ** from the index instead of from the table where possible. In some cases ** this optimization prevents the table from ever being read, which can @@ -132772,7 +134283,8 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ pOp->p2 = x; pOp->p1 = pLevel->iIdxCur; } - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0 ); + assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0 + || pWInfo->eOnePass ); }else if( pOp->opcode==OP_Rowid ){ pOp->p1 = pLevel->iIdxCur; pOp->opcode = OP_IdxRowid; @@ -132836,6 +134348,19 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ */ #define YYPARSEFREENEVERNULL 1 +/* +** In the amalgamation, the parse.c file generated by lemon and the +** tokenize.c file are concatenated. In that case, sqlite3RunParser() +** has access to the the size of the yyParser object and so the parser +** engine can be allocated from stack. In that case, only the +** sqlite3ParserInit() and sqlite3ParserFinalize() routines are invoked +** and the sqlite3ParserAlloc() and sqlite3ParserFree() routines can be +** omitted. +*/ +#ifdef SQLITE_AMALGAMATION +# define sqlite3Parser_ENGINEALWAYSONSTACK 1 +#endif + /* ** Alternative datatype for the argument to the malloc() routine passed ** into sqlite3ParserAlloc(). The default is size_t. 
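The sqlite3Parser_ENGINEALWAYSONSTACK comment above explains that, inside the amalgamation, the parser object's size is visible to sqlite3RunParser(), so the engine can live on the caller's stack and only the Init/Finalize routines are needed. A hedged sketch of that allocation pattern with hypothetical names (Engine, ENGINE_ALWAYS_ON_STACK stand in for yyParser and the real macro):

/* Illustrative sketch only: stack allocation when the object size is known,
** heap allocation otherwise. */
#include <stdlib.h>
#include <string.h>

typedef struct Engine { int state; } Engine;

static void engineInit(Engine *p){ memset(p, 0, sizeof(*p)); }
static void engineFinalize(Engine *p){ (void)p; /* free secondary allocations */ }

static void runParser(void){
#ifdef ENGINE_ALWAYS_ON_STACK
  Engine sEngine;                 /* size known at compile time: no malloc */
  Engine *pEngine = &sEngine;
  engineInit(pEngine);
#else
  Engine *pEngine = malloc(sizeof(Engine));   /* heap fallback */
  if( pEngine==0 ) return;
  engineInit(pEngine);
#endif
  /* ... feed tokens to the engine ... */
  engineFinalize(pEngine);
#ifndef ENGINE_ALWAYS_ON_STACK
  free(pEngine);
#endif
}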
@@ -132939,7 +134464,7 @@ static void disableLookaside(Parse *pParse){ ExprSpan *pLeft, /* The left operand, and output */ ExprSpan *pRight /* The right operand */ ){ - pLeft->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr, 0); + pLeft->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr); pLeft->zEnd = pRight->zEnd; } @@ -132948,7 +134473,7 @@ static void disableLookaside(Parse *pParse){ */ static void exprNot(Parse *pParse, int doNot, ExprSpan *pSpan){ if( doNot ){ - pSpan->pExpr = sqlite3PExpr(pParse, TK_NOT, pSpan->pExpr, 0, 0); + pSpan->pExpr = sqlite3PExpr(pParse, TK_NOT, pSpan->pExpr, 0); } } @@ -132960,7 +134485,7 @@ static void disableLookaside(Parse *pParse){ ExprSpan *pOperand, /* The operand, and output */ Token *pPostOp /* The operand token for setting the span */ ){ - pOperand->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); + pOperand->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0); pOperand->zEnd = &pPostOp->z[pPostOp->n]; } @@ -132985,7 +134510,7 @@ static void disableLookaside(Parse *pParse){ Token *pPreOp /* The operand token for setting the span */ ){ pOut->zStart = pPreOp->z; - pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); + pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0); pOut->zEnd = pOperand->zEnd; } @@ -134284,6 +135809,31 @@ static int yyGrowStack(yyParser *p){ # define YYMALLOCARGTYPE size_t #endif +/* Initialize a new parser that has already been allocated. +*/ +SQLITE_PRIVATE void sqlite3ParserInit(void *yypParser){ + yyParser *pParser = (yyParser*)yypParser; +#ifdef YYTRACKMAXSTACKDEPTH + pParser->yyhwm = 0; +#endif +#if YYSTACKDEPTH<=0 + pParser->yytos = NULL; + pParser->yystack = NULL; + pParser->yystksz = 0; + if( yyGrowStack(pParser) ){ + pParser->yystack = &pParser->yystk0; + pParser->yystksz = 1; + } +#endif +#ifndef YYNOERRORRECOVERY + pParser->yyerrcnt = -1; +#endif + pParser->yytos = pParser->yystack; + pParser->yystack[0].stateno = 0; + pParser->yystack[0].major = 0; +} + +#ifndef sqlite3Parser_ENGINEALWAYSONSTACK /* ** This function allocates a new parser. ** The only argument is a pointer to a function which works like @@ -134299,28 +135849,11 @@ static int yyGrowStack(yyParser *p){ SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ yyParser *pParser; pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( pParser ){ -#ifdef YYTRACKMAXSTACKDEPTH - pParser->yyhwm = 0; -#endif -#if YYSTACKDEPTH<=0 - pParser->yytos = NULL; - pParser->yystack = NULL; - pParser->yystksz = 0; - if( yyGrowStack(pParser) ){ - pParser->yystack = &pParser->yystk0; - pParser->yystksz = 1; - } -#endif -#ifndef YYNOERRORRECOVERY - pParser->yyerrcnt = -1; -#endif - pParser->yytos = pParser->yystack; - pParser->yystack[0].stateno = 0; - pParser->yystack[0].major = 0; - } + if( pParser ) sqlite3ParserInit(pParser); return pParser; } +#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */ + /* The following function deletes the "minor type" or semantic value ** associated with a symbol. 
The symbol can be either a terminal @@ -134446,6 +135979,18 @@ static void yy_pop_parser_stack(yyParser *pParser){ yy_destructor(pParser, yytos->major, &yytos->minor); } +/* +** Clear all secondary memory allocations from the parser +*/ +SQLITE_PRIVATE void sqlite3ParserFinalize(void *p){ + yyParser *pParser = (yyParser*)p; + while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); +#if YYSTACKDEPTH<=0 + if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); +#endif +} + +#ifndef sqlite3Parser_ENGINEALWAYSONSTACK /* ** Deallocate and destroy a parser. Destructors are called for ** all stack elements before shutting the parser down. @@ -134458,16 +136003,13 @@ SQLITE_PRIVATE void sqlite3ParserFree( void *p, /* The parser to be deleted */ void (*freeProc)(void*) /* Function used to reclaim memory */ ){ - yyParser *pParser = (yyParser*)p; #ifndef YYPARSEFREENEVERNULL - if( pParser==0 ) return; -#endif - while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); -#if YYSTACKDEPTH<=0 - if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); + if( p==0 ) return; #endif - (*freeProc)((void*)pParser); + sqlite3ParserFinalize(p); + (*freeProc)(p); } +#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */ /* ** Return the peak depth of the stack for a parser. @@ -134578,7 +136120,6 @@ static int yy_find_reduce_action( */ static void yyStackOverflow(yyParser *yypParser){ sqlite3ParserARG_FETCH; - yypParser->yytos--; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt); @@ -134633,12 +136174,14 @@ static void yy_shift( #endif #if YYSTACKDEPTH>0 if( yypParser->yytos>=&yypParser->yystack[YYSTACKDEPTH] ){ + yypParser->yytos--; yyStackOverflow(yypParser); return; } #else if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){ if( yyGrowStack(yypParser) ){ + yypParser->yytos--; yyStackOverflow(yypParser); return; } @@ -135180,7 +136723,7 @@ static void yy_reduce( case 33: /* ccons ::= DEFAULT MINUS term */ { ExprSpan v; - v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy190.pExpr, 0, 0); + v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy190.pExpr, 0); v.zStart = yymsp[-1].minor.yy0.z; v.zEnd = yymsp[0].minor.yy190.zEnd; sqlite3AddDefaultValue(pParse,&v); @@ -135444,9 +136987,9 @@ static void yy_reduce( break; case 94: /* selcollist ::= sclp nm DOT STAR */ { - Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0, 0); - Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); - Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); + Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); + Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); + Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, pDot); } break; @@ -135672,7 +137215,7 @@ static void yy_reduce( Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0); + yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } break; case 155: /* expr ::= nm DOT nm DOT nm */ @@ -135680,9 +137223,9 @@ static void yy_reduce( Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1); Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, 
&yymsp[-2].minor.yy0, 1); Expr *temp3 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); - Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0); + Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3); spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } break; case 158: /* term ::= INTEGER */ @@ -135711,7 +137254,7 @@ static void yy_reduce( sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); yymsp[0].minor.yy190.pExpr = 0; }else{ - yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, 0); + yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); if( yymsp[0].minor.yy190.pExpr ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy190.pExpr->iTable); } } @@ -135726,7 +137269,8 @@ static void yy_reduce( case 161: /* expr ::= CAST LP expr AS typetoken RP */ { spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy190.pExpr, 0, &yymsp[-1].minor.yy0); + yymsp[-5].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy190.pExpr, yymsp[-3].minor.yy190.pExpr, 0); } break; case 162: /* expr ::= ID|INDEXED LP distinct exprlist RP */ @@ -135759,7 +137303,7 @@ static void yy_reduce( case 165: /* expr ::= LP nexprlist COMMA expr RP */ { ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy148, yymsp[-1].minor.yy190.pExpr); - yylhsminor.yy190.pExpr = sqlite3PExpr(pParse, TK_VECTOR, 0, 0, 0); + yylhsminor.yy190.pExpr = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); if( yylhsminor.yy190.pExpr ){ yylhsminor.yy190.pExpr->x.pList = pList; spanSet(&yylhsminor.yy190, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0); @@ -135848,7 +137392,7 @@ static void yy_reduce( { ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr); pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr); - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy190.pExpr, 0, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy190.pExpr, 0); if( yymsp[-4].minor.yy190.pExpr ){ yymsp[-4].minor.yy190.pExpr->x.pList = pList; }else{ @@ -135870,7 +137414,7 @@ static void yy_reduce( ** regardless of the value of expr1. */ sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy190.pExpr); - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy194]); + yymsp[-4].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_INTEGER,&sqlite3IntTokens[yymsp[-3].minor.yy194],1); }else if( yymsp[-1].minor.yy148->nExpr==1 ){ /* Expressions of the form: ** @@ -135897,9 +137441,9 @@ static void yy_reduce( pRHS->flags &= ~EP_Collate; pRHS->flags |= EP_Generic; } - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy194 ? TK_NE : TK_EQ, yymsp[-4].minor.yy190.pExpr, pRHS, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy194 ? 
TK_NE : TK_EQ, yymsp[-4].minor.yy190.pExpr, pRHS); }else{ - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0); if( yymsp[-4].minor.yy190.pExpr ){ yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy148; sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr); @@ -135914,13 +137458,13 @@ static void yy_reduce( case 192: /* expr ::= LP select RP */ { spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ - yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0); + yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0); sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy190.pExpr, yymsp[-1].minor.yy243); } break; case 193: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0); sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, yymsp[-1].minor.yy243); exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; @@ -135931,7 +137475,7 @@ static void yy_reduce( SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); if( yymsp[0].minor.yy148 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy148); - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0); sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, pSelect); exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); yymsp[-4].minor.yy190.zEnd = yymsp[-1].minor.yy0.z ? &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n] : &yymsp[-2].minor.yy0.z[yymsp[-2].minor.yy0.n]; @@ -135941,14 +137485,14 @@ static void yy_reduce( { Expr *p; spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ - p = yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0); + p = yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy243); } break; case 196: /* expr ::= CASE case_operand case_exprlist case_else END */ { spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-C*/ - yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy72, 0, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy72, 0); if( yymsp[-4].minor.yy190.pExpr ){ yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy72 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy148,yymsp[-1].minor.yy72) : yymsp[-2].minor.yy148; sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr); @@ -136122,7 +137666,7 @@ static void yy_reduce( case 247: /* expr ::= RAISE LP IGNORE RP */ { spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0); + yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0); if( yymsp[-3].minor.yy190.pExpr ){ yymsp[-3].minor.yy190.pExpr->affinity = OE_Ignore; } @@ -136131,7 +137675,7 @@ static void yy_reduce( case 248: /* expr ::= RAISE LP raisetype COMMA nm RP */ { spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0); + yymsp[-5].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); if( yymsp[-5].minor.yy190.pExpr ) { yymsp[-5].minor.yy190.pExpr->affinity = (char)yymsp[-3].minor.yy194; } @@ -136630,13 +138174,13 @@ static const unsigned char aiClass[] = { /* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, /* 2x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, /* 3x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -/* 4x */ 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 12, 17, 20, 10, +/* 4x */ 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 26, 12, 17, 20, 10, /* 5x */ 24, 27, 27, 27, 27, 27, 27, 27, 27, 27, 15, 4, 21, 18, 19, 27, -/* 6x */ 11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22, 1, 13, 7, +/* 6x */ 11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22, 1, 13, 6, /* 7x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 8, 5, 5, 5, 8, 14, 8, /* 8x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, /* 9x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, -/* 9x */ 25, 1, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27, +/* Ax */ 27, 25, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27, /* Bx */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 27, 27, 27, 27, 27, /* Cx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, /* Dx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, @@ -137319,6 +138863,9 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr int lastTokenParsed = -1; /* type of the previous token */ sqlite3 *db = pParse->db; /* The database connection */ int mxSqlLen; /* Max length of an SQL string */ +#ifdef sqlite3Parser_ENGINEALWAYSONSTACK + unsigned char zSpace[sizeof(yyParser)]; /* Space for parser engine object */ +#endif assert( zSql!=0 ); mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; @@ -137330,16 +138877,20 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr i = 0; assert( pzErrMsg!=0 ); /* sqlite3ParserTrace(stdout, "parser: "); */ +#ifdef sqlite3Parser_ENGINEALWAYSONSTACK + pEngine = zSpace; + sqlite3ParserInit(pEngine); +#else pEngine = sqlite3ParserAlloc(sqlite3Malloc); if( pEngine==0 ){ sqlite3OomFault(db); return SQLITE_NOMEM_BKPT; } +#endif assert( pParse->pNewTable==0 ); assert( pParse->pNewTrigger==0 ); assert( pParse->nVar==0 ); - assert( pParse->nzVar==0 ); - assert( pParse->azVar==0 ); + assert( pParse->pVList==0 ); while( 1 ){ assert( i>=0 ); if( zSql[i]!=0 ){ @@ -137387,7 +138938,11 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr ); sqlite3_mutex_leave(sqlite3MallocMutex()); #endif /* YYDEBUG */ +#ifdef 
sqlite3Parser_ENGINEALWAYSONSTACK + sqlite3ParserFinalize(pEngine); +#else sqlite3ParserFree(pEngine, sqlite3_free); +#endif if( db->mallocFailed ){ pParse->rc = SQLITE_NOMEM_BKPT; } @@ -137426,8 +138981,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree); sqlite3DeleteTrigger(db, pParse->pNewTrigger); - for(i=pParse->nzVar-1; i>=0; i--) sqlite3DbFree(db, pParse->azVar[i]); - sqlite3DbFree(db, pParse->azVar); + sqlite3DbFree(db, pParse->pVList); while( pParse->pAinc ){ AutoincInfo *p = pParse->pAinc; pParse->pAinc = p->pNext; @@ -138640,6 +140194,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger }, { SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, SQLITE_Fts3Tokenizer }, { SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_LoadExtension }, + { SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE, SQLITE_NoCkptOnClose }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ @@ -139397,7 +140952,7 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ */ SQLITE_API void sqlite3_interrupt(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) ){ + if( !sqlite3SafetyCheckOk(db) && (db==0 || db->magic!=SQLITE_MAGIC_ZOMBIE) ){ (void)SQLITE_MISUSE_BKPT; return; } @@ -139936,6 +141491,13 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( sqlite3Error(db, rc); } rc = sqlite3ApiExit(db, rc); + + /* If there are no active statements, clear the interrupt flag at this + ** point. */ + if( db->nVdbeActive==0 ){ + db->u1.isInterrupted = 0; + } + sqlite3_mutex_leave(db->mutex); return rc; #endif @@ -140438,6 +142000,7 @@ SQLITE_PRIVATE int sqlite3ParseUri( assert( octet>=0 && octet<256 ); if( octet==0 ){ +#ifndef SQLITE_ENABLE_URI_00_ERROR /* This branch is taken when "%00" appears within the URI. In this ** case we ignore all text in the remainder of the path, name or ** value currently being parsed. So ignore the current character @@ -140450,6 +142013,12 @@ SQLITE_PRIVATE int sqlite3ParseUri( iIn++; } continue; +#else + /* If ENABLE_URI_00_ERROR is defined, "%00" in a URI is an error. */ + *pzErrMsg = sqlite3_mprintf("unexpected %%00 in uri"); + rc = SQLITE_ERROR; + goto parse_uri_out; +#endif } c = octet; }else if( eState==1 && (c=='&' || c=='=') ){ @@ -140554,7 +142123,9 @@ SQLITE_PRIVATE int sqlite3ParseUri( }else{ zFile = sqlite3_malloc64(nUri+2); if( !zFile ) return SQLITE_NOMEM_BKPT; - memcpy(zFile, zUri, nUri); + if( nUri ){ + memcpy(zFile, zUri, nUri); + } zFile[nUri] = '\0'; zFile[nUri+1] = '\0'; flags &= ~SQLITE_OPEN_URI; @@ -141348,7 +142919,7 @@ SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, vo */ SQLITE_API int sqlite3_test_control(int op, ...){ int rc = 0; -#ifdef SQLITE_OMIT_BUILTIN_TEST +#ifdef SQLITE_UNTESTABLE UNUSED_PARAMETER(op); #else va_list ap; @@ -141685,7 +143256,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){ } } va_end(ap); -#endif /* SQLITE_OMIT_BUILTIN_TEST */ +#endif /* SQLITE_UNTESTABLE */ return rc; } @@ -141741,15 +143312,8 @@ SQLITE_API sqlite3_int64 sqlite3_uri_int64( ** Return the Btree pointer identified by zDbName. Return NULL if not found. */ SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){ - int i; - for(i=0; inDb; i++){ - if( db->aDb[i].pBt - && (zDbName==0 || sqlite3StrICmp(zDbName, db->aDb[i].zDbSName)==0) - ){ - return db->aDb[i].pBt; - } - } - return 0; + int iDb = zDbName ? sqlite3FindDbName(db, zDbName) : 0; + return iDb<0 ? 
0 : db->aDb[iDb].pBt; } /* @@ -141796,7 +143360,6 @@ SQLITE_API int sqlite3_snapshot_get( ){ int rc = SQLITE_ERROR; #ifndef SQLITE_OMIT_WAL - int iDb; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ @@ -141805,13 +143368,15 @@ SQLITE_API int sqlite3_snapshot_get( #endif sqlite3_mutex_enter(db->mutex); - iDb = sqlite3FindDbName(db, zDb); - if( iDb==0 || iDb>1 ){ - Btree *pBt = db->aDb[iDb].pBt; - if( 0==sqlite3BtreeIsInTrans(pBt) ){ - rc = sqlite3BtreeBeginTrans(pBt, 0); - if( rc==SQLITE_OK ){ - rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot); + if( db->autoCommit==0 ){ + int iDb = sqlite3FindDbName(db, zDb); + if( iDb==0 || iDb>1 ){ + Btree *pBt = db->aDb[iDb].pBt; + if( 0==sqlite3BtreeIsInTrans(pBt) ){ + rc = sqlite3BtreeBeginTrans(pBt, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot); + } } } } @@ -141858,6 +143423,38 @@ SQLITE_API int sqlite3_snapshot_open( return rc; } +/* +** Recover as many snapshots as possible from the wal file associated with +** schema zDb of database db. +*/ +SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){ + int rc = SQLITE_ERROR; + int iDb; +#ifndef SQLITE_OMIT_WAL + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } +#endif + + sqlite3_mutex_enter(db->mutex); + iDb = sqlite3FindDbName(db, zDb); + if( iDb==0 || iDb>1 ){ + Btree *pBt = db->aDb[iDb].pBt; + if( 0==sqlite3BtreeIsInReadTrans(pBt) ){ + rc = sqlite3BtreeBeginTrans(pBt, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerSnapshotRecover(sqlite3BtreePager(pBt)); + sqlite3BtreeCommit(pBt); + } + } + } + sqlite3_mutex_leave(db->mutex); +#endif /* SQLITE_OMIT_WAL */ + return rc; +} + /* ** Free a snapshot handle obtained from sqlite3_snapshot_get(). */ @@ -143008,6 +144605,7 @@ struct Fts3Table { ** statements is run and reset within a single virtual table API call. */ sqlite3_stmt *aStmt[40]; + sqlite3_stmt *pSeekStmt; /* Cache for fts3CursorSeekStmt() */ char *zReadExprlist; char *zWriteExprlist; @@ -143077,6 +144675,7 @@ struct Fts3Cursor { i16 eSearch; /* Search strategy (see below) */ u8 isEof; /* True if at End Of Results */ u8 isRequireSeek; /* True if must seek pStmt to %_content row */ + u8 bSeekStmt; /* True if pStmt is a seek */ sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ Fts3Expr *pExpr; /* Parsed MATCH query string */ int iLangid; /* Language being queried for */ @@ -143599,6 +145198,7 @@ static int fts3DisconnectMethod(sqlite3_vtab *pVtab){ assert( p->pSegments==0 ); /* Free any prepared statements held */ + sqlite3_finalize(p->pSeekStmt); for(i=0; iaStmt); i++){ sqlite3_finalize(p->aStmt[i]); } @@ -144470,9 +146070,9 @@ static int fts3InitVtab( p->pTokenizer = pTokenizer; p->nMaxPendingData = FTS3_MAX_PENDING_DATA; p->bHasDocsize = (isFts4 && bNoDocsize==0); - p->bHasStat = isFts4; - p->bFts4 = isFts4; - p->bDescIdx = bDescIdx; + p->bHasStat = (u8)isFts4; + p->bFts4 = (u8)isFts4; + p->bDescIdx = (u8)bDescIdx; p->nAutoincrmerge = 0xff; /* 0xff means setting unknown */ p->zContentTbl = zContent; p->zLanguageid = zLanguageid; @@ -144787,6 +146387,26 @@ static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ return SQLITE_OK; } +/* +** Finalize the statement handle at pCsr->pStmt. +** +** Or, if that statement handle is one created by fts3CursorSeekStmt(), +** and the Fts3Table.pSeekStmt slot is currently NULL, save the statement +** pointer there instead of finalizing it. 
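The comment ending above describes parking a cursor's seek statement in Fts3Table.pSeekStmt instead of finalizing it, so the next cursor can skip the re-prepare. A minimal sketch of the same one-slot statement cache, written against the public sqlite3 prepare/reset/finalize API:

/* Illustrative sketch only (not the patch itself). */
#include "sqlite3.h"

typedef struct StmtSlot { sqlite3_stmt *pCached; } StmtSlot;

/* Acquire: reuse the cached handle if present, otherwise prepare afresh. */
static int slotAcquire(StmtSlot *pSlot, sqlite3 *db, const char *zSql,
                       sqlite3_stmt **ppStmt){
  if( pSlot->pCached ){
    *ppStmt = pSlot->pCached;
    pSlot->pCached = 0;
    return SQLITE_OK;
  }
  return sqlite3_prepare_v2(db, zSql, -1, ppStmt, 0);
}

/* Release: park the handle back in the slot if it is empty, else finalize. */
static void slotRelease(StmtSlot *pSlot, sqlite3_stmt *pStmt){
  if( pStmt==0 ) return;
  if( pSlot->pCached==0 ){
    sqlite3_reset(pStmt);
    pSlot->pCached = pStmt;
  }else{
    sqlite3_finalize(pStmt);
  }
}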
+*/ +static void fts3CursorFinalizeStmt(Fts3Cursor *pCsr){ + if( pCsr->bSeekStmt ){ + Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; + if( p->pSeekStmt==0 ){ + p->pSeekStmt = pCsr->pStmt; + sqlite3_reset(pCsr->pStmt); + pCsr->pStmt = 0; + } + pCsr->bSeekStmt = 0; + } + sqlite3_finalize(pCsr->pStmt); +} + /* ** Close the cursor. For additional information see the documentation ** on the xClose method of the virtual table interface. @@ -144794,7 +146414,7 @@ static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); - sqlite3_finalize(pCsr->pStmt); + fts3CursorFinalizeStmt(pCsr); sqlite3Fts3ExprFree(pCsr->pExpr); sqlite3Fts3FreeDeferredTokens(pCsr); sqlite3_free(pCsr->aDoclist); @@ -144812,20 +146432,23 @@ static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){ ** ** (or the equivalent for a content=xxx table) and set pCsr->pStmt to ** it. If an error occurs, return an SQLite error code. -** -** Otherwise, set *ppStmt to point to pCsr->pStmt and return SQLITE_OK. */ -static int fts3CursorSeekStmt(Fts3Cursor *pCsr, sqlite3_stmt **ppStmt){ +static int fts3CursorSeekStmt(Fts3Cursor *pCsr){ int rc = SQLITE_OK; if( pCsr->pStmt==0 ){ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; char *zSql; - zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist); - if( !zSql ) return SQLITE_NOMEM; - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); - sqlite3_free(zSql); + if( p->pSeekStmt ){ + pCsr->pStmt = p->pSeekStmt; + p->pSeekStmt = 0; + }else{ + zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist); + if( !zSql ) return SQLITE_NOMEM; + rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); + sqlite3_free(zSql); + } + if( rc==SQLITE_OK ) pCsr->bSeekStmt = 1; } - *ppStmt = pCsr->pStmt; return rc; } @@ -144837,9 +146460,7 @@ static int fts3CursorSeekStmt(Fts3Cursor *pCsr, sqlite3_stmt **ppStmt){ static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){ int rc = SQLITE_OK; if( pCsr->isRequireSeek ){ - sqlite3_stmt *pStmt = 0; - - rc = fts3CursorSeekStmt(pCsr, &pStmt); + rc = fts3CursorSeekStmt(pCsr); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iPrevId); pCsr->isRequireSeek = 0; @@ -146297,7 +147918,7 @@ static int fts3FilterMethod( assert( iIdx==nVal ); /* In case the cursor has been used before, clear it now. 
*/ - sqlite3_finalize(pCsr->pStmt); + fts3CursorFinalizeStmt(pCsr); sqlite3_free(pCsr->aDoclist); sqlite3Fts3MIBufferFree(pCsr->pMIBuffer); sqlite3Fts3ExprFree(pCsr->pExpr); @@ -146365,7 +147986,7 @@ static int fts3FilterMethod( rc = SQLITE_NOMEM; } }else if( eSearch==FTS3_DOCID_SEARCH ){ - rc = fts3CursorSeekStmt(pCsr, &pCsr->pStmt); + rc = fts3CursorSeekStmt(pCsr); if( rc==SQLITE_OK ){ rc = sqlite3_bind_value(pCsr->pStmt, 1, pCons); } @@ -146529,7 +148150,7 @@ static int fts3SetHasStat(Fts3Table *p){ if( rc==SQLITE_OK ){ int bHasStat = (sqlite3_step(pStmt)==SQLITE_ROW); rc = sqlite3_finalize(pStmt); - if( rc==SQLITE_OK ) p->bHasStat = bHasStat; + if( rc==SQLITE_OK ) p->bHasStat = (u8)bHasStat; } sqlite3_free(zSql); }else{ @@ -161385,6 +163006,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){ #ifndef SQLITE_AMALGAMATION #include "sqlite3rtree.h" typedef sqlite3_int64 i64; +typedef sqlite3_uint64 u64; typedef unsigned char u8; typedef unsigned short u16; typedef unsigned int u32; @@ -161433,13 +163055,16 @@ struct Rtree { sqlite3 *db; /* Host database connection */ int iNodeSize; /* Size in bytes of each node in the node table */ u8 nDim; /* Number of dimensions */ + u8 nDim2; /* Twice the number of dimensions */ u8 eCoordType; /* RTREE_COORD_REAL32 or RTREE_COORD_INT32 */ u8 nBytesPerCell; /* Bytes consumed per cell */ + u8 inWrTrans; /* True if inside write transaction */ int iDepth; /* Current depth of the r-tree structure */ char *zDb; /* Name of database containing r-tree table */ char *zName; /* Name of r-tree table */ - int nBusy; /* Current number of users of this structure */ + u32 nBusy; /* Current number of users of this structure */ i64 nRowEst; /* Estimated number of rows in this table */ + u32 nCursor; /* Number of open cursors */ /* List of nodes removed during a CondenseTree operation. List is ** linked together via the pointer normally used for hash chains - @@ -161449,8 +163074,10 @@ struct Rtree { RtreeNode *pDeleted; int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ + /* Blob I/O on xxx_node */ + sqlite3_blob *pNodeBlob; + /* Statements to read/write/delete a record from xxx_node */ - sqlite3_stmt *pReadNode; sqlite3_stmt *pWriteNode; sqlite3_stmt *pDeleteNode; @@ -161679,6 +163306,64 @@ struct RtreeMatchArg { # define MIN(x,y) ((x) > (y) ? (y) : (x)) #endif +/* What version of GCC is being used. 0 means GCC is not being used */ +#ifndef GCC_VERSION +#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC) +# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__) +#else +# define GCC_VERSION 0 +#endif +#endif + +/* What version of CLANG is being used. 0 means CLANG is not being used */ +#ifndef CLANG_VERSION +#if defined(__clang__) && !defined(_WIN32) && !defined(SQLITE_DISABLE_INTRINSIC) +# define CLANG_VERSION \ + (__clang_major__*1000000+__clang_minor__*1000+__clang_patchlevel__) +#else +# define CLANG_VERSION 0 +#endif +#endif + +/* The testcase() macro should already be defined in the amalgamation. If +** it is not, make it a no-op. +*/ +#ifndef SQLITE_AMALGAMATION +# define testcase(X) +#endif + +/* +** Macros to determine whether the machine is big or little endian, +** and whether or not that determination is run-time or compile-time. +** +** For best performance, an attempt is made to guess at the byte-order +** using C-preprocessor macros. If that is unsuccessful, or if +** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined +** at run-time. 
+*/ +#ifndef SQLITE_BYTEORDER +#if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ + defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ + defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ + defined(__arm__) +# define SQLITE_BYTEORDER 1234 +#elif defined(sparc) || defined(__ppc__) +# define SQLITE_BYTEORDER 4321 +#else +# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ +#endif +#endif + + +/* What version of MSVC is being used. 0 means MSVC is not being used */ +#ifndef MSVC_VERSION +#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC) +# define MSVC_VERSION _MSC_VER +#else +# define MSVC_VERSION 0 +#endif +#endif + /* ** Functions to deserialize a 16 bit integer, 32 bit real number and ** 64 bit integer. The deserialized value is returned. @@ -161687,14 +163372,36 @@ static int readInt16(u8 *p){ return (p[0]<<8) + p[1]; } static void readCoord(u8 *p, RtreeCoord *pCoord){ + assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */ +#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + pCoord->u = _byteswap_ulong(*(u32*)p); +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + pCoord->u = __builtin_bswap32(*(u32*)p); +#elif SQLITE_BYTEORDER==4321 + pCoord->u = *(u32*)p; +#else pCoord->u = ( (((u32)p[0]) << 24) + (((u32)p[1]) << 16) + (((u32)p[2]) << 8) + (((u32)p[3]) << 0) ); +#endif } static i64 readInt64(u8 *p){ +#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + u64 x; + memcpy(&x, p, 8); + return (i64)_byteswap_uint64(x); +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + u64 x; + memcpy(&x, p, 8); + return (i64)__builtin_bswap64(x); +#elif SQLITE_BYTEORDER==4321 + i64 x; + memcpy(&x, p, 8); + return x; +#else return ( (((i64)p[0]) << 56) + (((i64)p[1]) << 48) + @@ -161705,6 +163412,7 @@ static i64 readInt64(u8 *p){ (((i64)p[6]) << 8) + (((i64)p[7]) << 0) ); +#endif } /* @@ -161712,23 +163420,43 @@ static i64 readInt64(u8 *p){ ** 64 bit integer. The value returned is the number of bytes written ** to the argument buffer (always 2, 4 and 8 respectively). 
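The readCoord()/readInt64() hunks above select a compiler byte-swap intrinsic when the host byte order is known at compile time and fall back to portable shifts otherwise. A simplified sketch of that idea for a big-endian 32-bit load; the detection here is deliberately cruder than SQLite's SQLITE_BYTEORDER/GCC_VERSION/MSVC_VERSION machinery:

/* Illustrative sketch only. */
#include <stdint.h>
#include <string.h>

static uint32_t loadBE32(const unsigned char *p){
#if (defined(__GNUC__) || defined(__clang__)) && \
    defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
  uint32_t v;
  memcpy(&v, p, 4);               /* memcpy avoids alignment assumptions */
  return __builtin_bswap32(v);    /* single bswap instruction on x86/ARM */
#else
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
       | ((uint32_t)p[2]<<8)  | ((uint32_t)p[3]);
#endif
}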
*/ -static int writeInt16(u8 *p, int i){ +static void writeInt16(u8 *p, int i){ p[0] = (i>> 8)&0xFF; p[1] = (i>> 0)&0xFF; - return 2; } static int writeCoord(u8 *p, RtreeCoord *pCoord){ u32 i; + assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */ assert( sizeof(RtreeCoord)==4 ); assert( sizeof(u32)==4 ); +#if SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + i = __builtin_bswap32(pCoord->u); + memcpy(p, &i, 4); +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + i = _byteswap_ulong(pCoord->u); + memcpy(p, &i, 4); +#elif SQLITE_BYTEORDER==4321 + i = pCoord->u; + memcpy(p, &i, 4); +#else i = pCoord->u; p[0] = (i>>24)&0xFF; p[1] = (i>>16)&0xFF; p[2] = (i>> 8)&0xFF; p[3] = (i>> 0)&0xFF; +#endif return 4; } static int writeInt64(u8 *p, i64 i){ +#if SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + i = (i64)__builtin_bswap64((u64)i); + memcpy(p, &i, 8); +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + i = (i64)_byteswap_uint64((u64)i); + memcpy(p, &i, 8); +#elif SQLITE_BYTEORDER==4321 + memcpy(p, &i, 8); +#else p[0] = (i>>56)&0xFF; p[1] = (i>>48)&0xFF; p[2] = (i>>40)&0xFF; @@ -161737,6 +163465,7 @@ static int writeInt64(u8 *p, i64 i){ p[5] = (i>>16)&0xFF; p[6] = (i>> 8)&0xFF; p[7] = (i>> 0)&0xFF; +#endif return 8; } @@ -161819,6 +163548,17 @@ static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){ return pNode; } +/* +** Clear the Rtree.pNodeBlob object +*/ +static void nodeBlobReset(Rtree *pRtree){ + if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){ + sqlite3_blob *pBlob = pRtree->pNodeBlob; + pRtree->pNodeBlob = 0; + sqlite3_blob_close(pBlob); + } +} + /* ** Obtain a reference to an r-tree node. */ @@ -161828,9 +163568,8 @@ static int nodeAcquire( RtreeNode *pParent, /* Either the parent node or NULL */ RtreeNode **ppNode /* OUT: Acquired node */ ){ - int rc; - int rc2 = SQLITE_OK; - RtreeNode *pNode; + int rc = SQLITE_OK; + RtreeNode *pNode = 0; /* Check if the requested node is already in the hash table. If so, ** increase its reference count and return it. @@ -161846,28 +163585,45 @@ static int nodeAcquire( return SQLITE_OK; } - sqlite3_bind_int64(pRtree->pReadNode, 1, iNode); - rc = sqlite3_step(pRtree->pReadNode); - if( rc==SQLITE_ROW ){ - const u8 *zBlob = sqlite3_column_blob(pRtree->pReadNode, 0); - if( pRtree->iNodeSize==sqlite3_column_bytes(pRtree->pReadNode, 0) ){ - pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode)+pRtree->iNodeSize); - if( !pNode ){ - rc2 = SQLITE_NOMEM; - }else{ - pNode->pParent = pParent; - pNode->zData = (u8 *)&pNode[1]; - pNode->nRef = 1; - pNode->iNode = iNode; - pNode->isDirty = 0; - pNode->pNext = 0; - memcpy(pNode->zData, zBlob, pRtree->iNodeSize); - nodeReference(pParent); - } + if( pRtree->pNodeBlob ){ + sqlite3_blob *pBlob = pRtree->pNodeBlob; + pRtree->pNodeBlob = 0; + rc = sqlite3_blob_reopen(pBlob, iNode); + pRtree->pNodeBlob = pBlob; + if( rc ){ + nodeBlobReset(pRtree); + if( rc==SQLITE_NOMEM ) return SQLITE_NOMEM; + } + } + if( pRtree->pNodeBlob==0 ){ + char *zTab = sqlite3_mprintf("%s_node", pRtree->zName); + if( zTab==0 ) return SQLITE_NOMEM; + rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0, + &pRtree->pNodeBlob); + sqlite3_free(zTab); + } + if( rc ){ + nodeBlobReset(pRtree); + *ppNode = 0; + /* If unable to open an sqlite3_blob on the desired row, that can only + ** be because the shadow tables hold erroneous data. 
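The nodeAcquire() rewrite that follows drops the cached SELECT on the %_node shadow table in favour of one incremental-blob handle that is re-pointed at each node. A minimal sketch of that access pattern using the public blob API; the table and column names ("demo_node", "data") are hypothetical:

/* Illustrative sketch only: *ppBlob caches the open handle across calls. */
#include "sqlite3.h"

static int readNode(sqlite3 *db, sqlite3_blob **ppBlob,
                    sqlite3_int64 iNode, void *pBuf, int nBuf){
  int rc;
  if( *ppBlob ){
    rc = sqlite3_blob_reopen(*ppBlob, iNode);   /* cheap: same table/column */
  }else{
    rc = sqlite3_blob_open(db, "main", "demo_node", "data", iNode, 0, ppBlob);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_blob_read(*ppBlob, pBuf, nBuf, 0);
  }
  if( rc!=SQLITE_OK && *ppBlob ){
    sqlite3_blob_close(*ppBlob);                /* handle unusable after error */
    *ppBlob = 0;
  }
  return rc;
}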
*/ + if( rc==SQLITE_ERROR ) rc = SQLITE_CORRUPT_VTAB; + }else if( pRtree->iNodeSize==sqlite3_blob_bytes(pRtree->pNodeBlob) ){ + pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode)+pRtree->iNodeSize); + if( !pNode ){ + rc = SQLITE_NOMEM; + }else{ + pNode->pParent = pParent; + pNode->zData = (u8 *)&pNode[1]; + pNode->nRef = 1; + pNode->iNode = iNode; + pNode->isDirty = 0; + pNode->pNext = 0; + rc = sqlite3_blob_read(pRtree->pNodeBlob, pNode->zData, + pRtree->iNodeSize, 0); + nodeReference(pParent); } } - rc = sqlite3_reset(pRtree->pReadNode); - if( rc==SQLITE_OK ) rc = rc2; /* If the root node was just loaded, set pRtree->iDepth to the height ** of the r-tree structure. A height of zero means all data is stored on @@ -161919,7 +163675,7 @@ static void nodeOverwriteCell( int ii; u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; p += writeInt64(p, pCell->iRowid); - for(ii=0; ii<(pRtree->nDim*2); ii++){ + for(ii=0; iinDim2; ii++){ p += writeCoord(p, &pCell->aCoord[ii]); } pNode->isDirty = 1; @@ -162053,13 +163809,16 @@ static void nodeGetCell( ){ u8 *pData; RtreeCoord *pCoord; - int ii; + int ii = 0; pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell); pData = pNode->zData + (12 + pRtree->nBytesPerCell*iCell); pCoord = pCell->aCoord; - for(ii=0; iinDim*2; ii++){ - readCoord(&pData[ii*4], &pCoord[ii]); - } + do{ + readCoord(pData, &pCoord[ii]); + readCoord(pData+4, &pCoord[ii+1]); + pData += 8; + ii += 2; + }while( iinDim2 ); } @@ -162110,7 +163869,9 @@ static void rtreeReference(Rtree *pRtree){ static void rtreeRelease(Rtree *pRtree){ pRtree->nBusy--; if( pRtree->nBusy==0 ){ - sqlite3_finalize(pRtree->pReadNode); + pRtree->inWrTrans = 0; + pRtree->nCursor = 0; + nodeBlobReset(pRtree); sqlite3_finalize(pRtree->pWriteNode); sqlite3_finalize(pRtree->pDeleteNode); sqlite3_finalize(pRtree->pReadRowid); @@ -162148,6 +163909,7 @@ static int rtreeDestroy(sqlite3_vtab *pVtab){ if( !zCreate ){ rc = SQLITE_NOMEM; }else{ + nodeBlobReset(pRtree); rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0); sqlite3_free(zCreate); } @@ -162163,6 +163925,7 @@ static int rtreeDestroy(sqlite3_vtab *pVtab){ */ static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ int rc = SQLITE_NOMEM; + Rtree *pRtree = (Rtree *)pVTab; RtreeCursor *pCsr; pCsr = (RtreeCursor *)sqlite3_malloc(sizeof(RtreeCursor)); @@ -162170,6 +163933,7 @@ static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ memset(pCsr, 0, sizeof(RtreeCursor)); pCsr->base.pVtab = pVTab; rc = SQLITE_OK; + pRtree->nCursor++; } *ppCursor = (sqlite3_vtab_cursor *)pCsr; @@ -162202,10 +163966,13 @@ static int rtreeClose(sqlite3_vtab_cursor *cur){ Rtree *pRtree = (Rtree *)(cur->pVtab); int ii; RtreeCursor *pCsr = (RtreeCursor *)cur; + assert( pRtree->nCursor>0 ); freeCursorConstraints(pCsr); sqlite3_free(pCsr->aPoint); for(ii=0; iiaNode[ii]); sqlite3_free(pCsr); + pRtree->nCursor--; + nodeBlobReset(pRtree); return SQLITE_OK; } @@ -162228,15 +163995,22 @@ static int rtreeEof(sqlite3_vtab_cursor *cur){ ** false. a[] is the four bytes of the on-disk record to be decoded. ** Store the results in "r". ** -** There are three versions of this macro, one each for little-endian and -** big-endian processors and a third generic implementation. The endian- -** specific implementations are much faster and are preferred if the -** processor endianness is known at compile-time. 
The SQLITE_BYTEORDER -** macro is part of sqliteInt.h and hence the endian-specific -** implementation will only be used if this module is compiled as part -** of the amalgamation. +** There are five versions of this macro. The last one is generic. The +** other four are various architectures-specific optimizations. */ -#if defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==1234 +#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 +#define RTREE_DECODE_COORD(eInt, a, r) { \ + RtreeCoord c; /* Coordinate decoded */ \ + c.u = _byteswap_ulong(*(u32*)a); \ + r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ +} +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) +#define RTREE_DECODE_COORD(eInt, a, r) { \ + RtreeCoord c; /* Coordinate decoded */ \ + c.u = __builtin_bswap32(*(u32*)a); \ + r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ +} +#elif SQLITE_BYTEORDER==1234 #define RTREE_DECODE_COORD(eInt, a, r) { \ RtreeCoord c; /* Coordinate decoded */ \ memcpy(&c.u,a,4); \ @@ -162244,7 +164018,7 @@ static int rtreeEof(sqlite3_vtab_cursor *cur){ ((c.u&0xff)<<24)|((c.u&0xff00)<<8); \ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ } -#elif defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==4321 +#elif SQLITE_BYTEORDER==4321 #define RTREE_DECODE_COORD(eInt, a, r) { \ RtreeCoord c; /* Coordinate decoded */ \ memcpy(&c.u,a,4); \ @@ -162271,10 +164045,10 @@ static int rtreeCallbackConstraint( sqlite3_rtree_dbl *prScore, /* OUT: score for the cell */ int *peWithin /* OUT: visibility of the cell */ ){ - int i; /* Loop counter */ sqlite3_rtree_query_info *pInfo = pConstraint->pInfo; /* Callback info */ int nCoord = pInfo->nCoord; /* No. of coordinates */ int rc; /* Callback return code */ + RtreeCoord c; /* Translator union */ sqlite3_rtree_dbl aCoord[RTREE_MAX_DIMENSIONS*2]; /* Decoded coordinates */ assert( pConstraint->op==RTREE_MATCH || pConstraint->op==RTREE_QUERY ); @@ -162284,13 +164058,41 @@ static int rtreeCallbackConstraint( pInfo->iRowid = readInt64(pCellData); } pCellData += 8; - for(i=0; iop==RTREE_MATCH ){ + int eWithin = 0; rc = pConstraint->u.xGeom((sqlite3_rtree_geometry*)pInfo, - nCoord, aCoord, &i); - if( i==0 ) *peWithin = NOT_WITHIN; + nCoord, aCoord, &eWithin); + if( eWithin==0 ) *peWithin = NOT_WITHIN; *prScore = RTREE_ZERO; }else{ pInfo->aCoord = aCoord; @@ -162326,6 +164128,7 @@ static void rtreeNonleafConstraint( assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE || p->op==RTREE_GT || p->op==RTREE_EQ ); + assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */ switch( p->op ){ case RTREE_LE: case RTREE_LT: @@ -162366,6 +164169,7 @@ static void rtreeLeafConstraint( assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE || p->op==RTREE_GT || p->op==RTREE_EQ ); pCellData += 8 + p->iCoord*4; + assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */ RTREE_DECODE_COORD(eInt, pCellData, xN); switch( p->op ){ case RTREE_LE: if( xN <= p->u.rValue ) return; break; @@ -162434,7 +164238,7 @@ static int rtreeSearchPointCompare( } /* -** Interchange to search points in a cursor. +** Interchange two search points in a cursor. 
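Every RTREE_DECODE_COORD variant above ends the same way: the byte-swapped 32-bit value is reinterpreted as either an integer or a 32-bit float through the RtreeCoord union. A tiny standalone sketch of that final step:

/* Illustrative sketch only: "Coord" mirrors RtreeCoord. */
#include <stdint.h>

typedef union Coord { uint32_t u; int32_t i; float f; } Coord;

static double decodeCoord(uint32_t uSwapped, int eInt){
  Coord c;
  c.u = uSwapped;                            /* already in host byte order */
  return eInt ? (double)c.i : (double)c.f;   /* integer vs. real32 r-trees */
}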
*/ static void rtreeSearchPointSwap(RtreeCursor *p, int i, int j){ RtreeSearchPoint t = p->aPoint[i]; @@ -162682,7 +164486,7 @@ static int rtreeStepToLeaf(RtreeCursor *pCur){ if( rScoreeWithin = eWithin; + p->eWithin = (u8)eWithin; p->id = x.id; p->iCell = x.iCell; RTREE_QUEUE_TRACE(pCur, "PUSH-S:"); @@ -162741,7 +164545,6 @@ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ if( i==0 ){ sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell)); }else{ - if( rc ) return rc; nodeGetCoord(pRtree, pNode, p->iCell, i-1, &c); #ifndef SQLITE_RTREE_INT_ONLY if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ @@ -162870,7 +164673,7 @@ static int rtreeFilter( p->id = iNode; p->eWithin = PARTLY_WITHIN; rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell); - p->iCell = iCell; + p->iCell = (u8)iCell; RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:"); }else{ pCsr->atEOF = 1; @@ -162903,7 +164706,7 @@ static int rtreeFilter( if( rc!=SQLITE_OK ){ break; } - p->pInfo->nCoord = pRtree->nDim*2; + p->pInfo->nCoord = pRtree->nDim2; p->pInfo->anQueue = pCsr->anQueue; p->pInfo->mxLevel = pRtree->iDepth + 1; }else{ @@ -162918,7 +164721,7 @@ static int rtreeFilter( } if( rc==SQLITE_OK ){ RtreeSearchPoint *pNew; - pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, pRtree->iDepth+1); + pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, (u8)(pRtree->iDepth+1)); if( pNew==0 ) return SQLITE_NOMEM; pNew->id = 1; pNew->iCell = 0; @@ -162936,19 +164739,6 @@ static int rtreeFilter( return rc; } -/* -** Set the pIdxInfo->estimatedRows variable to nRow. Unless this -** extension is currently being used by a version of SQLite too old to -** support estimatedRows. In that case this function is a no-op. -*/ -static void setEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){ -#if SQLITE_VERSION_NUMBER>=3008002 - if( sqlite3_libversion_number()>=3008002 ){ - pIdxInfo->estimatedRows = nRow; - } -#endif -} - /* ** Rtree virtual table module xBestIndex method. There are three ** table scan strategies to choose from (in order from most to @@ -163028,7 +164818,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ ** a single row. 
*/ pIdxInfo->estimatedCost = 30.0; - setEstimatedRows(pIdxInfo, 1); + pIdxInfo->estimatedRows = 1; return SQLITE_OK; } @@ -163046,7 +164836,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ break; } zIdxStr[iIdx++] = op; - zIdxStr[iIdx++] = p->iColumn - 1 + '0'; + zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0'); pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2); pIdxInfo->aConstraintUsage[ii].omit = 1; } @@ -163060,7 +164850,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ nRow = pRtree->nRowEst >> (iIdx/2); pIdxInfo->estimatedCost = (double)6.0 * (double)nRow; - setEstimatedRows(pIdxInfo, nRow); + pIdxInfo->estimatedRows = nRow; return rc; } @@ -163070,9 +164860,26 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ */ static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){ RtreeDValue area = (RtreeDValue)1; - int ii; - for(ii=0; ii<(pRtree->nDim*2); ii+=2){ - area = (area * (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii]))); + assert( pRtree->nDim>=1 && pRtree->nDim<=5 ); +#ifndef SQLITE_RTREE_INT_ONLY + if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ + switch( pRtree->nDim ){ + case 5: area = p->aCoord[9].f - p->aCoord[8].f; + case 4: area *= p->aCoord[7].f - p->aCoord[6].f; + case 3: area *= p->aCoord[5].f - p->aCoord[4].f; + case 2: area *= p->aCoord[3].f - p->aCoord[2].f; + default: area *= p->aCoord[1].f - p->aCoord[0].f; + } + }else +#endif + { + switch( pRtree->nDim ){ + case 5: area = p->aCoord[9].i - p->aCoord[8].i; + case 4: area *= p->aCoord[7].i - p->aCoord[6].i; + case 3: area *= p->aCoord[5].i - p->aCoord[4].i; + case 2: area *= p->aCoord[3].i - p->aCoord[2].i; + default: area *= p->aCoord[1].i - p->aCoord[0].i; + } } return area; } @@ -163082,11 +164889,12 @@ static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){ ** of the objects size in each dimension. */ static RtreeDValue cellMargin(Rtree *pRtree, RtreeCell *p){ - RtreeDValue margin = (RtreeDValue)0; - int ii; - for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + RtreeDValue margin = 0; + int ii = pRtree->nDim2 - 2; + do{ margin += (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii])); - } + ii -= 2; + }while( ii>=0 ); return margin; } @@ -163094,17 +164902,19 @@ static RtreeDValue cellMargin(Rtree *pRtree, RtreeCell *p){ ** Store the union of cells p1 and p2 in p1. 
*/ static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ - int ii; + int ii = 0; if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ - for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + do{ p1->aCoord[ii].f = MIN(p1->aCoord[ii].f, p2->aCoord[ii].f); p1->aCoord[ii+1].f = MAX(p1->aCoord[ii+1].f, p2->aCoord[ii+1].f); - } + ii += 2; + }while( iinDim2 ); }else{ - for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + do{ p1->aCoord[ii].i = MIN(p1->aCoord[ii].i, p2->aCoord[ii].i); p1->aCoord[ii+1].i = MAX(p1->aCoord[ii+1].i, p2->aCoord[ii+1].i); - } + ii += 2; + }while( iinDim2 ); } } @@ -163115,7 +164925,7 @@ static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ int ii; int isInt = (pRtree->eCoordType==RTREE_COORD_INT32); - for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + for(ii=0; iinDim2; ii+=2){ RtreeCoord *a1 = &p1->aCoord[ii]; RtreeCoord *a2 = &p2->aCoord[ii]; if( (!isInt && (a2[0].fa1[1].f)) @@ -163150,7 +164960,7 @@ static RtreeDValue cellOverlap( for(ii=0; iinDim*2); jj+=2){ + for(jj=0; jjnDim2; jj+=2){ RtreeDValue x1, x2; x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj])); x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1])); @@ -164206,7 +166016,7 @@ static int rtreeUpdate( ** This problem was discovered after years of use, so we silently ignore ** these kinds of misdeclared tables to avoid breaking any legacy. */ - assert( nData<=(pRtree->nDim*2 + 3) ); + assert( nData<=(pRtree->nDim2 + 3) ); #ifndef SQLITE_RTREE_INT_ONLY if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ @@ -164296,6 +166106,27 @@ static int rtreeUpdate( return rc; } +/* +** Called when a transaction starts. +*/ +static int rtreeBeginTransaction(sqlite3_vtab *pVtab){ + Rtree *pRtree = (Rtree *)pVtab; + assert( pRtree->inWrTrans==0 ); + pRtree->inWrTrans++; + return SQLITE_OK; +} + +/* +** Called when a transaction completes (either by COMMIT or ROLLBACK). +** The sqlite3_blob object should be released at this point. +*/ +static int rtreeEndTransaction(sqlite3_vtab *pVtab){ + Rtree *pRtree = (Rtree *)pVtab; + pRtree->inWrTrans = 0; + nodeBlobReset(pRtree); + return SQLITE_OK; +} + /* ** The xRename method for rtree module virtual tables. */ @@ -164317,6 +166148,7 @@ static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){ return rc; } + /* ** This function populates the pRtree->nRowEst variable with an estimate ** of the number of rows in the virtual table. 
If possible, this is based @@ -164376,15 +166208,15 @@ static sqlite3_module rtreeModule = { rtreeColumn, /* xColumn - read data */ rtreeRowid, /* xRowid - read data */ rtreeUpdate, /* xUpdate - write data */ - 0, /* xBegin - begin transaction */ - 0, /* xSync - sync transaction */ - 0, /* xCommit - commit transaction */ - 0, /* xRollback - rollback transaction */ + rtreeBeginTransaction, /* xBegin - begin transaction */ + rtreeEndTransaction, /* xSync - sync transaction */ + rtreeEndTransaction, /* xCommit - commit transaction */ + rtreeEndTransaction, /* xRollback - rollback transaction */ 0, /* xFindFunction - function overloading */ rtreeRename, /* xRename - rename the table */ 0, /* xSavepoint */ 0, /* xRelease */ - 0 /* xRollbackTo */ + 0, /* xRollbackTo */ }; static int rtreeSqlInit( @@ -164396,10 +166228,9 @@ static int rtreeSqlInit( ){ int rc = SQLITE_OK; - #define N_STATEMENT 9 + #define N_STATEMENT 8 static const char *azSql[N_STATEMENT] = { - /* Read and write the xxx_node table */ - "SELECT data FROM '%q'.'%q_node' WHERE nodeno = :1", + /* Write the xxx_node table */ "INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(:1, :2)", "DELETE FROM '%q'.'%q_node' WHERE nodeno = :1", @@ -164437,15 +166268,14 @@ static int rtreeSqlInit( } } - appStmt[0] = &pRtree->pReadNode; - appStmt[1] = &pRtree->pWriteNode; - appStmt[2] = &pRtree->pDeleteNode; - appStmt[3] = &pRtree->pReadRowid; - appStmt[4] = &pRtree->pWriteRowid; - appStmt[5] = &pRtree->pDeleteRowid; - appStmt[6] = &pRtree->pReadParent; - appStmt[7] = &pRtree->pWriteParent; - appStmt[8] = &pRtree->pDeleteParent; + appStmt[0] = &pRtree->pWriteNode; + appStmt[1] = &pRtree->pDeleteNode; + appStmt[2] = &pRtree->pReadRowid; + appStmt[3] = &pRtree->pWriteRowid; + appStmt[4] = &pRtree->pDeleteRowid; + appStmt[5] = &pRtree->pReadParent; + appStmt[6] = &pRtree->pWriteParent; + appStmt[7] = &pRtree->pDeleteParent; rc = rtreeQueryStat1(db, pRtree); for(i=0; ibase.pModule = &rtreeModule; pRtree->zDb = (char *)&pRtree[1]; pRtree->zName = &pRtree->zDb[nDb+1]; - pRtree->nDim = (argc-4)/2; - pRtree->nBytesPerCell = 8 + pRtree->nDim*4*2; - pRtree->eCoordType = eCoordType; + pRtree->nDim = (u8)((argc-4)/2); + pRtree->nDim2 = pRtree->nDim*2; + pRtree->nBytesPerCell = 8 + pRtree->nDim2*4; + pRtree->eCoordType = (u8)eCoordType; memcpy(pRtree->zDb, argv[1], nDb); memcpy(pRtree->zName, argv[2], nName); @@ -164658,7 +166489,8 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ UNUSED_PARAMETER(nArg); memset(&node, 0, sizeof(RtreeNode)); memset(&tree, 0, sizeof(Rtree)); - tree.nDim = sqlite3_value_int(apArg[0]); + tree.nDim = (u8)sqlite3_value_int(apArg[0]); + tree.nDim2 = tree.nDim*2; tree.nBytesPerCell = 8 + 8 * tree.nDim; node.zData = (u8 *)sqlite3_value_blob(apArg[1]); @@ -164671,7 +166503,7 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ nodeGetCell(&tree, &node, ii, &cell); sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid); nCell = (int)strlen(zCell); - for(jj=0; jjzName, p->nArg, p->enc, p->pContext, p->xFunc, 0, 0 + db, p->zName, p->nArg, p->enc, + p->iContext ? (void*)db : (void*)0, + p->xFunc, 0, 0 ); } @@ -168618,7 +170448,7 @@ static RbuState *rbuLoadState(sqlite3rbu *p){ ** Open the database handle and attach the RBU database as "rbu". If an ** error occurs, leave an error code and message in the RBU handle. 
*/ -static void rbuOpenDatabase(sqlite3rbu *p){ +static void rbuOpenDatabase(sqlite3rbu *p, int *pbRetry){ assert( p->rc || (p->dbMain==0 && p->dbRbu==0) ); assert( p->rc || rbuIsVacuum(p) || p->zTarget!=0 ); @@ -168693,7 +170523,7 @@ static void rbuOpenDatabase(sqlite3rbu *p){ }else{ RbuState *pState = rbuLoadState(p); if( pState ){ - bOpen = (pState->eStage>RBU_STAGE_MOVE); + bOpen = (pState->eStage>=RBU_STAGE_MOVE); rbuFreeState(pState); } } @@ -168705,6 +170535,15 @@ static void rbuOpenDatabase(sqlite3rbu *p){ if( !rbuIsVacuum(p) ){ p->dbMain = rbuOpenDbhandle(p, p->zTarget, 1); }else if( p->pRbuFd->pWalFd ){ + if( pbRetry ){ + p->pRbuFd->bNolock = 0; + sqlite3_close(p->dbRbu); + sqlite3_close(p->dbMain); + p->dbMain = 0; + p->dbRbu = 0; + *pbRetry = 1; + return; + } p->rc = SQLITE_ERROR; p->zErrmsg = sqlite3_mprintf("cannot vacuum wal mode database"); }else{ @@ -168885,16 +170724,18 @@ static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){ if( rc2!=SQLITE_INTERNAL ) p->rc = rc2; } - if( p->rc==SQLITE_OK ){ + if( p->rc==SQLITE_OK && p->nFrame>0 ){ p->eStage = RBU_STAGE_CKPT; p->nStep = (pState ? pState->nRow : 0); p->aBuf = rbuMalloc(p, p->pgsz); p->iWalCksum = rbuShmChecksum(p); } - if( p->rc==SQLITE_OK && pState && pState->iWalCksum!=p->iWalCksum ){ - p->rc = SQLITE_DONE; - p->eStage = RBU_STAGE_DONE; + if( p->rc==SQLITE_OK ){ + if( p->nFrame==0 || (pState && pState->iWalCksum!=p->iWalCksum) ){ + p->rc = SQLITE_DONE; + p->eStage = RBU_STAGE_DONE; + } } } @@ -169067,7 +170908,7 @@ static void rbuMoveOalFile(sqlite3rbu *p){ #endif if( p->rc==SQLITE_OK ){ - rbuOpenDatabase(p); + rbuOpenDatabase(p, 0); rbuSetupCheckpoint(p, 0); } } @@ -169778,6 +171619,7 @@ static sqlite3rbu *openRbuHandle( /* Open the target, RBU and state databases */ if( p->rc==SQLITE_OK ){ char *pCsr = (char*)&p[1]; + int bRetry = 0; if( zTarget ){ p->zTarget = pCsr; memcpy(p->zTarget, zTarget, nTarget+1); @@ -169789,7 +171631,18 @@ static sqlite3rbu *openRbuHandle( if( zState ){ p->zState = rbuMPrintf(p, "%s", zState); } - rbuOpenDatabase(p); + + /* If the first attempt to open the database file fails and the bRetry + ** flag it set, this means that the db was not opened because it seemed + ** to be a wal-mode db. But, this may have happened due to an earlier + ** RBU vacuum operation leaving an old wal file in the directory. + ** If this is the case, it will have been checkpointed and deleted + ** when the handle was closed and a second attempt to open the + ** database may succeed. */ + rbuOpenDatabase(p, &bRetry); + if( bRetry ){ + rbuOpenDatabase(p, 0); + } } if( p->rc==SQLITE_OK ){ @@ -172098,9 +173951,7 @@ static int sessionSerializeValue( if( aBuf ){ sessionVarintPut(&aBuf[1], n); - memcpy(&aBuf[nVarint + 1], eType==SQLITE_TEXT ? 
- sqlite3_value_text(pValue) : sqlite3_value_blob(pValue), n - ); + if( n ) memcpy(&aBuf[nVarint + 1], z, n); } nByte = 1 + nVarint + n; @@ -173516,7 +175367,7 @@ static void sessionAppendBlob( int nBlob, int *pRc ){ - if( 0==sessionBufferGrow(p, nBlob, pRc) ){ + if( nBlob>0 && 0==sessionBufferGrow(p, nBlob, pRc) ){ memcpy(&p->aBuf[p->nBuf], aBlob, nBlob); p->nBuf += nBlob; } @@ -173702,13 +175553,13 @@ static int sessionAppendUpdate( } default: { - int nByte; - int nHdr = 1 + sessionVarintGet(&pCsr[1], &nByte); + int n; + int nHdr = 1 + sessionVarintGet(&pCsr[1], &n); assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB ); - nAdvance = nHdr + nByte; + nAdvance = nHdr + n; if( eType==sqlite3_column_type(pStmt, i) - && nByte==sqlite3_column_bytes(pStmt, i) - && 0==memcmp(&pCsr[nHdr], sqlite3_column_blob(pStmt, i), nByte) + && n==sqlite3_column_bytes(pStmt, i) + && (n==0 || 0==memcmp(&pCsr[nHdr], sqlite3_column_blob(pStmt, i), n)) ){ break; } @@ -174754,7 +176605,7 @@ SQLITE_API int sqlite3changeset_conflict( if( !pIter->pConflict ){ return SQLITE_MISUSE; } - if( iVal<0 || iVal>=sqlite3_column_count(pIter->pConflict) ){ + if( iVal<0 || iVal>=pIter->nCol ){ return SQLITE_RANGE; } *ppValue = sqlite3_column_value(pIter->pConflict, iVal); @@ -175221,7 +177072,13 @@ static int sessionInsertRow( sessionAppendStr(&buf, "INSERT INTO main.", &rc); sessionAppendIdent(&buf, zTab, &rc); - sessionAppendStr(&buf, " VALUES(?", &rc); + sessionAppendStr(&buf, "(", &rc); + for(i=0; inCol; i++){ + if( i!=0 ) sessionAppendStr(&buf, ", ", &rc); + sessionAppendIdent(&buf, p->azCol[i], &rc); + } + + sessionAppendStr(&buf, ") VALUES(?", &rc); for(i=1; inCol; i++){ sessionAppendStr(&buf, ", ?", &rc); } @@ -175767,11 +177624,17 @@ static int sessionChangesetApply( nTab = (int)strlen(zTab); sApply.azCol = (const char **)zTab; }else{ + int nMinCol = 0; + int i; + sqlite3changeset_pk(pIter, &abPK, 0); rc = sessionTableInfo( db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK ); if( rc!=SQLITE_OK ) break; + for(i=0; i /* amalgamator: keep */ -# define safe_isdigit(x) isdigit((unsigned char)(x)) -# define safe_isalnum(x) isalnum((unsigned char)(x)) +# define safe_isdigit(x) isdigit((unsigned char)(x)) +# define safe_isalnum(x) isalnum((unsigned char)(x)) +# define safe_isxdigit(x) isxdigit((unsigned char)(x)) #endif /* @@ -176961,12 +178829,13 @@ static void jsonReturn( c = z[++i]; if( c=='u' ){ u32 v = 0, k; - for(k=0; k<4 && i='0' && c<='9' ) v = v*16 + c - '0'; - else if( c>='A' && c<='F' ) v = v*16 + c - 'A' + 10; - else if( c>='a' && c<='f' ) v = v*16 + c - 'a' + 10; - else break; + assert( safe_isxdigit(c) ); + if( c<='9' ) v = v*16 + c - '0'; + else if( c<='F' ) v = v*16 + c - 'A' + 10; + else v = v*16 + c - 'a' + 10; } if( v==0 ) break; if( v<=0x7f ){ @@ -177070,6 +178939,15 @@ static int jsonParseAddNode( return pParse->nNode++; } +/* +** Return true if z[] begins with 4 (or more) hexadecimal digits +*/ +static int jsonIs4Hex(const char *z){ + int i; + for(i=0; i<4; i++) if( !safe_isxdigit(z[i]) ) return 0; + return 1; +} + /* ** Parse a single JSON value which begins at pParse->zJson[i]. Return the ** index of the first character past the end of the value parsed. 
@@ -177144,8 +179022,13 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( c==0 ) return -1; if( c=='\\' ){ c = pParse->zJson[++j]; - if( c==0 ) return -1; - jnFlags = JNODE_ESCAPE; + if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f' + || c=='n' || c=='r' || c=='t' + || (c=='u' && jsonIs4Hex(pParse->zJson+j+1)) ){ + jnFlags = JNODE_ESCAPE; + }else{ + return -1; + } }else if( c=='"' ){ break; } @@ -178013,7 +179896,7 @@ static void jsonObjectFinal(sqlite3_context *ctx){ if( pStr ){ jsonAppendChar(pStr, '}'); if( pStr->bErr ){ - if( pStr->bErr==0 ) sqlite3_result_error_nomem(ctx); + if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx); assert( pStr->bStatic ); }else{ sqlite3_result_text(ctx, pStr->zBuf, pStr->nUsed, @@ -178291,9 +180174,9 @@ static int jsonEachColumn( /* For json_each() path and root are the same so fall through ** into the root case */ } - case JEACH_ROOT: { + default: { const char *zRoot = p->zRoot; - if( zRoot==0 ) zRoot = "$"; + if( zRoot==0 ) zRoot = "$"; sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC); break; } @@ -180424,6 +182307,31 @@ static int fts5yyGrowStack(fts5yyParser *p){ # define fts5YYMALLOCARGTYPE size_t #endif +/* Initialize a new parser that has already been allocated. +*/ +static void sqlite3Fts5ParserInit(void *fts5yypParser){ + fts5yyParser *pParser = (fts5yyParser*)fts5yypParser; +#ifdef fts5YYTRACKMAXSTACKDEPTH + pParser->fts5yyhwm = 0; +#endif +#if fts5YYSTACKDEPTH<=0 + pParser->fts5yytos = NULL; + pParser->fts5yystack = NULL; + pParser->fts5yystksz = 0; + if( fts5yyGrowStack(pParser) ){ + pParser->fts5yystack = &pParser->fts5yystk0; + pParser->fts5yystksz = 1; + } +#endif +#ifndef fts5YYNOERRORRECOVERY + pParser->fts5yyerrcnt = -1; +#endif + pParser->fts5yytos = pParser->fts5yystack; + pParser->fts5yystack[0].stateno = 0; + pParser->fts5yystack[0].major = 0; +} + +#ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK /* ** This function allocates a new parser. ** The only argument is a pointer to a function which works like @@ -180439,28 +182347,11 @@ static int fts5yyGrowStack(fts5yyParser *p){ static void *sqlite3Fts5ParserAlloc(void *(*mallocProc)(fts5YYMALLOCARGTYPE)){ fts5yyParser *pParser; pParser = (fts5yyParser*)(*mallocProc)( (fts5YYMALLOCARGTYPE)sizeof(fts5yyParser) ); - if( pParser ){ -#ifdef fts5YYTRACKMAXSTACKDEPTH - pParser->fts5yyhwm = 0; -#endif -#if fts5YYSTACKDEPTH<=0 - pParser->fts5yytos = NULL; - pParser->fts5yystack = NULL; - pParser->fts5yystksz = 0; - if( fts5yyGrowStack(pParser) ){ - pParser->fts5yystack = &pParser->fts5yystk0; - pParser->fts5yystksz = 1; - } -#endif -#ifndef fts5YYNOERRORRECOVERY - pParser->fts5yyerrcnt = -1; -#endif - pParser->fts5yytos = pParser->fts5yystack; - pParser->fts5yystack[0].stateno = 0; - pParser->fts5yystack[0].major = 0; - } + if( pParser ) sqlite3Fts5ParserInit(pParser); return pParser; } +#endif /* sqlite3Fts5Parser_ENGINEALWAYSONSTACK */ + /* The following function deletes the "minor type" or semantic value ** associated with a symbol. 
The symbol can be either a terminal @@ -180542,6 +182433,18 @@ static void fts5yy_pop_parser_stack(fts5yyParser *pParser){ fts5yy_destructor(pParser, fts5yytos->major, &fts5yytos->minor); } +/* +** Clear all secondary memory allocations from the parser +*/ +static void sqlite3Fts5ParserFinalize(void *p){ + fts5yyParser *pParser = (fts5yyParser*)p; + while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser); +#if fts5YYSTACKDEPTH<=0 + if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack); +#endif +} + +#ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK /* ** Deallocate and destroy a parser. Destructors are called for ** all stack elements before shutting the parser down. @@ -180554,16 +182457,13 @@ static void sqlite3Fts5ParserFree( void *p, /* The parser to be deleted */ void (*freeProc)(void*) /* Function used to reclaim memory */ ){ - fts5yyParser *pParser = (fts5yyParser*)p; #ifndef fts5YYPARSEFREENEVERNULL - if( pParser==0 ) return; -#endif - while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser); -#if fts5YYSTACKDEPTH<=0 - if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack); + if( p==0 ) return; #endif - (*freeProc)((void*)pParser); + sqlite3Fts5ParserFinalize(p); + (*freeProc)(p); } +#endif /* sqlite3Fts5Parser_ENGINEALWAYSONSTACK */ /* ** Return the peak depth of the stack for a parser. @@ -180674,7 +182574,6 @@ static int fts5yy_find_reduce_action( */ static void fts5yyStackOverflow(fts5yyParser *fts5yypParser){ sqlite3Fts5ParserARG_FETCH; - fts5yypParser->fts5yytos--; #ifndef NDEBUG if( fts5yyTraceFILE ){ fprintf(fts5yyTraceFILE,"%sStack Overflow!\n",fts5yyTracePrompt); @@ -180729,12 +182628,14 @@ static void fts5yy_shift( #endif #if fts5YYSTACKDEPTH>0 if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5YYSTACKDEPTH] ){ + fts5yypParser->fts5yytos--; fts5yyStackOverflow(fts5yypParser); return; } #else if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz] ){ if( fts5yyGrowStack(fts5yypParser) ){ + fts5yypParser->fts5yytos--; fts5yyStackOverflow(fts5yypParser); return; } @@ -184046,48 +185947,61 @@ static int fts5ExprNearTest( ** Initialize all term iterators in the pNear object. If any term is found ** to match no documents at all, return immediately without initializing any ** further iterators. +** +** If an error occurs, return an SQLite error code. Otherwise, return +** SQLITE_OK. It is not considered an error if some term matches zero +** documents. */ static int fts5ExprNearInitAll( Fts5Expr *pExpr, Fts5ExprNode *pNode ){ Fts5ExprNearset *pNear = pNode->pNear; - int i, j; - int rc = SQLITE_OK; - int bEof = 1; + int i; assert( pNode->bNomatch==0 ); - for(i=0; rc==SQLITE_OK && inPhrase; i++){ + for(i=0; inPhrase; i++){ Fts5ExprPhrase *pPhrase = pNear->apPhrase[i]; - for(j=0; jnTerm; j++){ - Fts5ExprTerm *pTerm = &pPhrase->aTerm[j]; - Fts5ExprTerm *p; + if( pPhrase->nTerm==0 ){ + pNode->bEof = 1; + return SQLITE_OK; + }else{ + int j; + for(j=0; jnTerm; j++){ + Fts5ExprTerm *pTerm = &pPhrase->aTerm[j]; + Fts5ExprTerm *p; + int bHit = 0; + + for(p=pTerm; p; p=p->pSynonym){ + int rc; + if( p->pIter ){ + sqlite3Fts5IterClose(p->pIter); + p->pIter = 0; + } + rc = sqlite3Fts5IndexQuery( + pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm), + (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) | + (pExpr->bDesc ? 
FTS5INDEX_QUERY_DESC : 0), + pNear->pColset, + &p->pIter + ); + assert( (rc==SQLITE_OK)==(p->pIter!=0) ); + if( rc!=SQLITE_OK ) return rc; + if( 0==sqlite3Fts5IterEof(p->pIter) ){ + bHit = 1; + } + } - for(p=pTerm; p && rc==SQLITE_OK; p=p->pSynonym){ - if( p->pIter ){ - sqlite3Fts5IterClose(p->pIter); - p->pIter = 0; - } - rc = sqlite3Fts5IndexQuery( - pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm), - (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) | - (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0), - pNear->pColset, - &p->pIter - ); - assert( rc==SQLITE_OK || p->pIter==0 ); - if( p->pIter && 0==sqlite3Fts5IterEof(p->pIter) ){ - bEof = 0; + if( bHit==0 ){ + pNode->bEof = 1; + return SQLITE_OK; } } - - if( bEof ) break; } - if( bEof ) break; } - pNode->bEof = bEof; - return rc; + pNode->bEof = 0; + return SQLITE_OK; } /* @@ -184220,7 +186134,7 @@ static int fts5ExprNodeTest_STRING( } }else{ Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter; - if( pIter->iRowid==iLast ) continue; + if( pIter->iRowid==iLast || pIter->bEof ) continue; bMatch = 0; if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){ return rc; @@ -184631,7 +186545,10 @@ static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bD /* If not at EOF but the current rowid occurs earlier than iFirst in ** the iteration order, move to document iFirst or later. */ - if( pRoot->bEof==0 && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0 ){ + if( rc==SQLITE_OK + && 0==pRoot->bEof + && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0 + ){ rc = fts5ExprNodeNext(p, pRoot, 1, iFirst); } @@ -184885,7 +186802,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( rc = fts5ParseStringFromToken(pToken, &z); if( rc==SQLITE_OK ){ - int flags = FTS5_TOKENIZE_QUERY | (bPrefix ? FTS5_TOKENIZE_QUERY : 0); + int flags = FTS5_TOKENIZE_QUERY | (bPrefix ? FTS5_TOKENIZE_PREFIX : 0); int n; sqlite3Fts5Dequote(z); n = (int)strlen(z); @@ -188559,7 +190476,7 @@ static void fts5SegIterNext( else if( pLeaf->nn>pLeaf->szLeaf ){ pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32( &pLeaf->p[pLeaf->szLeaf], iOff - ); + ); pIter->iLeafOffset = iOff; pIter->iEndofDoclist = iOff; bNewTerm = 1; @@ -188593,6 +190510,7 @@ static void fts5SegIterNext( */ int nSz; assert( p->rc==SQLITE_OK ); + assert( pIter->iLeafOffset<=pIter->pLeaf->nn ); fts5FastGetVarint32(pIter->pLeaf->p, pIter->iLeafOffset, nSz); pIter->bDel = (nSz & 0x0001); pIter->nPos = nSz>>1; @@ -189360,6 +191278,7 @@ static void fts5MultiIterNext( i64 iFrom /* Advance at least as far as this */ ){ int bUseFrom = bFrom; + assert( pIter->base.bEof==0 ); while( p->rc==SQLITE_OK ){ int iFirst = pIter->aFirst[1].iFirst; int bNewTerm = 0; @@ -189586,7 +191505,7 @@ static void fts5ChunkIterate( break; }else{ pgno++; - pData = fts5DataRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno)); + pData = fts5LeafRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno)); if( pData==0 ) break; pChunk = &pData->p[4]; nChunk = MIN(nRem, pData->szLeaf - 4); @@ -192348,7 +194267,7 @@ static void fts5IndexIntegrityCheckSegment( ** ignore this b-tree entry. Otherwise, load it into memory. 
*/ if( iIdxLeafpgnoFirst ) continue; iRow = FTS5_SEGMENT_ROWID(pSeg->iSegid, iIdxLeaf); - pLeaf = fts5DataRead(p, iRow); + pLeaf = fts5LeafRead(p, iRow); if( pLeaf==0 ) break; /* Check that the leaf contains at least one term, and that it is equal @@ -195624,7 +197543,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c", -1, SQLITE_TRANSIENT); } static int fts5Init(sqlite3 *db){ @@ -199486,4 +201405,4 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){ #else // USE_LIBSQLITE3 // If users really want to link against the system sqlite3 we // need to make this file a noop. - #endif \ No newline at end of file + #endif diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index d900cdd9c..460cf55e6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -122,13 +122,13 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.15.1" -#define SQLITE_VERSION_NUMBER 3015001 -#define SQLITE_SOURCE_ID "2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36" +#define SQLITE_VERSION "3.17.0" +#define SQLITE_VERSION_NUMBER 3017000 +#define SQLITE_SOURCE_ID "2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c" /* ** CAPI3REF: Run-Time Library Version Numbers -** KEYWORDS: sqlite3_version, sqlite3_sourceid +** KEYWORDS: sqlite3_version sqlite3_sourceid ** ** These interfaces provide the same information as the [SQLITE_VERSION], ** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros @@ -260,7 +260,11 @@ typedef struct sqlite3 sqlite3; */ #ifdef SQLITE_INT64_TYPE typedef SQLITE_INT64_TYPE sqlite_int64; - typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; +# ifdef SQLITE_UINT64_TYPE + typedef SQLITE_UINT64_TYPE sqlite_uint64; +# else + typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; +# endif #elif defined(_MSC_VER) || defined(__BORLANDC__) typedef __int64 sqlite_int64; typedef unsigned __int64 sqlite_uint64; @@ -573,7 +577,7 @@ SQLITE_API int sqlite3_exec( ** file that were written at the application level might have changed ** and that adjacent bytes, even bytes within the same sector are ** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN -** flag indicate that a file cannot be deleted when open. The +** flag indicates that a file cannot be deleted when open. The ** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on ** read-only media and cannot be changed even by processes with ** elevated privileges. @@ -723,6 +727,9 @@ struct sqlite3_file { **
<li> [SQLITE_IOCAP_ATOMIC64K]
 **  <li> [SQLITE_IOCAP_SAFE_APPEND]
 **  <li> [SQLITE_IOCAP_SEQUENTIAL]
+**  <li> [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN]
+**  <li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE]
+**  <li> [SQLITE_IOCAP_IMMUTABLE]
 ** </ul>
    ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of @@ -1036,6 +1043,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_VFS_POINTER 27 #define SQLITE_FCNTL_JOURNAL_POINTER 28 #define SQLITE_FCNTL_WIN32_GET_HANDLE 29 +#define SQLITE_FCNTL_PDB 30 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -1988,6 +1996,18 @@ struct sqlite3_mem_methods { ** until after the database connection closes. ** ** +**
<dt>SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE</dt>
+** <dd>
    Usually, when a database in wal mode is closed or detached from a +** database handle, SQLite checks if this will mean that there are now no +** connections at all to the database. If so, it performs a checkpoint +** operation before closing the connection. This option may be used to +** override this behaviour. The first parameter passed to this operation +** is an integer - non-zero to disable checkpoints-on-close, or zero (the +** default) to enable them. The second parameter is a pointer to an integer +** into which is written 0 or 1 to indicate whether checkpoints-on-close +** have been disabled - 0 if they are not disabled, 1 if they are. +**
    +** ** */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ @@ -1996,6 +2016,7 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */ +#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */ /* @@ -3597,6 +3618,10 @@ SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt); ** sqlite3_stmt_readonly() to return true since, while those statements ** change the configuration of a database connection, they do not make ** changes to the content of the database files on disk. +** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since +** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and +** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so +** sqlite3_stmt_readonly() returns false for those commands. */ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); @@ -3879,8 +3904,12 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); ** METHOD: sqlite3_stmt ** ** ^Return the number of columns in the result set returned by the -** [prepared statement]. ^This routine returns 0 if pStmt is an SQL -** statement that does not return data (for example an [UPDATE]). +** [prepared statement]. ^If this routine returns 0, that means the +** [prepared statement] returns no data (for example an [UPDATE]). +** ^However, just because this routine returns a positive number does not +** mean that one or more rows of data will be returned. ^A SELECT statement +** will always have a positive sqlite3_column_count() but depending on the +** WHERE clause constraints and the table content, it might return no rows. ** ** See also: [sqlite3_data_count()] */ @@ -5389,7 +5418,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); ** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified. ** ** ^In the current implementation, the update hook -** is not invoked when duplication rows are deleted because of an +** is not invoked when conflicting rows are deleted because of an ** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook ** invoked when rows are deleted using the [truncate optimization]. ** The exceptions defined in this paragraph might change in a future @@ -6171,6 +6200,12 @@ typedef struct sqlite3_blob sqlite3_blob; ** [database connection] error code and message accessible via ** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. ** +** A BLOB referenced by sqlite3_blob_open() may be read using the +** [sqlite3_blob_read()] interface and modified by using +** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a +** different row of the same table using the [sqlite3_blob_reopen()] +** interface. However, the column, table, or database of a [BLOB handle] +** cannot be changed after the [BLOB handle] is opened. ** ** ^(If the row that a BLOB handle points to is modified by an ** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects @@ -6194,6 +6229,10 @@ typedef struct sqlite3_blob sqlite3_blob; ** ** To avoid a resource leak, every open [BLOB handle] should eventually ** be released by a call to [sqlite3_blob_close()]. +** +** See also: [sqlite3_blob_close()], +** [sqlite3_blob_reopen()], [sqlite3_blob_read()], +** [sqlite3_blob_bytes()], [sqlite3_blob_write()]. 
*/ SQLITE_API int sqlite3_blob_open( sqlite3*, @@ -6209,11 +6248,11 @@ SQLITE_API int sqlite3_blob_open( ** CAPI3REF: Move a BLOB Handle to a New Row ** METHOD: sqlite3_blob ** -** ^This function is used to move an existing blob handle so that it points +** ^This function is used to move an existing [BLOB handle] so that it points ** to a different row of the same database table. ^The new row is identified ** by the rowid value passed as the second argument. Only the row can be ** changed. ^The database, table and column on which the blob handle is open -** remain the same. Moving an existing blob handle to a new row can be +** remain the same. Moving an existing [BLOB handle] to a new row is ** faster than closing the existing handle and opening a new one. ** ** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - @@ -8142,7 +8181,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** ** ^The [sqlite3_preupdate_hook()] interface registers a callback function ** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation -** on a [rowid table]. +** on a database table. ** ^At most one preupdate hook may be registered at a time on a single ** [database connection]; each call to [sqlite3_preupdate_hook()] overrides ** the previous setting. @@ -8151,9 +8190,9 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as ** the first parameter to callbacks. ** -** ^The preupdate hook only fires for changes to [rowid tables]; the preupdate -** hook is not invoked for changes to [virtual tables] or [WITHOUT ROWID] -** tables. +** ^The preupdate hook only fires for changes to real database tables; the +** preupdate hook is not invoked for changes to [virtual tables] or to +** system tables like sqlite_master or sqlite_stat1. ** ** ^The second parameter to the preupdate callback is a pointer to ** the [database connection] that registered the preupdate hook. @@ -8167,12 +8206,16 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** databases.)^ ** ^The fifth parameter to the preupdate callback is the name of the ** table that is being modified. -** ^The sixth parameter to the preupdate callback is the initial [rowid] of the -** row being changes for SQLITE_UPDATE and SQLITE_DELETE changes and is -** undefined for SQLITE_INSERT changes. -** ^The seventh parameter to the preupdate callback is the final [rowid] of -** the row being changed for SQLITE_UPDATE and SQLITE_INSERT changes and is -** undefined for SQLITE_DELETE changes. +** +** For an UPDATE or DELETE operation on a [rowid table], the sixth +** parameter passed to the preupdate callback is the initial [rowid] of the +** row being modified or deleted. For an INSERT operation on a rowid table, +** or any operation on a WITHOUT ROWID table, the value of the sixth +** parameter is undefined. For an INSERT or UPDATE on a rowid table the +** seventh parameter is the final rowid value of the row being inserted +** or updated. The value of the seventh parameter passed to the callback +** function is not defined for operations on WITHOUT ROWID tables, or for +** INSERT operations on rowid tables. 
** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces @@ -8212,7 +8255,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** ** See also: [sqlite3_update_hook()] */ -SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook( +#if defined(SQLITE_ENABLE_PREUPDATE_HOOK) +SQLITE_API void *sqlite3_preupdate_hook( sqlite3 *db, void(*xPreUpdate)( void *pCtx, /* Copy of third arg to preupdate_hook() */ @@ -8225,10 +8269,11 @@ SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook( ), void* ); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_count(sqlite3 *); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_depth(sqlite3 *); -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_count(sqlite3 *); +SQLITE_API int sqlite3_preupdate_depth(sqlite3 *); +SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); +#endif /* ** CAPI3REF: Low-level system error code @@ -8244,7 +8289,7 @@ SQLITE_API int sqlite3_system_errno(sqlite3*); /* ** CAPI3REF: Database Snapshot -** KEYWORDS: {snapshot} +** KEYWORDS: {snapshot} {sqlite3_snapshot} ** EXPERIMENTAL ** ** An instance of the snapshot object records the state of a [WAL mode] @@ -8268,7 +8313,9 @@ SQLITE_API int sqlite3_system_errno(sqlite3*); ** to an historical snapshot (if possible). The destructor for ** sqlite3_snapshot objects is [sqlite3_snapshot_free()]. */ -typedef struct sqlite3_snapshot sqlite3_snapshot; +typedef struct sqlite3_snapshot { + unsigned char hidden[48]; +} sqlite3_snapshot; /* ** CAPI3REF: Record A Database Snapshot @@ -8279,9 +8326,32 @@ typedef struct sqlite3_snapshot sqlite3_snapshot; ** schema S in database connection D. ^On success, the ** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly ** created [sqlite3_snapshot] object into *P and returns SQLITE_OK. -** ^If schema S of [database connection] D is not a [WAL mode] database -** that is in a read transaction, then [sqlite3_snapshot_get(D,S,P)] -** leaves the *P value unchanged and returns an appropriate [error code]. +** If there is not already a read-transaction open on schema S when +** this function is called, one is opened automatically. +** +** The following must be true for this function to succeed. If any of +** the following statements are false when sqlite3_snapshot_get() is +** called, SQLITE_ERROR is returned. The final value of *P is undefined +** in this case. +** +**
<ul>
+**  <li> The database handle must be in [autocommit mode].
+**
+**  <li> Schema S of [database connection] D must be a [WAL mode] database.
+**
+**  <li> There must not be a write transaction open on schema S of database
+**       connection D.
+**
+**  <li> One or more transactions must have been written to the current wal
+**       file since it was created on disk (by any connection). This means
+**       that a snapshot cannot be taken on a wal mode database with no wal
+**       file immediately after it is first opened. At least one transaction
+**       must be written to it first.
+** </ul>
    +** +** This function may also return SQLITE_NOMEM. If it is called with the +** database handle in autocommit mode but fails for some other reason, +** whether or not a read transaction is opened on schema S is undefined. ** ** The [sqlite3_snapshot] object returned from a successful call to ** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()] @@ -8374,6 +8444,28 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( sqlite3_snapshot *p2 ); +/* +** CAPI3REF: Recover snapshots from a wal file +** EXPERIMENTAL +** +** If all connections disconnect from a database file but do not perform +** a checkpoint, the existing wal file is opened along with the database +** file the next time the database is opened. At this point it is only +** possible to successfully call sqlite3_snapshot_open() to open the most +** recent snapshot of the database (the one at the head of the wal file), +** even though the wal file may contain other valid snapshots for which +** clients have sqlite3_snapshot handles. +** +** This function attempts to scan the wal file associated with database zDb +** of database handle db and make all valid snapshots available to +** sqlite3_snapshot_open(). It is an error if there is already a read +** transaction open on the database, or if the database is not a wal mode +** database. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -8559,7 +8651,7 @@ typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; ** attached database. It is not an error if database zDb is not attached ** to the database when the session object is created. */ -int sqlite3session_create( +SQLITE_API int sqlite3session_create( sqlite3 *db, /* Database handle */ const char *zDb, /* Name of db (e.g. "main") */ sqlite3_session **ppSession /* OUT: New session object */ @@ -8577,7 +8669,7 @@ int sqlite3session_create( ** are attached is closed. Refer to the documentation for ** [sqlite3session_create()] for details. */ -void sqlite3session_delete(sqlite3_session *pSession); +SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* @@ -8597,7 +8689,7 @@ void sqlite3session_delete(sqlite3_session *pSession); ** The return value indicates the final state of the session object: 0 if ** the session is disabled, or 1 if it is enabled. */ -int sqlite3session_enable(sqlite3_session *pSession, int bEnable); +SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable); /* ** CAPI3REF: Set Or Clear the Indirect Change Flag @@ -8626,7 +8718,7 @@ int sqlite3session_enable(sqlite3_session *pSession, int bEnable); ** The return value indicates the final state of the indirect flag: 0 if ** it is clear, or 1 if it is set. */ -int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); +SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); /* ** CAPI3REF: Attach A Table To A Session Object @@ -8656,7 +8748,7 @@ int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); ** SQLITE_OK is returned if the call completes without error. Or, if an error ** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned. 
*/ -int sqlite3session_attach( +SQLITE_API int sqlite3session_attach( sqlite3_session *pSession, /* Session object */ const char *zTab /* Table name */ ); @@ -8670,7 +8762,7 @@ int sqlite3session_attach( ** If xFilter returns 0, changes is not tracked. Note that once a table is ** attached, xFilter will not be called again. */ -void sqlite3session_table_filter( +SQLITE_API void sqlite3session_table_filter( sqlite3_session *pSession, /* Session object */ int(*xFilter)( void *pCtx, /* Copy of third arg to _filter_table() */ @@ -8783,7 +8875,7 @@ void sqlite3session_table_filter( ** another field of the same row is updated while the session is enabled, the ** resulting changeset will contain an UPDATE change that updates both fields. */ -int sqlite3session_changeset( +SQLITE_API int sqlite3session_changeset( sqlite3_session *pSession, /* Session object */ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ void **ppChangeset /* OUT: Buffer containing changeset */ @@ -8827,7 +8919,8 @@ int sqlite3session_changeset( ** the from-table, a DELETE record is added to the session object. ** **
  • For each row (primary key) that exists in both tables, but features -** different in each, an UPDATE record is added to the session. +** different non-PK values in each, an UPDATE record is added to the +** session. ** ** ** To clarify, if this function is called and then a changeset constructed @@ -8844,7 +8937,7 @@ int sqlite3session_changeset( ** message. It is the responsibility of the caller to free this buffer using ** sqlite3_free(). */ -int sqlite3session_diff( +SQLITE_API int sqlite3session_diff( sqlite3_session *pSession, const char *zFromDb, const char *zTbl, @@ -8880,7 +8973,7 @@ int sqlite3session_diff( ** a single table are grouped together, tables appear in the order in which ** they were attached to the session object). */ -int sqlite3session_patchset( +SQLITE_API int sqlite3session_patchset( sqlite3_session *pSession, /* Session object */ int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */ void **ppPatchset /* OUT: Buffer containing changeset */ @@ -8901,7 +8994,7 @@ int sqlite3session_patchset( ** guaranteed that a call to sqlite3session_changeset() will return a ** changeset containing zero changes. */ -int sqlite3session_isempty(sqlite3_session *pSession); +SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); /* ** CAPI3REF: Create An Iterator To Traverse A Changeset @@ -8936,7 +9029,7 @@ int sqlite3session_isempty(sqlite3_session *pSession); ** the applies to table X, then one for table Y, and then later on visit ** another change for table X. */ -int sqlite3changeset_start( +SQLITE_API int sqlite3changeset_start( sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */ int nChangeset, /* Size of changeset blob in bytes */ void *pChangeset /* Pointer to blob containing changeset */ @@ -8965,7 +9058,7 @@ int sqlite3changeset_start( ** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or ** SQLITE_NOMEM. */ -int sqlite3changeset_next(sqlite3_changeset_iter *pIter); +SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); /* ** CAPI3REF: Obtain The Current Operation From A Changeset Iterator @@ -8993,7 +9086,7 @@ int sqlite3changeset_next(sqlite3_changeset_iter *pIter); ** SQLite error code is returned. The values of the output variables may not ** be trusted in this case. */ -int sqlite3changeset_op( +SQLITE_API int sqlite3changeset_op( sqlite3_changeset_iter *pIter, /* Iterator object */ const char **pzTab, /* OUT: Pointer to table name */ int *pnCol, /* OUT: Number of columns in table */ @@ -9026,7 +9119,7 @@ int sqlite3changeset_op( ** SQLITE_OK is returned and the output variables populated as described ** above. */ -int sqlite3changeset_pk( +SQLITE_API int sqlite3changeset_pk( sqlite3_changeset_iter *pIter, /* Iterator object */ unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */ int *pnCol /* OUT: Number of entries in output array */ @@ -9056,7 +9149,7 @@ int sqlite3changeset_pk( ** If some other error occurs (e.g. an OOM condition), an SQLite error code ** is returned and *ppValue is set to NULL. */ -int sqlite3changeset_old( +SQLITE_API int sqlite3changeset_old( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int iVal, /* Column number */ sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */ @@ -9089,7 +9182,7 @@ int sqlite3changeset_old( ** If some other error occurs (e.g. an OOM condition), an SQLite error code ** is returned and *ppValue is set to NULL. 
*/ -int sqlite3changeset_new( +SQLITE_API int sqlite3changeset_new( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int iVal, /* Column number */ sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */ @@ -9116,7 +9209,7 @@ int sqlite3changeset_new( ** If some other error occurs (e.g. an OOM condition), an SQLite error code ** is returned and *ppValue is set to NULL. */ -int sqlite3changeset_conflict( +SQLITE_API int sqlite3changeset_conflict( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int iVal, /* Column number */ sqlite3_value **ppValue /* OUT: Value from conflicting row */ @@ -9132,7 +9225,7 @@ int sqlite3changeset_conflict( ** ** In all other cases this function returns SQLITE_MISUSE. */ -int sqlite3changeset_fk_conflicts( +SQLITE_API int sqlite3changeset_fk_conflicts( sqlite3_changeset_iter *pIter, /* Changeset iterator */ int *pnOut /* OUT: Number of FK violations */ ); @@ -9165,7 +9258,7 @@ int sqlite3changeset_fk_conflicts( ** // An error has occurred ** } */ -int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); +SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); /* ** CAPI3REF: Invert A Changeset @@ -9195,7 +9288,7 @@ int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); ** WARNING/TODO: This function currently assumes that the input is a valid ** changeset. If it is not, the results are undefined. */ -int sqlite3changeset_invert( +SQLITE_API int sqlite3changeset_invert( int nIn, const void *pIn, /* Input changeset */ int *pnOut, void **ppOut /* OUT: Inverse of input */ ); @@ -9224,7 +9317,7 @@ int sqlite3changeset_invert( ** ** Refer to the sqlite3_changegroup documentation below for details. */ -int sqlite3changeset_concat( +SQLITE_API int sqlite3changeset_concat( int nA, /* Number of bytes in buffer pA */ void *pA, /* Pointer to buffer containing changeset A */ int nB, /* Number of bytes in buffer pB */ @@ -9412,7 +9505,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); **
      **
    • The table has the same name as the name recorded in the ** changeset, and -**
    • The table has the same number of columns as recorded in the +**
    • The table has at least as many columns as recorded in the ** changeset, and **
    • The table has primary key columns in the same position as ** recorded in the changeset. @@ -9457,7 +9550,11 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** If a row with matching primary key values is found, but one or more of ** the non-primary key fields contains a value different from the original ** row value stored in the changeset, the conflict-handler function is -** invoked with [SQLITE_CHANGESET_DATA] as the second argument. +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the +** database table has more columns than are recorded in the changeset, +** only the values of those non-primary key fields are compared against +** the current database contents - any trailing database table columns +** are ignored. ** ** If no row with matching primary key values is found in the database, ** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] @@ -9472,7 +9569,9 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** **
      INSERT Changes
      ** For each INSERT change, an attempt is made to insert the new row into -** the database. +** the database. If the changeset row contains fewer fields than the +** database table, the trailing fields are populated with their default +** values. ** ** If the attempt to insert the row fails because the database already ** contains a row with the same primary key values, the conflict handler @@ -9490,13 +9589,13 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** For each UPDATE change, this function checks if the target database ** contains a row with the same primary key value (or values) as the ** original row values stored in the changeset. If it does, and the values -** stored in all non-primary key columns also match the values stored in -** the changeset the row is updated within the target database. +** stored in all modified non-primary key columns also match the values +** stored in the changeset the row is updated within the target database. ** ** If a row with matching primary key values is found, but one or more of -** the non-primary key fields contains a value different from an original -** row value stored in the changeset, the conflict-handler function is -** invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since +** the modified non-primary key fields contains a value different from an +** original row value stored in the changeset, the conflict-handler function +** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since ** UPDATE changes only contain values for non-primary key fields that are ** to be modified, only those fields need to match the original values to ** avoid the SQLITE_CHANGESET_DATA conflict-handler callback. @@ -9524,7 +9623,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*); ** rolled back, restoring the target database to its original state, and an ** SQLite error code returned. */ -int sqlite3changeset_apply( +SQLITE_API int sqlite3changeset_apply( sqlite3 *db, /* Apply change to "main" db of this handle */ int nChangeset, /* Size of changeset in bytes */ void *pChangeset, /* Changeset blob */ @@ -9725,7 +9824,7 @@ int sqlite3changeset_apply( ** parameter set to a value less than or equal to zero. Other than this, ** no guarantees are made as to the size of the chunks of data returned. 
*/ -int sqlite3changeset_apply_strm( +SQLITE_API int sqlite3changeset_apply_strm( sqlite3 *db, /* Apply change to "main" db of this handle */ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ void *pIn, /* First arg for xInput */ @@ -9740,7 +9839,7 @@ int sqlite3changeset_apply_strm( ), void *pCtx /* First argument passed to xConflict */ ); -int sqlite3changeset_concat_strm( +SQLITE_API int sqlite3changeset_concat_strm( int (*xInputA)(void *pIn, void *pData, int *pnData), void *pInA, int (*xInputB)(void *pIn, void *pData, int *pnData), @@ -9748,23 +9847,23 @@ int sqlite3changeset_concat_strm( int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); -int sqlite3changeset_invert_strm( +SQLITE_API int sqlite3changeset_invert_strm( int (*xInput)(void *pIn, void *pData, int *pnData), void *pIn, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); -int sqlite3changeset_start_strm( +SQLITE_API int sqlite3changeset_start_strm( sqlite3_changeset_iter **pp, int (*xInput)(void *pIn, void *pData, int *pnData), void *pIn ); -int sqlite3session_changeset_strm( +SQLITE_API int sqlite3session_changeset_strm( sqlite3_session *pSession, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); -int sqlite3session_patchset_strm( +SQLITE_API int sqlite3session_patchset_strm( sqlite3_session *pSession, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index efb50b995..d6937ae0f 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -10,6 +10,7 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE #cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4_UNICODE61 #cgo CFLAGS: -DSQLITE_TRACE_SIZE_LIMIT=15 +#cgo CFLAGS: -DSQLITE_DISABLE_INTRINSIC #cgo CFLAGS: -Wno-deprecated-declarations #ifndef USE_LIBSQLITE3 #include @@ -766,14 +767,18 @@ func (s *SQLiteStmt) query(ctx context.Context, args []namedValue) (driver.Rows, done: make(chan struct{}), } - go func() { + go func(db *C.sqlite3) { select { case <-ctx.Done(): - C.sqlite3_interrupt(s.c.db) - rows.Close() + select { + case <-rows.done: + default: + C.sqlite3_interrupt(db) + rows.Close() + } case <-rows.done: } - }() + }(s.c.db) return rows, nil } diff --git a/vendor/github.com/mattn/go-sqlite3/tool/upgrade.go b/vendor/github.com/mattn/go-sqlite3/tool/upgrade.go index adfe36365..f93f35c46 100644 --- a/vendor/github.com/mattn/go-sqlite3/tool/upgrade.go +++ b/vendor/github.com/mattn/go-sqlite3/tool/upgrade.go @@ -28,7 +28,7 @@ func main() { var url string doc.Find("a").Each(func(_ int, s *goquery.Selection) { if url == "" && strings.HasPrefix(s.Text(), "sqlite-amalgamation-") { - url = "https://www.sqlite.org/2016/" + s.Text() + url = "https://www.sqlite.org/2017/" + s.Text() } }) if url == "" { diff --git a/vendor/github.com/pivotal-golang/lager/LICENSE b/vendor/github.com/pivotal-golang/lager/LICENSE new file mode 100644 index 000000000..f49a4e16e --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/pivotal-golang/lager/NOTICE b/vendor/github.com/pivotal-golang/lager/NOTICE new file mode 100644 index 000000000..3c8dd5b60 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/NOTICE @@ -0,0 +1,20 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. diff --git a/vendor/github.com/pivotal-golang/lager/README.md b/vendor/github.com/pivotal-golang/lager/README.md new file mode 100644 index 000000000..c9f28cc6d --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/README.md @@ -0,0 +1,78 @@ +lager +===== + +**Note**: This repository should be imported as `code.cloudfoundry.org/lager`. + +Lager is a logging library for go. + +## Usage + +Instantiate a logger with the name of your component. + +```go +import ( + "code.cloudfoundry.org/lager" +) + +logger := lager.NewLogger("my-app") +``` + +### Sinks + +Lager can write logs to a variety of destinations. You can specify the destinations +using Lager sinks: + +To write to an arbitrary `Writer` object: + +```go +logger.RegisterSink(lager.NewWriterSink(myWriter, lager.INFO)) +``` + +### Emitting logs + +Lager supports the usual level-based logging, with an optional argument for arbitrary key-value data. + +```go +logger.Info("doing-stuff", lager.Data{ + "informative": true, +}) +``` + +output: +```json +{ "source": "my-app", "message": "doing-stuff", "data": { "informative": true }, "timestamp": 1232345, "log_level": 1 } +``` + +Error messages also take an `Error` object: + +```go +logger.Error("failed-to-do-stuff", errors.New("Something went wrong")) +``` + +output: +```json +{ "source": "my-app", "message": "failed-to-do-stuff", "data": { "error": "Something went wrong" }, "timestamp": 1232345, "log_level": 1 } +``` + +### Sessions + +You can avoid repetition of contextual data using 'Sessions': + +```go + +contextualLogger := logger.Session("my-task", lager.Data{ + "request-id": 5, +}) + +contextualLogger.Info("my-action") +``` + +output: + +```json +{ "source": "my-app", "message": "my-task.my-action", "data": { "request-id": 5 }, "timestamp": 1232345, "log_level": 1 } +``` + +## License + +Lager is [Apache 2.0](https://github.com/cloudfoundry/lager/blob/master/LICENSE) licensed. 
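The README snippets above compose into one small program. The following is a minimal sketch based only on those snippets and the vendored lager API added in this diff (`NewLogger`, `NewWriterSink`, `RegisterSink`, `Info`, `Error`, `Session`); the component name, sink destination, and data keys are illustrative, not part of this change.

```go
package main

import (
	"errors"
	"os"

	"code.cloudfoundry.org/lager"
)

func main() {
	// Instantiate a logger named after the component and attach a sink.
	logger := lager.NewLogger("my-app")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	// Level-based logging with optional key-value data.
	logger.Info("doing-stuff", lager.Data{"informative": true})

	// Error messages also take an error value, which lands under data["error"].
	logger.Error("failed-to-do-stuff", errors.New("Something went wrong"))

	// Sessions carry contextual data and a task prefix on every subsequent line.
	contextualLogger := logger.Session("my-task", lager.Data{"request-id": 5})
	contextualLogger.Info("my-action")
}
```

Each call emits one JSON object on stdout in the format shown in the README, with the session line's message prefixed by the task name ("my-app.my-task.my-action") and its data carrying the session's "request-id".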
diff --git a/vendor/github.com/pivotal-golang/lager/chug/chug.go b/vendor/github.com/pivotal-golang/lager/chug/chug.go new file mode 100644 index 000000000..80672fbcb --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/chug/chug.go @@ -0,0 +1,130 @@ +package chug + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "strings" + "time" + + "code.cloudfoundry.org/lager" +) + +type Entry struct { + IsLager bool + Raw []byte + Log LogEntry +} + +type LogEntry struct { + Timestamp time.Time + LogLevel lager.LogLevel + + Source string + Message string + Session string + + Error error + Trace string + + Data lager.Data +} + +func Chug(reader io.Reader, out chan<- Entry) { + scanner := bufio.NewReader(reader) + for { + line, err := scanner.ReadBytes('\n') + if line != nil { + out <- entry(bytes.TrimSuffix(line, []byte{'\n'})) + } + if err != nil { + break + } + } + close(out) +} + +func entry(raw []byte) (entry Entry) { + copiedBytes := make([]byte, len(raw)) + copy(copiedBytes, raw) + entry = Entry{ + IsLager: false, + Raw: copiedBytes, + } + + rawString := string(raw) + idx := strings.Index(rawString, "{") + if idx == -1 { + return + } + + var lagerLog lager.LogFormat + decoder := json.NewDecoder(strings.NewReader(rawString[idx:])) + err := decoder.Decode(&lagerLog) + if err != nil { + return + } + + entry.Log, entry.IsLager = convertLagerLog(lagerLog) + + return +} + +func convertLagerLog(lagerLog lager.LogFormat) (LogEntry, bool) { + timestamp, err := strconv.ParseFloat(lagerLog.Timestamp, 64) + + if err != nil { + return LogEntry{}, false + } + + data := lagerLog.Data + + var logErr error + if lagerLog.LogLevel == lager.ERROR || lagerLog.LogLevel == lager.FATAL { + dataErr, ok := lagerLog.Data["error"] + if ok { + errorString, ok := dataErr.(string) + if !ok { + return LogEntry{}, false + } + logErr = errors.New(errorString) + delete(lagerLog.Data, "error") + } + } + + var logTrace string + dataTrace, ok := lagerLog.Data["trace"] + if ok { + logTrace, ok = dataTrace.(string) + if !ok { + return LogEntry{}, false + } + delete(lagerLog.Data, "trace") + } + + var logSession string + dataSession, ok := lagerLog.Data["session"] + if ok { + logSession, ok = dataSession.(string) + if !ok { + return LogEntry{}, false + } + delete(lagerLog.Data, "session") + } + + return LogEntry{ + Timestamp: time.Unix(0, int64(timestamp*1e9)), + LogLevel: lagerLog.LogLevel, + Source: lagerLog.Source, + Message: lagerLog.Message, + Session: logSession, + + Error: logErr, + Trace: logTrace, + + Data: data, + }, true +} diff --git a/vendor/github.com/pivotal-golang/lager/chug/chug_suite_test.go b/vendor/github.com/pivotal-golang/lager/chug/chug_suite_test.go new file mode 100644 index 000000000..46cc34c22 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/chug/chug_suite_test.go @@ -0,0 +1,13 @@ +package chug_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestChug(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Chug Suite") +} diff --git a/vendor/github.com/pivotal-golang/lager/chug/chug_test.go b/vendor/github.com/pivotal-golang/lager/chug/chug_test.go new file mode 100644 index 000000000..7c2623165 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/chug/chug_test.go @@ -0,0 +1,247 @@ +package chug_test + +import ( + "errors" + "io" + "time" + + "code.cloudfoundry.org/lager" + . "code.cloudfoundry.org/lager/chug" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Chug", func() { + var ( + logger lager.Logger + stream chan Entry + pipeReader *io.PipeReader + pipeWriter *io.PipeWriter + ) + + BeforeEach(func() { + pipeReader, pipeWriter = io.Pipe() + logger = lager.NewLogger("chug-test") + logger.RegisterSink(lager.NewWriterSink(pipeWriter, lager.DEBUG)) + stream = make(chan Entry, 100) + go Chug(pipeReader, stream) + }) + + AfterEach(func() { + pipeWriter.Close() + Eventually(stream).Should(BeClosed()) + }) + + Context("when fed a stream of well-formed lager messages", func() { + It("should return parsed lager messages", func() { + data := lager.Data{"some-float": 3.0, "some-string": "foo"} + logger.Debug("chug", data) + logger.Info("again", data) + + entry := <-stream + Expect(entry.IsLager).To(BeTrue()) + Expect(entry.Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.DEBUG, + Source: "chug-test", + Message: "chug-test.chug", + Data: data, + })) + + entry = <-stream + Expect(entry.IsLager).To(BeTrue()) + Expect(entry.Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.INFO, + Source: "chug-test", + Message: "chug-test.again", + Data: data, + })) + + }) + + It("should parse the timestamp", func() { + logger.Debug("chug") + entry := <-stream + Expect(entry.Log.Timestamp).To(BeTemporally("~", time.Now(), 10*time.Millisecond)) + }) + + Context("when parsing an error message", func() { + It("should include the error", func() { + data := lager.Data{"some-float": 3.0, "some-string": "foo"} + logger.Error("chug", errors.New("some-error"), data) + Expect((<-stream).Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.ERROR, + Source: "chug-test", + Message: "chug-test.chug", + Error: errors.New("some-error"), + Data: lager.Data{"some-float": 3.0, "some-string": "foo"}, + })) + + }) + }) + + Context("when parsing an info message with an error", func() { + It("should not take the error out of the data map", func() { + data := lager.Data{"some-float": 3.0, "some-string": "foo", "error": "some-error"} + logger.Info("chug", data) + Expect((<-stream).Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.INFO, + Source: "chug-test", + Message: "chug-test.chug", + Error: nil, + Data: lager.Data{"some-float": 3.0, "some-string": "foo", "error": "some-error"}, + })) + + }) + }) + + Context("when multiple sessions have been established", func() { + It("should build up the task array appropriately", func() { + firstSession := logger.Session("first-session") + firstSession.Info("encabulate") + nestedSession := firstSession.Session("nested-session-1") + nestedSession.Info("baconize") + firstSession.Info("remodulate") + nestedSession.Info("ergonomize") + nestedSession = firstSession.Session("nested-session-2") + nestedSession.Info("modernify") + + Expect((<-stream).Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.INFO, + Source: "chug-test", + Message: "chug-test.first-session.encabulate", + Session: "1", + Data: lager.Data{}, + })) + + Expect((<-stream).Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.INFO, + Source: "chug-test", + Message: "chug-test.first-session.nested-session-1.baconize", + Session: "1.1", + Data: lager.Data{}, + })) + + Expect((<-stream).Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.INFO, + Source: "chug-test", + Message: "chug-test.first-session.remodulate", + Session: "1", + Data: lager.Data{}, + })) + + Expect((<-stream).Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.INFO, + Source: "chug-test", + Message: "chug-test.first-session.nested-session-1.ergonomize", + Session: "1.1", + Data: 
lager.Data{}, + })) + + Expect((<-stream).Log).To(MatchLogEntry(LogEntry{ + LogLevel: lager.INFO, + Source: "chug-test", + Message: "chug-test.first-session.nested-session-2.modernify", + Session: "1.2", + Data: lager.Data{}, + })) + + }) + }) + }) + + Context("handling lager JSON that is surrounded by non-JSON", func() { + var input []byte + var entry Entry + + BeforeEach(func() { + input = []byte(`[some-component][e]{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":0,"data":{"some-float":3,"some-string":"foo"}}...some trailing stuff`) + pipeWriter.Write(input) + pipeWriter.Write([]byte("\n")) + + Eventually(stream).Should(Receive(&entry)) + }) + + It("should be a lager message", func() { + Expect(entry.IsLager).To(BeTrue()) + }) + + It("should contain all the data in Raw", func() { + Expect(entry.Raw).To(Equal(input)) + }) + + It("should succesfully parse the lager message", func() { + Expect(entry.Log.Source).To(Equal("chug-test")) + }) + }) + + Context("handling malformed/non-lager data", func() { + var input []byte + var entry Entry + + JustBeforeEach(func() { + pipeWriter.Write(input) + pipeWriter.Write([]byte("\n")) + + Eventually(stream).Should(Receive(&entry)) + }) + + itReturnsRawData := func() { + It("returns raw data", func() { + Expect(entry.IsLager).To(BeFalse()) + Expect(entry.Log).To(BeZero()) + Expect(entry.Raw).To(Equal(input)) + }) + } + + Context("when fed a stream of malformed lager messages", func() { + Context("when the timestamp is invalid", func() { + BeforeEach(func() { + input = []byte(`{"timestamp":"tomorrow","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","error":7}}`) + }) + + itReturnsRawData() + }) + + Context("when the error does not parse", func() { + BeforeEach(func() { + input = []byte(`{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","error":7}}`) + }) + + itReturnsRawData() + }) + + Context("when the trace does not parse", func() { + BeforeEach(func() { + input = []byte(`{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","trace":7}}`) + }) + + itReturnsRawData() + }) + + Context("when the session does not parse", func() { + BeforeEach(func() { + input = []byte(`{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","session":7}}`) + }) + + itReturnsRawData() + }) + }) + + Context("When fed JSON that is not a lager message at all", func() { + BeforeEach(func() { + input = []byte(`{"source":"chattanooga"}`) + }) + + itReturnsRawData() + }) + + Context("When fed none-JSON that is not a lager message at all", func() { + BeforeEach(func() { + input = []byte(`ß`) + }) + + itReturnsRawData() + }) + }) +}) diff --git a/vendor/github.com/pivotal-golang/lager/chug/match_log_entry_test.go b/vendor/github.com/pivotal-golang/lager/chug/match_log_entry_test.go new file mode 100644 index 000000000..03d6c77b3 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/chug/match_log_entry_test.go @@ -0,0 +1,41 @@ +package chug_test + +import ( + "fmt" + "reflect" + + "code.cloudfoundry.org/lager/chug" + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" +) + +func MatchLogEntry(entry chug.LogEntry) types.GomegaMatcher { + return &logEntryMatcher{entry} +} + +type logEntryMatcher struct { + 
entry chug.LogEntry +} + +func (m *logEntryMatcher) Match(actual interface{}) (success bool, err error) { + actualEntry, ok := actual.(chug.LogEntry) + if !ok { + return false, fmt.Errorf("MatchLogEntry must be passed a chug.LogEntry. Got:\n%s", format.Object(actual, 1)) + } + + return m.entry.LogLevel == actualEntry.LogLevel && + m.entry.Source == actualEntry.Source && + m.entry.Message == actualEntry.Message && + m.entry.Session == actualEntry.Session && + reflect.DeepEqual(m.entry.Error, actualEntry.Error) && + m.entry.Trace == actualEntry.Trace && + reflect.DeepEqual(m.entry.Data, actualEntry.Data), nil +} + +func (m *logEntryMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to equal", m.entry) +} + +func (m *logEntryMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", m.entry) +} diff --git a/vendor/github.com/pivotal-golang/lager/chug/package.go b/vendor/github.com/pivotal-golang/lager/chug/package.go new file mode 100644 index 000000000..a29db591b --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/chug/package.go @@ -0,0 +1 @@ +package chug // import "code.cloudfoundry.org/lager/chug" diff --git a/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgo_reporter.go b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgo_reporter.go new file mode 100644 index 000000000..00b7b8f14 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgo_reporter.go @@ -0,0 +1,155 @@ +package ginkgoreporter + +import ( + "fmt" + "io" + "time" + + "code.cloudfoundry.org/lager" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type SuiteStartSummary struct { + RandomSeed int64 `json:"random_seed"` + SuiteDescription string `json:"description"` + NumberOfSpecsThatWillBeRun int `json:"num_specs"` +} + +type SuiteEndSummary struct { + SuiteDescription string `json:"description"` + Passed bool + NumberOfSpecsThatWillBeRun int `json:"num_specs"` + NumberOfPassedSpecs int `json:"num_passed"` + NumberOfFailedSpecs int `json:"num_failed"` +} + +type SpecSummary struct { + Name []string `json:"name"` + Location string `json:"location"` + State string `json:"state"` + Passed bool `json:"passed"` + RunTime time.Duration `json:"run_time"` + + StackTrace string `json:"stack_trace,omitempty"` +} + +type SetupSummary struct { + Name string `json:"name"` + State string `json:"state"` + Passed bool `json:"passed"` + RunTime time.Duration `json:"run_time,omitempty"` + + StackTrace string `json:"stack_trace,omitempty"` +} + +func New(writer io.Writer) *GinkgoReporter { + logger := lager.NewLogger("ginkgo") + logger.RegisterSink(lager.NewWriterSink(writer, lager.DEBUG)) + return &GinkgoReporter{ + writer: writer, + logger: logger, + } +} + +type GinkgoReporter struct { + logger lager.Logger + writer io.Writer + session lager.Logger +} + +func (g *GinkgoReporter) wrappedWithNewlines(f func()) { + g.writer.Write([]byte("\n")) + f() + g.writer.Write([]byte("\n")) +} + +func (g *GinkgoReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + if config.ParallelTotal > 1 { + var session = g.logger + for i := 0; i < config.ParallelNode; i++ { + session = g.logger.Session(fmt.Sprintf("node-%d", i+1)) + } + g.logger = session + } +} + +func (g *GinkgoReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { +} + +func (g *GinkgoReporter) SpecWillRun(specSummary *types.SpecSummary) { + g.wrappedWithNewlines(func() { + 
g.session = g.logger.Session("spec") + g.session.Info("start", lager.Data{ + "summary": SpecSummary{ + Name: specSummary.ComponentTexts, + Location: specSummary.ComponentCodeLocations[len(specSummary.ComponentTexts)-1].String(), + }, + }) + }) +} + +func (g *GinkgoReporter) SpecDidComplete(specSummary *types.SpecSummary) { + g.wrappedWithNewlines(func() { + if g.session == nil { + return + } + summary := SpecSummary{ + Name: specSummary.ComponentTexts, + Location: specSummary.ComponentCodeLocations[len(specSummary.ComponentTexts)-1].String(), + State: stateAsString(specSummary.State), + Passed: passed(specSummary.State), + RunTime: specSummary.RunTime, + } + + if passed(specSummary.State) { + g.session.Info("end", lager.Data{ + "summary": summary, + }) + } else { + summary.StackTrace = specSummary.Failure.Location.FullStackTrace + g.session.Error("end", errorForFailure(specSummary.Failure), lager.Data{ + "summary": summary, + }) + } + g.session = nil + }) +} + +func (g *GinkgoReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { +} + +func (g *GinkgoReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { +} + +func stateAsString(state types.SpecState) string { + switch state { + case types.SpecStatePending: + return "PENDING" + case types.SpecStateSkipped: + return "SKIPPED" + case types.SpecStatePassed: + return "PASSED" + case types.SpecStateFailed: + return "FAILED" + case types.SpecStatePanicked: + return "PANICKED" + case types.SpecStateTimedOut: + return "TIMED OUT" + default: + return "INVALID" + } +} + +func passed(state types.SpecState) bool { + return !(state == types.SpecStateFailed || state == types.SpecStatePanicked || state == types.SpecStateTimedOut) +} + +func errorForFailure(failure types.SpecFailure) error { + message := failure.Message + if failure.ForwardedPanic != "" { + message += fmt.Sprintf("%s", failure.ForwardedPanic) + } + + return fmt.Errorf("%s\n%s", message, failure.Location.String()) +} diff --git a/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgoreporter_suite_test.go b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgoreporter_suite_test.go new file mode 100644 index 000000000..fa0791408 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgoreporter_suite_test.go @@ -0,0 +1,13 @@ +package ginkgoreporter_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestGinkgoReporter(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "GinkgoReporter Suite") +} diff --git a/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgoreporter_test.go b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgoreporter_test.go new file mode 100644 index 000000000..49cde0731 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/ginkgoreporter_test.go @@ -0,0 +1,185 @@ +package ginkgoreporter_test + +import ( + "bytes" + "encoding/json" + "time" + + "code.cloudfoundry.org/lager" + "code.cloudfoundry.org/lager/chug" + . "code.cloudfoundry.org/lager/ginkgoreporter" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Ginkgoreporter", func() { + var ( + reporter reporters.Reporter + buffer *bytes.Buffer + ) + + BeforeEach(func() { + buffer = &bytes.Buffer{} + reporter = New(buffer) + }) + + fetchLogs := func() []chug.LogEntry { + out := make(chan chug.Entry, 1000) + chug.Chug(buffer, out) + logs := []chug.LogEntry{} + for entry := range out { + if entry.IsLager { + logs = append(logs, entry.Log) + } + } + return logs + } + + jsonRoundTrip := func(object interface{}) interface{} { + jsonEncoded, err := json.Marshal(object) + Expect(err).NotTo(HaveOccurred()) + var out interface{} + err = json.Unmarshal(jsonEncoded, &out) + Expect(err).NotTo(HaveOccurred()) + return out + } + + Describe("Announcing specs", func() { + var summary *types.SpecSummary + BeforeEach(func() { + summary = &types.SpecSummary{ + ComponentTexts: []string{"A", "B"}, + ComponentCodeLocations: []types.CodeLocation{ + { + FileName: "file/a", + LineNumber: 3, + FullStackTrace: "some-stack-trace", + }, + { + FileName: "file/b", + LineNumber: 4, + FullStackTrace: "some-stack-trace", + }, + }, + RunTime: time.Minute, + State: types.SpecStatePassed, + } + }) + + Context("when running in parallel", func() { + It("should include the node # in the session and message", func() { + configType := config.GinkgoConfigType{ + ParallelTotal: 3, + ParallelNode: 2, + } + suiteSummary := &types.SuiteSummary{} + reporter.SpecSuiteWillBegin(configType, suiteSummary) + + reporter.SpecWillRun(summary) + reporter.SpecDidComplete(summary) + reporter.SpecWillRun(summary) + reporter.SpecDidComplete(summary) + + logs := fetchLogs() + Expect(logs[0].Session).To(Equal("2.1")) + Expect(logs[0].Message).To(Equal("ginkgo.node-2.spec.start")) + Expect(logs[1].Session).To(Equal("2.1")) + Expect(logs[1].Message).To(Equal("ginkgo.node-2.spec.end")) + Expect(logs[2].Session).To(Equal("2.2")) + Expect(logs[0].Message).To(Equal("ginkgo.node-2.spec.start")) + Expect(logs[3].Session).To(Equal("2.2")) + Expect(logs[1].Message).To(Equal("ginkgo.node-2.spec.end")) + }) + }) + + Describe("incrementing sessions", func() { + It("should increment the session counter as specs run", func() { + reporter.SpecWillRun(summary) + reporter.SpecDidComplete(summary) + reporter.SpecWillRun(summary) + reporter.SpecDidComplete(summary) + + logs := fetchLogs() + Expect(logs[0].Session).To(Equal("1")) + Expect(logs[1].Session).To(Equal("1")) + Expect(logs[2].Session).To(Equal("2")) + Expect(logs[3].Session).To(Equal("2")) + }) + }) + + Context("when a spec starts", func() { + BeforeEach(func() { + reporter.SpecWillRun(summary) + }) + + It("should log about the spec starting", func() { + log := fetchLogs()[0] + Expect(log.LogLevel).To(Equal(lager.INFO)) + Expect(log.Source).To(Equal("ginkgo")) + Expect(log.Message).To(Equal("ginkgo.spec.start")) + Expect(log.Session).To(Equal("1")) + Expect(log.Data["summary"]).To(Equal(jsonRoundTrip(SpecSummary{ + Name: []string{"A", "B"}, + Location: "file/b:4", + }))) + + }) + + Context("when the spec succeeds", func() { + It("should info", func() { + reporter.SpecDidComplete(summary) + log := fetchLogs()[1] + Expect(log.LogLevel).To(Equal(lager.INFO)) + Expect(log.Source).To(Equal("ginkgo")) + Expect(log.Message).To(Equal("ginkgo.spec.end")) + Expect(log.Session).To(Equal("1")) + Expect(log.Data["summary"]).To(Equal(jsonRoundTrip(SpecSummary{ + Name: []string{"A", "B"}, + Location: "file/b:4", + State: "PASSED", + Passed: true, + RunTime: time.Minute, + }))) + + }) + }) + + Context("when the spec fails", func() { 
+ BeforeEach(func() { + summary.State = types.SpecStateFailed + summary.Failure = types.SpecFailure{ + Message: "something failed!", + Location: types.CodeLocation{ + FileName: "some/file", + LineNumber: 3, + FullStackTrace: "some-stack-trace", + }, + } + }) + + It("should error", func() { + reporter.SpecDidComplete(summary) + log := fetchLogs()[1] + Expect(log.LogLevel).To(Equal(lager.ERROR)) + Expect(log.Source).To(Equal("ginkgo")) + Expect(log.Message).To(Equal("ginkgo.spec.end")) + Expect(log.Session).To(Equal("1")) + Expect(log.Error.Error()).To(Equal("something failed!\nsome/file:3")) + Expect(log.Data["summary"]).To(Equal(jsonRoundTrip(SpecSummary{ + Name: []string{"A", "B"}, + Location: "file/b:4", + State: "FAILED", + Passed: false, + RunTime: time.Minute, + StackTrace: "some-stack-trace", + }))) + + }) + }) + }) + }) +}) diff --git a/vendor/github.com/pivotal-golang/lager/ginkgoreporter/package.go b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/package.go new file mode 100644 index 000000000..a9000a1b6 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/ginkgoreporter/package.go @@ -0,0 +1 @@ +package ginkgoreporter // import "code.cloudfoundry.org/lager/ginkgoreporter" diff --git a/vendor/github.com/pivotal-golang/lager/lager_suite_test.go b/vendor/github.com/pivotal-golang/lager/lager_suite_test.go new file mode 100644 index 000000000..b7670a7f1 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lager_suite_test.go @@ -0,0 +1,13 @@ +package lager_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestLager(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Lager Suite") +} diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/LICENSE b/vendor/github.com/pivotal-golang/lager/lagerflags/LICENSE new file mode 100644 index 000000000..d9a10c0d8 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/NOTICE b/vendor/github.com/pivotal-golang/lager/lagerflags/NOTICE new file mode 100644 index 000000000..b85f83fcc --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/NOTICE @@ -0,0 +1,15 @@ +CF Lager + +Copyright (c) 2014-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/README.md b/vendor/github.com/pivotal-golang/lager/lagerflags/README.md new file mode 100644 index 000000000..cc081ba2e --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/README.md @@ -0,0 +1,56 @@ +lagerflags +======== + +**Note**: This repository should be imported as `code.cloudfoundry.org/lager/lagerflags`. + +This library provides a flag called `logLevel`. The logger returned by +`lagerflags.New()` will use the value of that flag to determine the log level. + +To use, simply import this package in your `main.go` and call `lagerflags.New(COMPONENT_NAME)` to get a logger. 
+ +For example: + +```golang +package main + +import ( + "flag" + "fmt" + + "github.com/cloudfoundry/lager/lagerflags" + "github.com/cloudfoundry/lager" +) + +func main() { + lagerflags.AddFlags(flag.CommandLine) + + flag.Parse() + + logger, reconfigurableSink := lagerflags.New("my-component") + logger.Info("starting") + + // Display the current minimum log level + fmt.Printf("Current log level is ") + switch reconfigurableSink.GetMinLevel() { + case lager.DEBUG: + fmt.Println("debug") + case lager.INFO: + fmt.Println("info") + case lager.ERROR: + fmt.Println("error") + case lager.FATAL: + fmt.Println("fatal") + } + + // Change the minimum log level dynamically + reconfigurableSink.SetMinLevel(lager.ERROR) + logger.Debug("will-not-log") +} +``` + +Running the program above as `go run main.go --logLevel debug` will generate the following output: + +``` +{"timestamp":"1464388983.540486336","source":"my-component","message":"my-component.starting","log_level":1,"data":{}} +Current log level is debug +``` diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/integration/integration_suite_test.go b/vendor/github.com/pivotal-golang/lager/lagerflags/integration/integration_suite_test.go new file mode 100644 index 000000000..bc28aeb3b --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/integration/integration_suite_test.go @@ -0,0 +1,67 @@ +package main_test + +import ( + "os/exec" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "testing" +) + +var testBinary string + +func TestIntegration(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Integration Suite") +} + +var _ = BeforeSuite(func() { + var err error + testBinary, err = gexec.Build("code.cloudfoundry.org/lager/lagerflags/integration", "-race") + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + gexec.CleanupBuildArtifacts() +}) + +var _ = Describe("CF-Lager", func() { + It("provides flags", func() { + session, err := gexec.Start(exec.Command(testBinary, "--help"), GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + session.Wait() + Expect(session.Err.Contents()).To(ContainSubstring("-logLevel")) + }) + + It("pipes output to stdout", func() { + session, err := gexec.Start(exec.Command(testBinary), GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + session.Wait() + + Expect(session.Out.Contents()).To(ContainSubstring("info")) + }) + + It("defaults to the info log level", func() { + session, err := gexec.Start(exec.Command(testBinary), GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + session.Wait() + + Expect(session.Out.Contents()).NotTo(ContainSubstring("debug")) + Expect(session.Out.Contents()).To(ContainSubstring("info")) + Expect(session.Out.Contents()).To(ContainSubstring("error")) + Expect(session.Out.Contents()).To(ContainSubstring("fatal")) + }) + + It("honors the passed-in log level", func() { + session, err := gexec.Start(exec.Command(testBinary, "-logLevel=debug"), GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + session.Wait() + + Expect(session.Out.Contents()).To(ContainSubstring("debug")) + Expect(session.Out.Contents()).To(ContainSubstring("info")) + Expect(session.Out.Contents()).To(ContainSubstring("error")) + Expect(session.Out.Contents()).To(ContainSubstring("fatal")) + }) +}) diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/integration/main.go b/vendor/github.com/pivotal-golang/lager/lagerflags/integration/main.go new file mode 100644 index 
000000000..22896ec4c --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/integration/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "errors" + "flag" + + "code.cloudfoundry.org/lager" + "code.cloudfoundry.org/lager/lagerflags" +) + +func main() { + lagerflags.AddFlags(flag.CommandLine) + flag.Parse() + + logger, _ := lagerflags.New("cf-lager-integration") + + logger.Debug("component-does-action", lager.Data{"debug-detail": "foo"}) + logger.Info("another-component-action", lager.Data{"info-detail": "bar"}) + logger.Error("component-failed-something", errors.New("error"), lager.Data{"error-detail": "baz"}) + logger.Fatal("component-failed-badly", errors.New("fatal"), lager.Data{"fatal-detail": "quux"}) +} diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/integration/package.go b/vendor/github.com/pivotal-golang/lager/lagerflags/integration/package.go new file mode 100644 index 000000000..0a628ffdd --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/integration/package.go @@ -0,0 +1 @@ +package main // import "code.cloudfoundry.org/lager/lagerflags/integration" diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/lagerflags.go b/vendor/github.com/pivotal-golang/lager/lagerflags/lagerflags.go new file mode 100644 index 000000000..e22874b9c --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/lagerflags.go @@ -0,0 +1,69 @@ +package lagerflags + +import ( + "flag" + "fmt" + "os" + + "code.cloudfoundry.org/lager" +) + +const ( + DEBUG = "debug" + INFO = "info" + ERROR = "error" + FATAL = "fatal" +) + +type LagerConfig struct { + LogLevel string `json:"log_level,omitempty"` +} + +func DefaultLagerConfig() LagerConfig { + return LagerConfig{ + LogLevel: string(INFO), + } +} + +var minLogLevel string + +func AddFlags(flagSet *flag.FlagSet) { + flagSet.StringVar( + &minLogLevel, + "logLevel", + string(INFO), + "log level: debug, info, error or fatal", + ) +} + +func New(component string) (lager.Logger, *lager.ReconfigurableSink) { + return newLogger(component, minLogLevel) +} + +func NewFromConfig(component string, config LagerConfig) (lager.Logger, *lager.ReconfigurableSink) { + return newLogger(component, config.LogLevel) +} + +func newLogger(component, minLogLevel string) (lager.Logger, *lager.ReconfigurableSink) { + var minLagerLogLevel lager.LogLevel + + switch minLogLevel { + case DEBUG: + minLagerLogLevel = lager.DEBUG + case INFO: + minLagerLogLevel = lager.INFO + case ERROR: + minLagerLogLevel = lager.ERROR + case FATAL: + minLagerLogLevel = lager.FATAL + default: + panic(fmt.Errorf("unknown log level: %s", minLogLevel)) + } + + logger := lager.NewLogger(component) + + sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), minLagerLogLevel) + logger.RegisterSink(sink) + + return logger, sink +} diff --git a/vendor/github.com/pivotal-golang/lager/lagerflags/package.go b/vendor/github.com/pivotal-golang/lager/lagerflags/package.go new file mode 100644 index 000000000..84ef39d41 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagerflags/package.go @@ -0,0 +1 @@ +package lagerflags // import "code.cloudfoundry.org/lager/lagerflags" diff --git a/vendor/github.com/pivotal-golang/lager/lagertest/package.go b/vendor/github.com/pivotal-golang/lager/lagertest/package.go new file mode 100644 index 000000000..ed8804820 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagertest/package.go @@ -0,0 +1 @@ +package lagertest // import "code.cloudfoundry.org/lager/lagertest" diff 
--git a/vendor/github.com/pivotal-golang/lager/lagertest/test_sink.go b/vendor/github.com/pivotal-golang/lager/lagertest/test_sink.go new file mode 100644 index 000000000..79782ab05 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/lagertest/test_sink.go @@ -0,0 +1,71 @@ +package lagertest + +import ( + "bytes" + "encoding/json" + "io" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega/gbytes" + + "code.cloudfoundry.org/lager" +) + +type TestLogger struct { + lager.Logger + *TestSink +} + +type TestSink struct { + lager.Sink + buffer *gbytes.Buffer +} + +func NewTestLogger(component string) *TestLogger { + logger := lager.NewLogger(component) + + testSink := NewTestSink() + logger.RegisterSink(testSink) + logger.RegisterSink(lager.NewWriterSink(ginkgo.GinkgoWriter, lager.DEBUG)) + + return &TestLogger{logger, testSink} +} + +func NewTestSink() *TestSink { + buffer := gbytes.NewBuffer() + + return &TestSink{ + Sink: lager.NewWriterSink(buffer, lager.DEBUG), + buffer: buffer, + } +} + +func (s *TestSink) Buffer() *gbytes.Buffer { + return s.buffer +} + +func (s *TestSink) Logs() []lager.LogFormat { + logs := []lager.LogFormat{} + + decoder := json.NewDecoder(bytes.NewBuffer(s.buffer.Contents())) + for { + var log lager.LogFormat + if err := decoder.Decode(&log); err == io.EOF { + return logs + } else if err != nil { + panic(err) + } + logs = append(logs, log) + } + + return logs +} + +func (s *TestSink) LogMessages() []string { + logs := s.Logs() + messages := make([]string, 0, len(logs)) + for _, log := range logs { + messages = append(messages, log.Message) + } + return messages +} diff --git a/vendor/github.com/pivotal-golang/lager/logger.go b/vendor/github.com/pivotal-golang/lager/logger.go new file mode 100644 index 000000000..70727655a --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/logger.go @@ -0,0 +1,179 @@ +package lager + +import ( + "fmt" + "runtime" + "sync/atomic" + "time" +) + +const StackTraceBufferSize = 1024 * 100 + +type Logger interface { + RegisterSink(Sink) + Session(task string, data ...Data) Logger + SessionName() string + Debug(action string, data ...Data) + Info(action string, data ...Data) + Error(action string, err error, data ...Data) + Fatal(action string, err error, data ...Data) + WithData(Data) Logger +} + +type logger struct { + component string + task string + sinks []Sink + sessionID string + nextSession uint32 + data Data +} + +func NewLogger(component string) Logger { + return &logger{ + component: component, + task: component, + sinks: []Sink{}, + data: Data{}, + } +} + +func (l *logger) RegisterSink(sink Sink) { + l.sinks = append(l.sinks, sink) +} + +func (l *logger) SessionName() string { + return l.task +} + +func (l *logger) Session(task string, data ...Data) Logger { + sid := atomic.AddUint32(&l.nextSession, 1) + + var sessionIDstr string + + if l.sessionID != "" { + sessionIDstr = fmt.Sprintf("%s.%d", l.sessionID, sid) + } else { + sessionIDstr = fmt.Sprintf("%d", sid) + } + + return &logger{ + component: l.component, + task: fmt.Sprintf("%s.%s", l.task, task), + sinks: l.sinks, + sessionID: sessionIDstr, + data: l.baseData(data...), + } +} + +func (l *logger) WithData(data Data) Logger { + return &logger{ + component: l.component, + task: l.task, + sinks: l.sinks, + sessionID: l.sessionID, + data: l.baseData(data), + } +} + +func (l *logger) Debug(action string, data ...Data) { + log := LogFormat{ + Timestamp: currentTimestamp(), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: DEBUG, + 
Data: l.baseData(data...), + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Info(action string, data ...Data) { + log := LogFormat{ + Timestamp: currentTimestamp(), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: INFO, + Data: l.baseData(data...), + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Error(action string, err error, data ...Data) { + logData := l.baseData(data...) + + if err != nil { + logData["error"] = err.Error() + } + + log := LogFormat{ + Timestamp: currentTimestamp(), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: ERROR, + Data: logData, + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Fatal(action string, err error, data ...Data) { + logData := l.baseData(data...) + + stackTrace := make([]byte, StackTraceBufferSize) + stackSize := runtime.Stack(stackTrace, false) + stackTrace = stackTrace[:stackSize] + + if err != nil { + logData["error"] = err.Error() + } + + logData["trace"] = string(stackTrace) + + log := LogFormat{ + Timestamp: currentTimestamp(), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: FATAL, + Data: logData, + } + + for _, sink := range l.sinks { + sink.Log(log) + } + + panic(err) +} + +func (l *logger) baseData(givenData ...Data) Data { + data := Data{} + + for k, v := range l.data { + data[k] = v + } + + if len(givenData) > 0 { + for _, dataArg := range givenData { + for key, val := range dataArg { + data[key] = val + } + } + } + + if l.sessionID != "" { + data["session"] = l.sessionID + } + + return data +} + +func currentTimestamp() string { + return fmt.Sprintf("%.9f", float64(time.Now().UnixNano())/1e9) +} diff --git a/vendor/github.com/pivotal-golang/lager/logger_test.go b/vendor/github.com/pivotal-golang/lager/logger_test.go new file mode 100644 index 000000000..1d7e173ad --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/logger_test.go @@ -0,0 +1,358 @@ +package lager_test + +import ( + "errors" + "fmt" + "strconv" + "time" + + "code.cloudfoundry.org/lager" + "code.cloudfoundry.org/lager/lagertest" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Logger", func() { + var logger lager.Logger + var testSink *lagertest.TestSink + + var component = "my-component" + var action = "my-action" + var logData = lager.Data{ + "foo": "bar", + "a-number": 7, + } + var anotherLogData = lager.Data{ + "baz": "quux", + "b-number": 43, + } + + BeforeEach(func() { + logger = lager.NewLogger(component) + testSink = lagertest.NewTestSink() + logger.RegisterSink(testSink) + }) + + var TestCommonLogFeatures = func(level lager.LogLevel) { + var log lager.LogFormat + + BeforeEach(func() { + log = testSink.Logs()[0] + }) + + It("writes a log to the sink", func() { + Expect(testSink.Logs()).To(HaveLen(1)) + }) + + It("records the source component", func() { + Expect(log.Source).To(Equal(component)) + }) + + It("outputs a properly-formatted message", func() { + Expect(log.Message).To(Equal(fmt.Sprintf("%s.%s", component, action))) + }) + + It("has a timestamp", func() { + expectedTime := float64(time.Now().UnixNano()) / 1e9 + parsedTimestamp, err := strconv.ParseFloat(log.Timestamp, 64) + Expect(err).NotTo(HaveOccurred()) + Expect(parsedTimestamp).To(BeNumerically("~", expectedTime, 1.0)) + }) + + It("sets the proper output level", func() { + Expect(log.LogLevel).To(Equal(level)) + }) + } + + var TestLogData = func() { + var log lager.LogFormat + + BeforeEach(func() { + log = testSink.Logs()[0] + }) + + It("data contains custom user data", func() { + Expect(log.Data["foo"]).To(Equal("bar")) + Expect(log.Data["a-number"]).To(BeNumerically("==", 7)) + Expect(log.Data["baz"]).To(Equal("quux")) + Expect(log.Data["b-number"]).To(BeNumerically("==", 43)) + }) + } + + Describe("Session", func() { + var session lager.Logger + + BeforeEach(func() { + session = logger.Session("sub-action") + }) + + Describe("the returned logger", func() { + JustBeforeEach(func() { + session.Debug("some-debug-action", lager.Data{"level": "debug"}) + session.Info("some-info-action", lager.Data{"level": "info"}) + session.Error("some-error-action", errors.New("oh no!"), lager.Data{"level": "error"}) + + defer func() { + recover() + }() + + session.Fatal("some-fatal-action", errors.New("oh no!"), lager.Data{"level": "fatal"}) + }) + + It("logs with a shared session id in the data", func() { + Expect(testSink.Logs()[0].Data["session"]).To(Equal("1")) + Expect(testSink.Logs()[1].Data["session"]).To(Equal("1")) + Expect(testSink.Logs()[2].Data["session"]).To(Equal("1")) + Expect(testSink.Logs()[3].Data["session"]).To(Equal("1")) + }) + + It("logs with the task added to the message", func() { + Expect(testSink.Logs()[0].Message).To(Equal("my-component.sub-action.some-debug-action")) + Expect(testSink.Logs()[1].Message).To(Equal("my-component.sub-action.some-info-action")) + Expect(testSink.Logs()[2].Message).To(Equal("my-component.sub-action.some-error-action")) + Expect(testSink.Logs()[3].Message).To(Equal("my-component.sub-action.some-fatal-action")) + }) + + It("logs with the original data", func() { + Expect(testSink.Logs()[0].Data["level"]).To(Equal("debug")) + Expect(testSink.Logs()[1].Data["level"]).To(Equal("info")) + Expect(testSink.Logs()[2].Data["level"]).To(Equal("error")) + Expect(testSink.Logs()[3].Data["level"]).To(Equal("fatal")) + }) + + Context("with data", func() { + BeforeEach(func() { + session = logger.Session("sub-action", lager.Data{"foo": "bar"}) + }) + + It("logs with the data added to the message", func() { + Expect(testSink.Logs()[0].Data["foo"]).To(Equal("bar")) + 
Expect(testSink.Logs()[1].Data["foo"]).To(Equal("bar")) + Expect(testSink.Logs()[2].Data["foo"]).To(Equal("bar")) + Expect(testSink.Logs()[3].Data["foo"]).To(Equal("bar")) + }) + + It("keeps the original data", func() { + Expect(testSink.Logs()[0].Data["level"]).To(Equal("debug")) + Expect(testSink.Logs()[1].Data["level"]).To(Equal("info")) + Expect(testSink.Logs()[2].Data["level"]).To(Equal("error")) + Expect(testSink.Logs()[3].Data["level"]).To(Equal("fatal")) + }) + }) + + Context("with another session", func() { + BeforeEach(func() { + session = logger.Session("next-sub-action") + }) + + It("logs with a shared session id in the data", func() { + Expect(testSink.Logs()[0].Data["session"]).To(Equal("2")) + Expect(testSink.Logs()[1].Data["session"]).To(Equal("2")) + Expect(testSink.Logs()[2].Data["session"]).To(Equal("2")) + Expect(testSink.Logs()[3].Data["session"]).To(Equal("2")) + }) + + It("logs with the task added to the message", func() { + Expect(testSink.Logs()[0].Message).To(Equal("my-component.next-sub-action.some-debug-action")) + Expect(testSink.Logs()[1].Message).To(Equal("my-component.next-sub-action.some-info-action")) + Expect(testSink.Logs()[2].Message).To(Equal("my-component.next-sub-action.some-error-action")) + Expect(testSink.Logs()[3].Message).To(Equal("my-component.next-sub-action.some-fatal-action")) + }) + }) + + Describe("WithData", func() { + BeforeEach(func() { + session = logger.WithData(lager.Data{"foo": "bar"}) + }) + + It("returns a new logger with the given data", func() { + Expect(testSink.Logs()[0].Data["foo"]).To(Equal("bar")) + Expect(testSink.Logs()[1].Data["foo"]).To(Equal("bar")) + Expect(testSink.Logs()[2].Data["foo"]).To(Equal("bar")) + Expect(testSink.Logs()[3].Data["foo"]).To(Equal("bar")) + }) + + It("does not append to the logger's task", func() { + Expect(testSink.Logs()[0].Message).To(Equal("my-component.some-debug-action")) + }) + }) + + Context("with a nested session", func() { + BeforeEach(func() { + session = session.Session("sub-sub-action") + }) + + It("logs with a shared session id in the data", func() { + Expect(testSink.Logs()[0].Data["session"]).To(Equal("1.1")) + Expect(testSink.Logs()[1].Data["session"]).To(Equal("1.1")) + Expect(testSink.Logs()[2].Data["session"]).To(Equal("1.1")) + Expect(testSink.Logs()[3].Data["session"]).To(Equal("1.1")) + }) + + It("logs with the task added to the message", func() { + Expect(testSink.Logs()[0].Message).To(Equal("my-component.sub-action.sub-sub-action.some-debug-action")) + Expect(testSink.Logs()[1].Message).To(Equal("my-component.sub-action.sub-sub-action.some-info-action")) + Expect(testSink.Logs()[2].Message).To(Equal("my-component.sub-action.sub-sub-action.some-error-action")) + Expect(testSink.Logs()[3].Message).To(Equal("my-component.sub-action.sub-sub-action.some-fatal-action")) + }) + }) + }) + }) + + Describe("Debug", func() { + Context("with log data", func() { + BeforeEach(func() { + logger.Debug(action, logData, anotherLogData) + }) + + TestCommonLogFeatures(lager.DEBUG) + TestLogData() + }) + + Context("with no log data", func() { + BeforeEach(func() { + logger.Debug(action) + }) + + TestCommonLogFeatures(lager.DEBUG) + }) + }) + + Describe("Info", func() { + Context("with log data", func() { + BeforeEach(func() { + logger.Info(action, logData, anotherLogData) + }) + + TestCommonLogFeatures(lager.INFO) + TestLogData() + }) + + Context("with no log data", func() { + BeforeEach(func() { + logger.Info(action) + }) + + TestCommonLogFeatures(lager.INFO) + }) + }) + + 
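
For orientation while reading these vendored tests, here is a minimal illustrative sketch (not part of this change) of how the lager API shown above is typically driven outside Ginkgo; the component name, task name, and os.Stdout sink are assumptions for the example:

	package main

	import (
		"errors"
		"os"

		"code.cloudfoundry.org/lager"
	)

	func main() {
		// The component name becomes the "source" field and the message prefix.
		logger := lager.NewLogger("my-component")
		logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

		// Sessions append a task segment to the message and add a "session" id
		// (plus any session data) to every log line emitted through them.
		session := logger.Session("request", lager.Data{"request-id": "abc123"})
		session.Info("started")                     // message: my-component.request.started
		session.Error("failed", errors.New("boom")) // adds "error" to the log data
		session.Debug("ignored")                    // below INFO, dropped by the writer sink
	}
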
Describe("Error", func() { + var err = errors.New("oh noes!") + Context("with log data", func() { + BeforeEach(func() { + logger.Error(action, err, logData, anotherLogData) + }) + + TestCommonLogFeatures(lager.ERROR) + TestLogData() + + It("data contains error message", func() { + Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) + }) + }) + + Context("with no log data", func() { + BeforeEach(func() { + logger.Error(action, err) + }) + + TestCommonLogFeatures(lager.ERROR) + + It("data contains error message", func() { + Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) + }) + }) + + Context("with no error", func() { + BeforeEach(func() { + logger.Error(action, nil) + }) + + TestCommonLogFeatures(lager.ERROR) + + It("does not contain the error message", func() { + Expect(testSink.Logs()[0].Data).NotTo(HaveKey("error")) + }) + }) + }) + + Describe("Fatal", func() { + var err = errors.New("oh noes!") + var fatalErr interface{} + + Context("with log data", func() { + BeforeEach(func() { + defer func() { + fatalErr = recover() + }() + + logger.Fatal(action, err, logData, anotherLogData) + }) + + TestCommonLogFeatures(lager.FATAL) + TestLogData() + + It("data contains error message", func() { + Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) + }) + + It("data contains stack trace", func() { + Expect(testSink.Logs()[0].Data["trace"]).NotTo(BeEmpty()) + }) + + It("panics with the provided error", func() { + Expect(fatalErr).To(Equal(err)) + }) + }) + + Context("with no log data", func() { + BeforeEach(func() { + defer func() { + fatalErr = recover() + }() + + logger.Fatal(action, err) + }) + + TestCommonLogFeatures(lager.FATAL) + + It("data contains error message", func() { + Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) + }) + + It("data contains stack trace", func() { + Expect(testSink.Logs()[0].Data["trace"]).NotTo(BeEmpty()) + }) + + It("panics with the provided error", func() { + Expect(fatalErr).To(Equal(err)) + }) + }) + + Context("with no error", func() { + BeforeEach(func() { + defer func() { + fatalErr = recover() + }() + + logger.Fatal(action, nil) + }) + + TestCommonLogFeatures(lager.FATAL) + + It("does not contain the error message", func() { + Expect(testSink.Logs()[0].Data).NotTo(HaveKey("error")) + }) + + It("data contains stack trace", func() { + Expect(testSink.Logs()[0].Data["trace"]).NotTo(BeEmpty()) + }) + + It("panics with the provided error (i.e. 
nil)", func() { + Expect(fatalErr).To(BeNil()) + }) + }) + }) +}) diff --git a/vendor/github.com/pivotal-golang/lager/models.go b/vendor/github.com/pivotal-golang/lager/models.go new file mode 100644 index 000000000..d17b9b07d --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/models.go @@ -0,0 +1,39 @@ +package lager + +import ( + "encoding/json" + "fmt" +) + +type LogLevel int + +const ( + DEBUG LogLevel = iota + INFO + ERROR + FATAL +) + +type Data map[string]interface{} + +type LogFormat struct { + Timestamp string `json:"timestamp"` + Source string `json:"source"` + Message string `json:"message"` + LogLevel LogLevel `json:"log_level"` + Data Data `json:"data"` +} + +func (log LogFormat) ToJSON() []byte { + content, err := json.Marshal(log) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + log.Data = map[string]interface{}{"lager serialisation error": err.Error(), "data_dump": fmt.Sprintf("%#v", log.Data)} + content, err = json.Marshal(log) + } + if err != nil { + panic(err) + } + } + return content +} diff --git a/vendor/github.com/pivotal-golang/lager/package.go b/vendor/github.com/pivotal-golang/lager/package.go new file mode 100644 index 000000000..7e8b063de --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/package.go @@ -0,0 +1 @@ +package lager // import "code.cloudfoundry.org/lager" diff --git a/vendor/github.com/pivotal-golang/lager/reconfigurable_sink.go b/vendor/github.com/pivotal-golang/lager/reconfigurable_sink.go new file mode 100644 index 000000000..7c3b228e3 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/reconfigurable_sink.go @@ -0,0 +1,35 @@ +package lager + +import "sync/atomic" + +type ReconfigurableSink struct { + sink Sink + + minLogLevel int32 +} + +func NewReconfigurableSink(sink Sink, initialMinLogLevel LogLevel) *ReconfigurableSink { + return &ReconfigurableSink{ + sink: sink, + + minLogLevel: int32(initialMinLogLevel), + } +} + +func (sink *ReconfigurableSink) Log(log LogFormat) { + minLogLevel := LogLevel(atomic.LoadInt32(&sink.minLogLevel)) + + if log.LogLevel < minLogLevel { + return + } + + sink.sink.Log(log) +} + +func (sink *ReconfigurableSink) SetMinLevel(level LogLevel) { + atomic.StoreInt32(&sink.minLogLevel, int32(level)) +} + +func (sink *ReconfigurableSink) GetMinLevel() LogLevel { + return LogLevel(atomic.LoadInt32(&sink.minLogLevel)) +} diff --git a/vendor/github.com/pivotal-golang/lager/reconfigurable_sink_test.go b/vendor/github.com/pivotal-golang/lager/reconfigurable_sink_test.go new file mode 100644 index 000000000..466b73707 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/reconfigurable_sink_test.go @@ -0,0 +1,66 @@ +package lager_test + +import ( + "code.cloudfoundry.org/lager" + "code.cloudfoundry.org/lager/lagertest" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ReconfigurableSink", func() { + var ( + testSink *lagertest.TestSink + + sink *lager.ReconfigurableSink + ) + + BeforeEach(func() { + testSink = lagertest.NewTestSink() + + sink = lager.NewReconfigurableSink(testSink, lager.INFO) + }) + + It("returns the current level", func() { + Expect(sink.GetMinLevel()).To(Equal(lager.INFO)) + }) + + Context("when logging above the minimum log level", func() { + var log lager.LogFormat + + BeforeEach(func() { + log = lager.LogFormat{LogLevel: lager.INFO, Message: "hello world"} + sink.Log(log) + }) + + It("writes to the given sink", func() { + Expect(testSink.Buffer().Contents()).To(MatchJSON(log.ToJSON())) + }) + }) + + Context("when logging below the minimum log level", func() { + BeforeEach(func() { + sink.Log(lager.LogFormat{LogLevel: lager.DEBUG, Message: "hello world"}) + }) + + It("does not write to the given writer", func() { + Expect(testSink.Buffer().Contents()).To(BeEmpty()) + }) + }) + + Context("when reconfigured to a new log level", func() { + BeforeEach(func() { + sink.SetMinLevel(lager.DEBUG) + }) + + It("writes logs above the new log level", func() { + log := lager.LogFormat{LogLevel: lager.DEBUG, Message: "hello world"} + sink.Log(log) + Expect(testSink.Buffer().Contents()).To(MatchJSON(log.ToJSON())) + }) + + It("returns the newly updated level", func() { + Expect(sink.GetMinLevel()).To(Equal(lager.DEBUG)) + }) + }) +}) diff --git a/vendor/github.com/pivotal-golang/lager/writer_sink.go b/vendor/github.com/pivotal-golang/lager/writer_sink.go new file mode 100644 index 000000000..bb8fbf151 --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/writer_sink.go @@ -0,0 +1,38 @@ +package lager + +import ( + "io" + "sync" +) + +// A Sink represents a write destination for a Logger. It provides +// a thread-safe interface for writing logs +type Sink interface { + //Log to the sink. Best effort -- no need to worry about errors. + Log(LogFormat) +} + +type writerSink struct { + writer io.Writer + minLogLevel LogLevel + writeL *sync.Mutex +} + +func NewWriterSink(writer io.Writer, minLogLevel LogLevel) Sink { + return &writerSink{ + writer: writer, + minLogLevel: minLogLevel, + writeL: new(sync.Mutex), + } +} + +func (sink *writerSink) Log(log LogFormat) { + if log.LogLevel < sink.minLogLevel { + return + } + + sink.writeL.Lock() + sink.writer.Write(log.ToJSON()) + sink.writer.Write([]byte("\n")) + sink.writeL.Unlock() +} diff --git a/vendor/github.com/pivotal-golang/lager/writer_sink_test.go b/vendor/github.com/pivotal-golang/lager/writer_sink_test.go new file mode 100644 index 000000000..748a30a0c --- /dev/null +++ b/vendor/github.com/pivotal-golang/lager/writer_sink_test.go @@ -0,0 +1,134 @@ +package lager_test + +import ( + "encoding/json" + "fmt" + "runtime" + "strings" + "sync" + + "code.cloudfoundry.org/lager" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("WriterSink", func() { + const MaxThreads = 100 + + var sink lager.Sink + var writer *copyWriter + + BeforeSuite(func() { + runtime.GOMAXPROCS(MaxThreads) + }) + + BeforeEach(func() { + writer = NewCopyWriter() + sink = lager.NewWriterSink(writer, lager.INFO) + }) + + Context("when logging above the minimum log level", func() { + BeforeEach(func() { + sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: "hello world"}) + }) + + It("writes to the given writer", func() { + Expect(writer.Copy()).To(MatchJSON(`{"message":"hello world","log_level":1,"timestamp":"","source":"","data":null}`)) + }) + }) + + Context("when a unserializable object is passed into data", func() { + BeforeEach(func() { + sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: "hello world", Data: map[string]interface{}{"some_key": func() {}}}) + }) + + It("logs the serialization error", func() { + message := map[string]interface{}{} + json.Unmarshal(writer.Copy(), &message) + Expect(message["message"]).To(Equal("hello world")) + Expect(message["log_level"]).To(Equal(float64(1))) + Expect(message["data"].(map[string]interface{})["lager serialisation error"]).To(Equal("json: unsupported type: func()")) + Expect(message["data"].(map[string]interface{})["data_dump"]).ToNot(BeEmpty()) + }) + + Measure("should be efficient", func(b Benchmarker) { + runtime := b.Time("runtime", func() { + for i := 0; i < 5000; i++ { + sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: "hello world", Data: map[string]interface{}{"some_key": func() {}}}) + Expect(writer.Copy()).ToNot(BeEmpty()) + } + }) + + Expect(runtime.Seconds()).To(BeNumerically("<", 1), "logging shouldn't take too long.") + }, 1) + }) + + Context("when logging below the minimum log level", func() { + BeforeEach(func() { + sink.Log(lager.LogFormat{LogLevel: lager.DEBUG, Message: "hello world"}) + }) + + It("does not write to the given writer", func() { + Expect(writer.Copy()).To(Equal([]byte{})) + }) + }) + + Context("when logging from multiple threads", func() { + var content = "abcdefg " + + BeforeEach(func() { + wg := new(sync.WaitGroup) + for i := 0; i < MaxThreads; i++ { + wg.Add(1) + go func() { + sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: content}) + wg.Done() + }() + } + wg.Wait() + }) + + It("writes to the given writer", func() { + lines := strings.Split(string(writer.Copy()), "\n") + for _, line := range lines { + if line == "" { + continue + } + Expect(line).To(MatchJSON(fmt.Sprintf(`{"message":"%s","log_level":1,"timestamp":"","source":"","data":null}`, content))) + } + }) + }) +}) + +// copyWriter is an INTENTIONALLY UNSAFE writer. Use it to test code that +// should be handling thread safety. +type copyWriter struct { + contents []byte + lock *sync.RWMutex +} + +func NewCopyWriter() *copyWriter { + return ©Writer{ + contents: []byte{}, + lock: new(sync.RWMutex), + } +} + +// no, we really mean RLock on write. +func (writer *copyWriter) Write(p []byte) (n int, err error) { + writer.lock.RLock() + defer writer.lock.RUnlock() + + writer.contents = append(writer.contents, p...) 
+ return len(p), nil +} + +func (writer *copyWriter) Copy() []byte { + writer.lock.Lock() + defer writer.lock.Unlock() + + contents := make([]byte, len(writer.contents)) + copy(contents, writer.contents) + return contents +} diff --git a/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/golang.org/x/net/bpf/vm_bpf_test.go index 77fa8fe4a..76dd970e6 100644 --- a/vendor/golang.org/x/net/bpf/vm_bpf_test.go +++ b/vendor/golang.org/x/net/bpf/vm_bpf_test.go @@ -149,6 +149,9 @@ func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { p := ipv4.NewPacketConn(l) if err = p.SetBPF(prog); err != nil { + if err.Error() == "operation not supported" { // TODO: gross. remove once 19051 fixed. + t.Skip("Skipping until Issue 19051 is fixed.") + } t.Fatalf("failed to attach BPF program to listener: %v", err) } diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go index 9f0f90f1b..72411b1b6 100644 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go @@ -19,6 +19,7 @@ func TestGo17Context(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "ok") })) + defer ts.Close() ctx := context.Background() resp, err := Get(ctx, http.DefaultClient, ts.URL) if resp == nil || err != nil { diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go new file mode 100644 index 000000000..da43b0ba4 --- /dev/null +++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go @@ -0,0 +1,1418 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dnsmessage provides a mostly RFC 1035 compliant implementation of +// DNS message packing and unpacking. +// +// This implementation is designed to minimize heap allocations and avoid +// unnecessary packing and unpacking as much as possible. +package dnsmessage + +import ( + "errors" +) + +// Packet formats + +// A Type is a type of DNS request and response. +type Type uint16 + +// A Class is a type of network. +type Class uint16 + +// An OpCode is a DNS operation code. +type OpCode uint16 + +// An RCode is a DNS response status code. +type RCode uint16 + +// Wire constants. +const ( + // ResourceHeader.Type and Question.Type + TypeA Type = 1 + TypeNS Type = 2 + TypeCNAME Type = 5 + TypeSOA Type = 6 + TypePTR Type = 12 + TypeMX Type = 15 + TypeTXT Type = 16 + TypeAAAA Type = 28 + TypeSRV Type = 33 + + // Question.Type + TypeWKS Type = 11 + TypeHINFO Type = 13 + TypeMINFO Type = 14 + TypeAXFR Type = 252 + TypeALL Type = 255 + + // ResourceHeader.Class and Question.Class + ClassINET Class = 1 + ClassCSNET Class = 2 + ClassCHAOS Class = 3 + ClassHESIOD Class = 4 + + // Question.Class + ClassANY Class = 255 + + // Message.Rcode + RCodeSuccess RCode = 0 + RCodeFormatError RCode = 1 + RCodeServerFailure RCode = 2 + RCodeNameError RCode = 3 + RCodeNotImplemented RCode = 4 + RCodeRefused RCode = 5 +) + +var ( + // ErrNotStarted indicates that the prerequisite information isn't + // available yet because the previous records haven't been appropriately + // parsed or skipped. + ErrNotStarted = errors.New("parsing of this type isn't available yet") + + // ErrSectionDone indicated that all records in the section have been + // parsed. 
+ ErrSectionDone = errors.New("parsing of this section has completed") + + errBaseLen = errors.New("insufficient data for base length type") + errCalcLen = errors.New("insufficient data for calculated length type") + errReserved = errors.New("segment prefix is reserved") + errTooManyPtr = errors.New("too many pointers (>10)") + errInvalidPtr = errors.New("invalid pointer") + errResourceLen = errors.New("insufficient data for resource body length") + errSegTooLong = errors.New("segment length too long") + errZeroSegLen = errors.New("zero length segment") + errResTooLong = errors.New("resource length too long") + errTooManyQuestions = errors.New("too many Questions to pack (>65535)") + errTooManyAnswers = errors.New("too many Answers to pack (>65535)") + errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)") + errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)") +) + +type nestedError struct { + // s is the current level's error message. + s string + + // err is the nested error. + err error +} + +// nestedError implements error.Error. +func (e *nestedError) Error() string { + return e.s + ": " + e.err.Error() +} + +// Header is a representation of a DNS message header. +type Header struct { + ID uint16 + Response bool + OpCode OpCode + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + RCode RCode +} + +func (m *Header) pack() (id uint16, bits uint16) { + id = m.ID + bits = uint16(m.OpCode)<<11 | uint16(m.RCode) + if m.RecursionAvailable { + bits |= headerBitRA + } + if m.RecursionDesired { + bits |= headerBitRD + } + if m.Truncated { + bits |= headerBitTC + } + if m.Authoritative { + bits |= headerBitAA + } + if m.Response { + bits |= headerBitQR + } + return +} + +// Message is a representation of a DNS message. +type Message struct { + Header + Questions []Question + Answers []Resource + Authorities []Resource + Additionals []Resource +} + +type section uint8 + +const ( + sectionHeader section = iota + sectionQuestions + sectionAnswers + sectionAuthorities + sectionAdditionals + sectionDone + + headerBitQR = 1 << 15 // query/response (response=1) + headerBitAA = 1 << 10 // authoritative + headerBitTC = 1 << 9 // truncated + headerBitRD = 1 << 8 // recursion desired + headerBitRA = 1 << 7 // recursion available +) + +var sectionNames = map[section]string{ + sectionHeader: "header", + sectionQuestions: "Question", + sectionAnswers: "Answer", + sectionAuthorities: "Authority", + sectionAdditionals: "Additional", +} + +// header is the wire format for a DNS message header. 
+type header struct { + id uint16 + bits uint16 + questions uint16 + answers uint16 + authorities uint16 + additionals uint16 +} + +func (h *header) count(sec section) uint16 { + switch sec { + case sectionQuestions: + return h.questions + case sectionAnswers: + return h.answers + case sectionAuthorities: + return h.authorities + case sectionAdditionals: + return h.additionals + } + return 0 +} + +func (h *header) pack(msg []byte) []byte { + msg = packUint16(msg, h.id) + msg = packUint16(msg, h.bits) + msg = packUint16(msg, h.questions) + msg = packUint16(msg, h.answers) + msg = packUint16(msg, h.authorities) + return packUint16(msg, h.additionals) +} + +func (h *header) unpack(msg []byte, off int) (int, error) { + newOff := off + var err error + if h.id, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"id", err} + } + if h.bits, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"bits", err} + } + if h.questions, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"questions", err} + } + if h.answers, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"answers", err} + } + if h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"authorities", err} + } + if h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"additionals", err} + } + return newOff, nil +} + +func (h *header) header() Header { + return Header{ + ID: h.id, + Response: (h.bits & headerBitQR) != 0, + OpCode: OpCode(h.bits>>11) & 0xF, + Authoritative: (h.bits & headerBitAA) != 0, + Truncated: (h.bits & headerBitTC) != 0, + RecursionDesired: (h.bits & headerBitRD) != 0, + RecursionAvailable: (h.bits & headerBitRA) != 0, + RCode: RCode(h.bits & 0xF), + } +} + +// A Resource is a DNS resource record. +type Resource interface { + // Header return's the Resource's ResourceHeader. + Header() *ResourceHeader + + // pack packs a Resource except for its header. + pack(msg []byte, compression map[string]int) ([]byte, error) + + // realType returns the actual type of the Resource. This is used to + // fill in the header Type field. + realType() Type +} + +func packResource(msg []byte, resource Resource, compression map[string]int) ([]byte, error) { + oldMsg := msg + resource.Header().Type = resource.realType() + msg, length, err := resource.Header().pack(msg, compression) + if err != nil { + return msg, &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + msg, err = resource.pack(msg, compression) + if err != nil { + return msg, &nestedError{"content", err} + } + conLen := len(msg) - preLen + if conLen > int(^uint16(0)) { + return oldMsg, errResTooLong + } + // Fill in the length now that we know how long the content is. + packUint16(length[:0], uint16(conLen)) + resource.Header().Length = uint16(conLen) + return msg, nil +} + +// A Parser allows incrementally parsing a DNS message. +// +// When parsing is started, the Header is parsed. Next, each Question can be +// either parsed or skipped. Alternatively, all Questions can be skipped at +// once. When all Questions have been parsed, attempting to parse Questions +// will return (nil, nil) and attempting to skip Questions will return +// (true, nil). 
After all Questions have been either parsed or skipped, all +// Answers, Authorities and Additionals can be either parsed or skipped in the +// same way, and each type of Resource must be fully parsed or skipped before +// proceeding to the next type of Resource. +// +// Note that there is no requirement to fully skip or parse the message. +type Parser struct { + msg []byte + header header + + section section + off int + index int + resHeaderValid bool + resHeader ResourceHeader +} + +// Start parses the header and enables the parsing of Questions. +func (p *Parser) Start(msg []byte) (Header, error) { + if p.msg != nil { + *p = Parser{} + } + p.msg = msg + var err error + if p.off, err = p.header.unpack(msg, 0); err != nil { + return Header{}, &nestedError{"unpacking header", err} + } + p.section = sectionQuestions + return p.header.header(), nil +} + +func (p *Parser) checkAdvance(sec section) error { + if p.section < sec { + return ErrNotStarted + } + if p.section > sec { + return ErrSectionDone + } + p.resHeaderValid = false + if p.index == int(p.header.count(sec)) { + p.index = 0 + p.section++ + return ErrSectionDone + } + return nil +} + +func (p *Parser) resource(sec section) (Resource, error) { + var r Resource + hdr, err := p.resourceHeader(sec) + if err != nil { + return r, err + } + p.resHeaderValid = false + r, p.off, err = unpackResource(p.msg, p.off, hdr) + if err != nil { + return nil, &nestedError{"unpacking " + sectionNames[sec], err} + } + p.index++ + return r, nil +} + +func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) { + if p.resHeaderValid { + return p.resHeader, nil + } + if err := p.checkAdvance(sec); err != nil { + return ResourceHeader{}, err + } + var hdr ResourceHeader + off, err := hdr.unpack(p.msg, p.off) + if err != nil { + return ResourceHeader{}, err + } + p.resHeaderValid = true + p.resHeader = hdr + p.off = off + return hdr, nil +} + +func (p *Parser) skipResource(sec section) error { + if p.resHeaderValid { + newOff := p.off + int(p.resHeader.Length) + if newOff > len(p.msg) { + return errResourceLen + } + p.off = newOff + p.resHeaderValid = false + p.index++ + return nil + } + if err := p.checkAdvance(sec); err != nil { + return err + } + var err error + p.off, err = skipResource(p.msg, p.off) + if err != nil { + return &nestedError{"skipping: " + sectionNames[sec], err} + } + p.index++ + return nil +} + +// Question parses a single Question. +func (p *Parser) Question() (Question, error) { + if err := p.checkAdvance(sectionQuestions); err != nil { + return Question{}, err + } + name, off, err := unpackName(p.msg, p.off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Name", err} + } + typ, off, err := unpackType(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Type", err} + } + class, off, err := unpackClass(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Class", err} + } + p.off = off + p.index++ + return Question{name, typ, class}, nil +} + +// AllQuestions parses all Questions. +func (p *Parser) AllQuestions() ([]Question, error) { + qs := make([]Question, 0, p.header.questions) + for { + q, err := p.Question() + if err == ErrSectionDone { + return qs, nil + } + if err != nil { + return nil, err + } + qs = append(qs, q) + } +} + +// SkipQuestion skips a single Question. 
+func (p *Parser) SkipQuestion() error { + if err := p.checkAdvance(sectionQuestions); err != nil { + return err + } + off, err := skipName(p.msg, p.off) + if err != nil { + return &nestedError{"skipping Question Name", err} + } + if off, err = skipType(p.msg, off); err != nil { + return &nestedError{"skipping Question Type", err} + } + if off, err = skipClass(p.msg, off); err != nil { + return &nestedError{"skipping Question Class", err} + } + p.off = off + p.index++ + return nil +} + +// SkipAllQuestions skips all Questions. +func (p *Parser) SkipAllQuestions() error { + for { + if err := p.SkipQuestion(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AnswerHeader parses a single Answer ResourceHeader. +func (p *Parser) AnswerHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAnswers) +} + +// Answer parses a single Answer Resource. +func (p *Parser) Answer() (Resource, error) { + return p.resource(sectionAnswers) +} + +// AllAnswers parses all Answer Resources. +func (p *Parser) AllAnswers() ([]Resource, error) { + as := make([]Resource, 0, p.header.answers) + for { + a, err := p.Answer() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAnswer skips a single Answer Resource. +func (p *Parser) SkipAnswer() error { + return p.skipResource(sectionAnswers) +} + +// SkipAllAnswers skips all Answer Resources. +func (p *Parser) SkipAllAnswers() error { + for { + if err := p.SkipAnswer(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AuthorityHeader parses a single Authority ResourceHeader. +func (p *Parser) AuthorityHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAuthorities) +} + +// Authority parses a single Authority Resource. +func (p *Parser) Authority() (Resource, error) { + return p.resource(sectionAuthorities) +} + +// AllAuthorities parses all Authority Resources. +func (p *Parser) AllAuthorities() ([]Resource, error) { + as := make([]Resource, 0, p.header.authorities) + for { + a, err := p.Authority() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAuthority skips a single Authority Resource. +func (p *Parser) SkipAuthority() error { + return p.skipResource(sectionAuthorities) +} + +// SkipAllAuthorities skips all Authority Resources. +func (p *Parser) SkipAllAuthorities() error { + for { + if err := p.SkipAuthority(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AdditionalHeader parses a single Additional ResourceHeader. +func (p *Parser) AdditionalHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAdditionals) +} + +// Additional parses a single Additional Resource. +func (p *Parser) Additional() (Resource, error) { + return p.resource(sectionAdditionals) +} + +// AllAdditionals parses all Additional Resources. +func (p *Parser) AllAdditionals() ([]Resource, error) { + as := make([]Resource, 0, p.header.additionals) + for { + a, err := p.Additional() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAdditional skips a single Additional Resource. +func (p *Parser) SkipAdditional() error { + return p.skipResource(sectionAdditionals) +} + +// SkipAllAdditionals skips all Additional Resources. 
+func (p *Parser) SkipAllAdditionals() error { + for { + if err := p.SkipAdditional(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// Unpack parses a full Message. +func (m *Message) Unpack(msg []byte) error { + var p Parser + var err error + if m.Header, err = p.Start(msg); err != nil { + return err + } + if m.Questions, err = p.AllQuestions(); err != nil { + return err + } + if m.Answers, err = p.AllAnswers(); err != nil { + return err + } + if m.Authorities, err = p.AllAuthorities(); err != nil { + return err + } + if m.Additionals, err = p.AllAdditionals(); err != nil { + return err + } + return nil +} + +// Pack packs a full Message. +func (m *Message) Pack() ([]byte, error) { + // Validate the lengths. It is very unlikely that anyone will try to + // pack more than 65535 of any particular type, but it is possible and + // we should fail gracefully. + if len(m.Questions) > int(^uint16(0)) { + return nil, errTooManyQuestions + } + if len(m.Answers) > int(^uint16(0)) { + return nil, errTooManyAnswers + } + if len(m.Authorities) > int(^uint16(0)) { + return nil, errTooManyAuthorities + } + if len(m.Additionals) > int(^uint16(0)) { + return nil, errTooManyAdditionals + } + + var h header + h.id, h.bits = m.Header.pack() + + h.questions = uint16(len(m.Questions)) + h.answers = uint16(len(m.Answers)) + h.authorities = uint16(len(m.Authorities)) + h.additionals = uint16(len(m.Additionals)) + + // The starting capacity doesn't matter too much, but most DNS responses + // Will be <= 512 bytes as it is the limit for DNS over UDP. + msg := make([]byte, 0, 512) + + msg = h.pack(msg) + + // RFC 1035 allows (but does not require) compression for packing. RFC + // 1035 requires unpacking implementations to support compression, so + // unconditionally enabling it is fine. + // + // DNS lookups are typically done over UDP, and RFC 1035 states that UDP + // DNS packets can be a maximum of 512 bytes long. Without compression, + // many DNS response packets are over this limit, so enabling + // compression will help ensure compliance. + compression := map[string]int{} + + for _, q := range m.Questions { + var err error + msg, err = q.pack(msg, compression) + if err != nil { + return nil, &nestedError{"packing Question", err} + } + } + for _, a := range m.Answers { + var err error + msg, err = packResource(msg, a, compression) + if err != nil { + return nil, &nestedError{"packing Answer", err} + } + } + for _, a := range m.Authorities { + var err error + msg, err = packResource(msg, a, compression) + if err != nil { + return nil, &nestedError{"packing Authority", err} + } + } + for _, a := range m.Additionals { + var err error + msg, err = packResource(msg, a, compression) + if err != nil { + return nil, &nestedError{"packing Additional", err} + } + } + + return msg, nil +} + +// An ResourceHeader is the header of a DNS resource record. There are +// many types of DNS resource records, but they all share the same header. +type ResourceHeader struct { + // Name is the domain name for which this resource record pertains. + Name string + + // Type is the type of DNS resource record. + // + // This field will be set automatically during packing. + Type Type + + // Class is the class of network to which this DNS resource record + // pertains. + Class Class + + // TTL is the length of time (measured in seconds) which this resource + // record is valid for (time to live). All Resources in a set should + // have the same TTL (RFC 2181 Section 5.2). 
+ TTL uint32 + + // Length is the length of data in the resource record after the header. + // + // This field will be set automatically during packing. + Length uint16 +} + +// Header implements Resource.Header. +func (h *ResourceHeader) Header() *ResourceHeader { + return h +} + +// pack packs all of the fields in a ResourceHeader except for the length. The +// length bytes are returned as a slice so they can be filled in after the rest +// of the Resource has been packed. +func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int) (msg []byte, length []byte, err error) { + msg = oldMsg + if msg, err = packName(msg, h.Name, compression); err != nil { + return oldMsg, nil, &nestedError{"Name", err} + } + msg = packType(msg, h.Type) + msg = packClass(msg, h.Class) + msg = packUint32(msg, h.TTL) + lenBegin := len(msg) + msg = packUint16(msg, h.Length) + return msg, msg[lenBegin:], nil +} + +func (h *ResourceHeader) unpack(msg []byte, off int) (int, error) { + newOff := off + var err error + if h.Name, newOff, err = unpackName(msg, newOff); err != nil { + return off, &nestedError{"Name", err} + } + if h.Type, newOff, err = unpackType(msg, newOff); err != nil { + return off, &nestedError{"Type", err} + } + if h.Class, newOff, err = unpackClass(msg, newOff); err != nil { + return off, &nestedError{"Class", err} + } + if h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil { + return off, &nestedError{"TTL", err} + } + if h.Length, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"Length", err} + } + return newOff, nil +} + +func skipResource(msg []byte, off int) (int, error) { + newOff, err := skipName(msg, off) + if err != nil { + return off, &nestedError{"Name", err} + } + if newOff, err = skipType(msg, newOff); err != nil { + return off, &nestedError{"Type", err} + } + if newOff, err = skipClass(msg, newOff); err != nil { + return off, &nestedError{"Class", err} + } + if newOff, err = skipUint32(msg, newOff); err != nil { + return off, &nestedError{"TTL", err} + } + length, newOff, err := unpackUint16(msg, newOff) + if err != nil { + return off, &nestedError{"Length", err} + } + if newOff += int(length); newOff > len(msg) { + return off, errResourceLen + } + return newOff, nil +} + +func packUint16(msg []byte, field uint16) []byte { + return append(msg, byte(field>>8), byte(field)) +} + +func unpackUint16(msg []byte, off int) (uint16, int, error) { + if off+2 > len(msg) { + return 0, off, errBaseLen + } + return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2, nil +} + +func skipUint16(msg []byte, off int) (int, error) { + if off+2 > len(msg) { + return off, errBaseLen + } + return off + 2, nil +} + +func packType(msg []byte, field Type) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackType(msg []byte, off int) (Type, int, error) { + t, o, err := unpackUint16(msg, off) + return Type(t), o, err +} + +func skipType(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +func packClass(msg []byte, field Class) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackClass(msg []byte, off int) (Class, int, error) { + c, o, err := unpackUint16(msg, off) + return Class(c), o, err +} + +func skipClass(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +func packUint32(msg []byte, field uint32) []byte { + return append( + msg, + byte(field>>24), + byte(field>>16), + byte(field>>8), + byte(field), + ) +} + +func unpackUint32(msg []byte, off int) (uint32, int, error) { + if off+4 > 
len(msg) { + return 0, off, errBaseLen + } + v := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3]) + return v, off + 4, nil +} + +func skipUint32(msg []byte, off int) (int, error) { + if off+4 > len(msg) { + return off, errBaseLen + } + return off + 4, nil +} + +func packText(msg []byte, field string) []byte { + for len(field) > 0 { + l := len(field) + if l > 255 { + l = 255 + } + msg = append(msg, byte(l)) + msg = append(msg, field[:l]...) + field = field[l:] + } + return msg +} + +func unpackText(msg []byte, off int) (string, int, error) { + if off >= len(msg) { + return "", off, errBaseLen + } + beginOff := off + 1 + endOff := beginOff + int(msg[off]) + if endOff > len(msg) { + return "", off, errCalcLen + } + return string(msg[beginOff:endOff]), endOff, nil +} + +func skipText(msg []byte, off int) (int, error) { + if off >= len(msg) { + return off, errBaseLen + } + endOff := off + 1 + int(msg[off]) + if endOff > len(msg) { + return off, errCalcLen + } + return endOff, nil +} + +func packBytes(msg []byte, field []byte) []byte { + return append(msg, field...) +} + +func unpackBytes(msg []byte, off int, field []byte) (int, error) { + newOff := off + len(field) + if newOff > len(msg) { + return off, errBaseLen + } + copy(field, msg[off:newOff]) + return newOff, nil +} + +func skipBytes(msg []byte, off int, field []byte) (int, error) { + newOff := off + len(field) + if newOff > len(msg) { + return off, errBaseLen + } + return newOff, nil +} + +// packName packs a domain name. +// +// Domain names are a sequence of counted strings split at the dots. They end +// with a zero-length string. Compression can be used to reuse domain suffixes. +// +// The compression map will be updated with new domain suffixes. If compression +// is nil, compression will not be used. +func packName(msg []byte, name string, compression map[string]int) ([]byte, error) { + oldMsg := msg + + // Add a trailing dot to canonicalize name. + if n := len(name); n == 0 || name[n-1] != '.' { + name += "." + } + + // Allow root domain. + if name == "." { + return append(msg, 0), nil + } + + // Emit sequence of counted strings, chopping at dots. + for i, begin := 0, 0; i < len(name); i++ { + // Check for the end of the segment. + if name[i] == '.' { + // The two most significant bits have special meaning. + // It isn't allowed for segments to be long enough to + // need them. + if i-begin >= 1<<6 { + return oldMsg, errSegTooLong + } + + // Segments must have a non-zero length. + if i-begin == 0 { + return oldMsg, errZeroSegLen + } + + msg = append(msg, byte(i-begin)) + + for j := begin; j < i; j++ { + msg = append(msg, name[j]) + } + + begin = i + 1 + continue + } + + // We can only compress domain suffixes starting with a new + // segment. A pointer is two bytes with the two most significant + // bits set to 1 to indicate that it is a pointer. + if (i == 0 || name[i-1] == '.') && compression != nil { + if ptr, ok := compression[name[i:]]; ok { + // Hit. Emit a pointer instead of the rest of + // the domain. + return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil + } + + // Miss. Add the suffix to the compression table if the + // offset can be stored in the available 14 bytes. + if len(msg) <= int(^uint16(0)>>2) { + compression[name[i:]] = len(msg) + } + } + } + return append(msg, 0), nil +} + +// unpackName unpacks a domain name. +func unpackName(msg []byte, off int) (string, int, error) { + // currOff is the current working offset. 
+ currOff := off + + // newOff is the offset where the next record will start. Pointers lead + // to data that belongs to other names and thus doesn't count towards to + // the usage of this name. + newOff := off + + // name is the domain name being unpacked. + name := make([]byte, 0, 255) + + // ptr is the number of pointers followed. + var ptr int +Loop: + for { + if currOff >= len(msg) { + return "", off, errBaseLen + } + c := int(msg[currOff]) + currOff++ + switch c & 0xC0 { + case 0x00: // String segment + if c == 0x00 { + // A zero length signals the end of the name. + break Loop + } + endOff := currOff + c + if endOff > len(msg) { + return "", off, errCalcLen + } + name = append(name, msg[currOff:endOff]...) + name = append(name, '.') + currOff = endOff + case 0xC0: // Pointer + if currOff >= len(msg) { + return "", off, errInvalidPtr + } + c1 := msg[currOff] + currOff++ + if ptr == 0 { + newOff = currOff + } + // Don't follow too many pointers, maybe there's a loop. + if ptr++; ptr > 10 { + return "", off, errTooManyPtr + } + currOff = (c^0xC0)<<8 | int(c1) + default: + // Prefixes 0x80 and 0x40 are reserved. + return "", off, errReserved + } + } + if len(name) == 0 { + name = append(name, '.') + } + if ptr == 0 { + newOff = currOff + } + return string(name), newOff, nil +} + +func skipName(msg []byte, off int) (int, error) { + // newOff is the offset where the next record will start. Pointers lead + // to data that belongs to other names and thus doesn't count towards to + // the usage of this name. + newOff := off + +Loop: + for { + if newOff >= len(msg) { + return off, errBaseLen + } + c := int(msg[newOff]) + newOff++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // A zero length signals the end of the name. + break Loop + } + // literal string + newOff += c + if newOff > len(msg) { + return off, errCalcLen + } + case 0xC0: + // Pointer to somewhere else in msg. + + // Pointers are two bytes. + newOff++ + + // Don't follow the pointer as the data here has ended. + break Loop + default: + // Prefixes 0x80 and 0x40 are reserved. + return off, errReserved + } + } + + return newOff, nil +} + +// A Question is a DNS query. 
+type Question struct { + Name string + Type Type + Class Class +} + +func (q *Question) pack(msg []byte, compression map[string]int) ([]byte, error) { + msg, err := packName(msg, q.Name, compression) + if err != nil { + return msg, &nestedError{"Name", err} + } + msg = packType(msg, q.Type) + return packClass(msg, q.Class), nil +} + +func unpackResource(msg []byte, off int, hdr ResourceHeader) (Resource, int, error) { + var ( + r Resource + err error + name string + ) + switch hdr.Type { + case TypeA: + r, err = unpackAResource(hdr, msg, off) + name = "A" + case TypeNS: + r, err = unpackNSResource(hdr, msg, off) + name = "NS" + case TypeCNAME: + r, err = unpackCNAMEResource(hdr, msg, off) + name = "CNAME" + case TypeSOA: + r, err = unpackSOAResource(hdr, msg, off) + name = "SOA" + case TypePTR: + r, err = unpackPTRResource(hdr, msg, off) + name = "PTR" + case TypeMX: + r, err = unpackMXResource(hdr, msg, off) + name = "MX" + case TypeTXT: + r, err = unpackTXTResource(hdr, msg, off) + name = "TXT" + case TypeAAAA: + r, err = unpackAAAAResource(hdr, msg, off) + name = "AAAA" + case TypeSRV: + r, err = unpackSRVResource(hdr, msg, off) + name = "SRV" + } + if err != nil { + return nil, off, &nestedError{name + " record", err} + } + if r != nil { + return r, off + int(hdr.Length), nil + } + return nil, off, errors.New("invalid resource type: " + string(hdr.Type+'0')) +} + +// A CNAMEResource is a CNAME Resource record. +type CNAMEResource struct { + ResourceHeader + + CNAME string +} + +func (r *CNAMEResource) realType() Type { + return TypeCNAME +} + +func (r *CNAMEResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packName(msg, r.CNAME, compression) +} + +func unpackCNAMEResource(hdr ResourceHeader, msg []byte, off int) (*CNAMEResource, error) { + cname, _, err := unpackName(msg, off) + if err != nil { + return nil, err + } + return &CNAMEResource{hdr, cname}, nil +} + +// An MXResource is an MX Resource record. +type MXResource struct { + ResourceHeader + + Pref uint16 + MX string +} + +func (r *MXResource) realType() Type { + return TypeMX +} + +func (r *MXResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Pref) + msg, err := packName(msg, r.MX, compression) + if err != nil { + return oldMsg, &nestedError{"MXResource.MX", err} + } + return msg, nil +} + +func unpackMXResource(hdr ResourceHeader, msg []byte, off int) (*MXResource, error) { + pref, off, err := unpackUint16(msg, off) + if err != nil { + return nil, &nestedError{"Pref", err} + } + mx, _, err := unpackName(msg, off) + if err != nil { + return nil, &nestedError{"MX", err} + } + return &MXResource{hdr, pref, mx}, nil +} + +// An NSResource is an NS Resource record. +type NSResource struct { + ResourceHeader + + NS string +} + +func (r *NSResource) realType() Type { + return TypeNS +} + +func (r *NSResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packName(msg, r.NS, compression) +} + +func unpackNSResource(hdr ResourceHeader, msg []byte, off int) (*NSResource, error) { + ns, _, err := unpackName(msg, off) + if err != nil { + return nil, err + } + return &NSResource{hdr, ns}, nil +} + +// A PTRResource is a PTR Resource record. 
+type PTRResource struct { + ResourceHeader + + PTR string +} + +func (r *PTRResource) realType() Type { + return TypePTR +} + +func (r *PTRResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packName(msg, r.PTR, compression) +} + +func unpackPTRResource(hdr ResourceHeader, msg []byte, off int) (*PTRResource, error) { + ptr, _, err := unpackName(msg, off) + if err != nil { + return nil, err + } + return &PTRResource{hdr, ptr}, nil +} + +// An SOAResource is an SOA Resource record. +type SOAResource struct { + ResourceHeader + + NS string + MBox string + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + + // MinTTL the is the default TTL of Resources records which did not + // contain a TTL value and the TTL of negative responses. (RFC 2308 + // Section 4) + MinTTL uint32 +} + +func (r *SOAResource) realType() Type { + return TypeSOA +} + +func (r *SOAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + oldMsg := msg + msg, err := packName(msg, r.NS, compression) + if err != nil { + return oldMsg, &nestedError{"SOAResource.NS", err} + } + msg, err = packName(msg, r.MBox, compression) + if err != nil { + return oldMsg, &nestedError{"SOAResource.MBox", err} + } + msg = packUint32(msg, r.Serial) + msg = packUint32(msg, r.Refresh) + msg = packUint32(msg, r.Retry) + msg = packUint32(msg, r.Expire) + return packUint32(msg, r.MinTTL), nil +} + +func unpackSOAResource(hdr ResourceHeader, msg []byte, off int) (*SOAResource, error) { + ns, off, err := unpackName(msg, off) + if err != nil { + return nil, &nestedError{"NS", err} + } + mbox, off, err := unpackName(msg, off) + if err != nil { + return nil, &nestedError{"MBox", err} + } + serial, off, err := unpackUint32(msg, off) + if err != nil { + return nil, &nestedError{"Serial", err} + } + refresh, off, err := unpackUint32(msg, off) + if err != nil { + return nil, &nestedError{"Refresh", err} + } + retry, off, err := unpackUint32(msg, off) + if err != nil { + return nil, &nestedError{"Retry", err} + } + expire, off, err := unpackUint32(msg, off) + if err != nil { + return nil, &nestedError{"Expire", err} + } + minTTL, _, err := unpackUint32(msg, off) + if err != nil { + return nil, &nestedError{"MinTTL", err} + } + return &SOAResource{hdr, ns, mbox, serial, refresh, retry, expire, minTTL}, nil +} + +// A TXTResource is a TXT Resource record. +type TXTResource struct { + ResourceHeader + + Txt string // Not a domain name. +} + +func (r *TXTResource) realType() Type { + return TypeTXT +} + +func (r *TXTResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packText(msg, r.Txt), nil +} + +func unpackTXTResource(hdr ResourceHeader, msg []byte, off int) (*TXTResource, error) { + var txt string + for n := uint16(0); n < hdr.Length; { + var t string + var err error + if t, off, err = unpackText(msg, off); err != nil { + return nil, &nestedError{"text", err} + } + // Check if we got too many bytes. + if hdr.Length-n < uint16(len(t))+1 { + return nil, errCalcLen + } + n += uint16(len(t)) + 1 + txt += t + } + return &TXTResource{hdr, txt}, nil +} + +// An SRVResource is an SRV Resource record. +type SRVResource struct { + ResourceHeader + + Priority uint16 + Weight uint16 + Port uint16 + Target string // Not compressed as per RFC 2782. 
+} + +func (r *SRVResource) realType() Type { + return TypeSRV +} + +func (r *SRVResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Priority) + msg = packUint16(msg, r.Weight) + msg = packUint16(msg, r.Port) + msg, err := packName(msg, r.Target, nil) + if err != nil { + return oldMsg, &nestedError{"SRVResource.Target", err} + } + return msg, nil +} + +func unpackSRVResource(hdr ResourceHeader, msg []byte, off int) (*SRVResource, error) { + priority, off, err := unpackUint16(msg, off) + if err != nil { + return nil, &nestedError{"Priority", err} + } + weight, off, err := unpackUint16(msg, off) + if err != nil { + return nil, &nestedError{"Weight", err} + } + port, off, err := unpackUint16(msg, off) + if err != nil { + return nil, &nestedError{"Port", err} + } + target, _, err := unpackName(msg, off) + if err != nil { + return nil, &nestedError{"Target", err} + } + return &SRVResource{hdr, priority, weight, port, target}, nil +} + +// An AResource is an A Resource record. +type AResource struct { + ResourceHeader + + A [4]byte +} + +func (r *AResource) realType() Type { + return TypeA +} + +func (r *AResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packBytes(msg, r.A[:]), nil +} + +func unpackAResource(hdr ResourceHeader, msg []byte, off int) (*AResource, error) { + var a [4]byte + if _, err := unpackBytes(msg, off, a[:]); err != nil { + return nil, err + } + return &AResource{hdr, a}, nil +} + +// An AAAAResource is an AAAA Resource record. +type AAAAResource struct { + ResourceHeader + + AAAA [16]byte +} + +func (r *AAAAResource) realType() Type { + return TypeAAAA +} + +func (r *AAAAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packBytes(msg, r.AAAA[:]), nil +} + +func unpackAAAAResource(hdr ResourceHeader, msg []byte, off int) (*AAAAResource, error) { + var aaaa [16]byte + if _, err := unpackBytes(msg, off, aaaa[:]); err != nil { + return nil, err + } + return &AAAAResource{hdr, aaaa}, nil +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go new file mode 100644 index 000000000..46edd7243 --- /dev/null +++ b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -0,0 +1,575 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
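
Before the package's tests, a brief illustrative sketch (not part of the vendored files) of how the Message, Question, and Parser types defined above fit together; the query contents here are hypothetical:

	package main

	import (
		"fmt"

		"golang.org/x/net/dns/dnsmessage"
	)

	func main() {
		// Build a query for example.com A records and pack it to wire format.
		query := dnsmessage.Message{
			Header: dnsmessage.Header{ID: 1, RecursionDesired: true},
			Questions: []dnsmessage.Question{
				{Name: "example.com.", Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET},
			},
		}
		buf, err := query.Pack()
		if err != nil {
			panic(err)
		}

		// Incrementally re-parse it; sections must be consumed in order
		// (Questions, then Answers, Authorities, Additionals).
		var p dnsmessage.Parser
		hdr, err := p.Start(buf)
		if err != nil {
			panic(err)
		}
		q, err := p.Question()
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.ID, q.Name, q.Type, q.Class)

		// Alternatively, Message.Unpack parses the whole message at once.
		var full dnsmessage.Message
		if err := full.Unpack(buf); err != nil {
			panic(err)
		}
		fmt.Println(len(full.Questions), "question(s)")
	}
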
+ +package dnsmessage + +import ( + "fmt" + "net" + "reflect" + "strings" + "testing" +) + +func (m *Message) String() string { + s := fmt.Sprintf("Message: %#v\n", &m.Header) + if len(m.Questions) > 0 { + s += "-- Questions\n" + for _, q := range m.Questions { + s += fmt.Sprintf("%#v\n", q) + } + } + if len(m.Answers) > 0 { + s += "-- Answers\n" + for _, a := range m.Answers { + s += fmt.Sprintf("%#v\n", a) + } + } + if len(m.Authorities) > 0 { + s += "-- Authorities\n" + for _, ns := range m.Authorities { + s += fmt.Sprintf("%#v\n", ns) + } + } + if len(m.Additionals) > 0 { + s += "-- Additionals\n" + for _, e := range m.Additionals { + s += fmt.Sprintf("%#v\n", e) + } + } + return s +} + +func TestQuestionPackUnpack(t *testing.T) { + want := Question{ + Name: ".", + Type: TypeA, + Class: ClassINET, + } + buf, err := want.pack(make([]byte, 1, 50), map[string]int{}) + if err != nil { + t.Fatal("Packing failed:", err) + } + var p Parser + p.msg = buf + p.header.questions = 1 + p.section = sectionQuestions + p.off = 1 + got, err := p.Question() + if err != nil { + t.Fatalf("Unpacking failed: %v\n%s", err, string(buf[1:])) + } + if p.off != len(buf) { + t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", p.off, len(buf)) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Got = %+v, want = %+v", got, want) + } +} + +func TestNamePackUnpack(t *testing.T) { + tests := []struct { + in string + want string + err error + }{ + {"", ".", nil}, + {".", ".", nil}, + {"google..com", "", errZeroSegLen}, + {"google.com", "google.com.", nil}, + {"google..com.", "", errZeroSegLen}, + {"google.com.", "google.com.", nil}, + {".google.com.", "", errZeroSegLen}, + {"www..google.com.", "", errZeroSegLen}, + {"www.google.com.", "www.google.com.", nil}, + } + + for _, test := range tests { + buf, err := packName(make([]byte, 0, 30), test.in, map[string]int{}) + if err != test.err { + t.Errorf("Packing of %s: got err = %v, want err = %v", test.in, err, test.err) + continue + } + if test.err != nil { + continue + } + got, n, err := unpackName(buf, 0) + if err != nil { + t.Errorf("Unpacking for %s failed: %v", test.in, err) + continue + } + if n != len(buf) { + t.Errorf( + "Unpacked different amount than packed for %s: got n = %d, want = %d", + test.in, + n, + len(buf), + ) + } + if got != test.want { + t.Errorf("Unpacking packing of %s: got = %s, want = %s", test.in, got, test.want) + } + } +} + +func TestDNSPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: ".", + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b, err := want.Pack() + if err != nil { + t.Fatalf("%d: packing failed: %v", i, err) + } + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: unpacking failed: %v", i, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: got = %+v, want = %+v", i, &got, &want) + } + } +} + +func TestSkipAll(t *testing.T) { + msg := largeTestMsg() + buf, err := msg.Pack() + if err != nil { + t.Fatal("Packing large test message:", err) + } + var p Parser + if _, err := p.Start(buf); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + f func() error + }{ + {"SkipAllQuestions", p.SkipAllQuestions}, + {"SkipAllAnswers", p.SkipAllAnswers}, + {"SkipAllAuthorities", p.SkipAllAuthorities}, + {"SkipAllAdditionals", p.SkipAllAdditionals}, + } + for _, test := range tests { + for i := 1; 
i <= 3; i++ { + if err := test.f(); err != nil { + t.Errorf("Call #%d to %s(): %v", i, test.name, err) + } + } + } +} + +func TestSkipNotStarted(t *testing.T) { + var p Parser + + tests := []struct { + name string + f func() error + }{ + {"SkipAllQuestions", p.SkipAllQuestions}, + {"SkipAllAnswers", p.SkipAllAnswers}, + {"SkipAllAuthorities", p.SkipAllAuthorities}, + {"SkipAllAdditionals", p.SkipAllAdditionals}, + } + for _, test := range tests { + if err := test.f(); err != ErrNotStarted { + t.Errorf("Got %s() = %v, want = %v", test.name, err, ErrNotStarted) + } + } +} + +func TestTooManyRecords(t *testing.T) { + const recs = int(^uint16(0)) + 1 + tests := []struct { + name string + msg Message + want error + }{ + { + "Questions", + Message{ + Questions: make([]Question, recs), + }, + errTooManyQuestions, + }, + { + "Answers", + Message{ + Answers: make([]Resource, recs), + }, + errTooManyAnswers, + }, + { + "Authorities", + Message{ + Authorities: make([]Resource, recs), + }, + errTooManyAuthorities, + }, + { + "Additionals", + Message{ + Additionals: make([]Resource, recs), + }, + errTooManyAdditionals, + }, + } + + for _, test := range tests { + if _, got := test.msg.Pack(); got != test.want { + t.Errorf("Packing %d %s: got = %v, want = %v", recs, test.name, got, test.want) + } + } +} + +func TestVeryLongTxt(t *testing.T) { + want := &TXTResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeTXT, + Class: ClassINET, + }, + Txt: loremIpsum, + } + buf, err := packResource(make([]byte, 0, 8000), want, map[string]int{}) + if err != nil { + t.Fatal("Packing failed:", err) + } + var hdr ResourceHeader + off, err := hdr.unpack(buf, 0) + if err != nil { + t.Fatal("Unpacking ResourceHeader failed:", err) + } + got, n, err := unpackResource(buf, off, hdr) + if err != nil { + t.Fatal("Unpacking failed:", err) + } + if n != len(buf) { + t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", n, len(buf)) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Got = %+v, want = %+v", got, want) + } +} + +func ExampleHeaderSearch() { + msg := Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: "foo.bar.example.com.", + Type: TypeA, + Class: ClassINET, + }, + { + Name: "bar.example.com.", + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + &AResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeA, + Class: ClassINET, + }, + A: [4]byte{127, 0, 0, 1}, + }, + &AResource{ + ResourceHeader: ResourceHeader{ + Name: "bar.example.com.", + Type: TypeA, + Class: ClassINET, + }, + A: [4]byte{127, 0, 0, 2}, + }, + }, + } + + buf, err := msg.Pack() + if err != nil { + panic(err) + } + + wantName := "bar.example.com." 
+ + var p Parser + if _, err := p.Start(buf); err != nil { + panic(err) + } + + for { + q, err := p.Question() + if err == ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + if q.Name != wantName { + continue + } + + fmt.Println("Found question for name", wantName) + if err := p.SkipAllQuestions(); err != nil { + panic(err) + } + break + } + + var gotIPs []net.IP + for { + h, err := p.AnswerHeader() + if err == ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + if (h.Type != TypeA && h.Type != TypeAAAA) || h.Class != ClassINET { + continue + } + + if !strings.EqualFold(h.Name, wantName) { + if err := p.SkipAnswer(); err != nil { + panic(err) + } + continue + } + a, err := p.Answer() + if err != nil { + panic(err) + } + + switch r := a.(type) { + default: + panic(fmt.Sprintf("unknown type: %T", r)) + case *AResource: + gotIPs = append(gotIPs, r.A[:]) + case *AAAAResource: + gotIPs = append(gotIPs, r.AAAA[:]) + } + } + + fmt.Printf("Found A/AAAA records for name %s: %v\n", wantName, gotIPs) + + // Output: + // Found question for name bar.example.com. + // Found A/AAAA records for name bar.example.com.: [127.0.0.2] +} + +func largeTestMsg() Message { + return Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: "foo.bar.example.com.", + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + &AResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeA, + Class: ClassINET, + }, + A: [4]byte{127, 0, 0, 1}, + }, + &AResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeA, + Class: ClassINET, + }, + A: [4]byte{127, 0, 0, 2}, + }, + }, + Authorities: []Resource{ + &NSResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeNS, + Class: ClassINET, + }, + NS: "ns1.example.com.", + }, + &NSResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeNS, + Class: ClassINET, + }, + NS: "ns2.example.com.", + }, + }, + Additionals: []Resource{ + &TXTResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeTXT, + Class: ClassINET, + }, + Txt: "So Long, and Thanks for All the Fish", + }, + &TXTResource{ + ResourceHeader: ResourceHeader{ + Name: "foo.bar.example.com.", + Type: TypeTXT, + Class: ClassINET, + }, + Txt: "Hamster Huey and the Gooey Kablooie", + }, + }, + } +} + +const loremIpsum = ` +Lorem ipsum dolor sit amet, nec enim antiopam id, an ullum choro +nonumes qui, pro eu debet honestatis mediocritatem. No alia enim eos, +magna signiferumque ex vis. Mei no aperiri dissentias, cu vel quas +regione. Malorum quaeque vim ut, eum cu semper aliquid invidunt, ei +nam ipsum assentior. + +Nostrum appellantur usu no, vis ex probatus adipiscing. Cu usu illum +facilis eleifend. Iusto conceptam complectitur vim id. Tale omnesque +no usu, ei oblique sadipscing vim. At nullam voluptua usu, mei laudem +reformidans et. Qui ei eros porro reformidans, ius suas veritus +torquatos ex. Mea te facer alterum consequat. + +Soleat torquatos democritum sed et, no mea congue appareat, facer +aliquam nec in. Has te ipsum tritani. At justo dicta option nec, movet +phaedrum ad nam. Ea detracto verterem liberavisse has, delectus +suscipiantur in mei. Ex nam meliore complectitur. Ut nam omnis +honestatis quaerendum, ea mea nihil affert detracto, ad vix rebum +mollis. + +Ut epicurei praesent neglegentur pri, prima fuisset intellegebat ad +vim. 
An habemus comprehensam usu, at enim dignissim pro. Eam reque +vivendum adipisci ea. Vel ne odio choro minimum. Sea admodum +dissentiet ex. Mundi tamquam evertitur ius cu. Homero postea iisque ut +pro, vel ne saepe senserit consetetur. + +Nulla utamur facilisis ius ea, in viderer diceret pertinax eum. Mei no +enim quodsi facilisi, ex sed aeterno appareat mediocritatem, eum +sententiae deterruisset ut. At suas timeam euismod cum, offendit +appareat interpretaris ne vix. Vel ea civibus albucius, ex vim quidam +accusata intellegebat, noluisse instructior sea id. Nec te nonumes +habemus appellantur, quis dignissim vituperata eu nam. + +At vix apeirian patrioque vituperatoribus, an usu agam assum. Debet +iisque an mea. Per eu dicant ponderum accommodare. Pri alienum +placerat senserit an, ne eum ferri abhorreant vituperatoribus. Ut mea +eligendi disputationi. Ius no tation everti impedit, ei magna quidam +mediocritatem pri. + +Legendos perpetua iracundia ne usu, no ius ullum epicurei intellegam, +ad modus epicuri lucilius eam. In unum quaerendum usu. Ne diam paulo +has, ea veri virtute sed. Alia honestatis conclusionemque mea eu, ut +iudico albucius his. + +Usu essent probatus eu, sed omnis dolor delicatissimi ex. No qui augue +dissentias dissentiet. Laudem recteque no usu, vel an velit noluisse, +an sed utinam eirmod appetere. Ne mea fuisset inimicus ocurreret. At +vis dicant abhorreant, utinam forensibus nec ne, mei te docendi +consequat. Brute inermis persecuti cum id. Ut ipsum munere propriae +usu, dicit graeco disputando id has. + +Eros dolore quaerendum nam ei. Timeam ornatus inciderint pro id. Nec +torquatos sadipscing ei, ancillae molestie per in. Malis principes duo +ea, usu liber postulant ei. + +Graece timeam voluptatibus eu eam. Alia probatus quo no, ea scripta +feugiat duo. Congue option meliore ex qui, noster invenire appellantur +ea vel. Eu exerci legendos vel. Consetetur repudiandae vim ut. Vix an +probo minimum, et nam illud falli tempor. + +Cum dico signiferumque eu. Sed ut regione maiorum, id veritus insolens +tacimates vix. Eu mel sint tamquam lucilius, duo no oporteat +tacimates. Atqui augue concludaturque vix ei, id mel utroque menandri. + +Ad oratio blandit aliquando pro. Vis et dolorum rationibus +philosophia, ad cum nulla molestie. Hinc fuisset adversarium eum et, +ne qui nisl verear saperet, vel te quaestio forensibus. Per odio +option delenit an. Alii placerat has no, in pri nihil platonem +cotidieque. Est ut elit copiosae scaevola, debet tollit maluisset sea +an. + +Te sea hinc debet pericula, liber ridens fabulas cu sed, quem mutat +accusam mea et. Elitr labitur albucius et pri, an labore feugait mel. +Velit zril melius usu ea. Ad stet putent interpretaris qui. Mel no +error volumus scripserit. In pro paulo iudico, quo ei dolorem +verterem, affert fabellas dissentiet ea vix. + +Vis quot deserunt te. Error aliquid detraxit eu usu, vis alia eruditi +salutatus cu. Est nostrud bonorum an, ei usu alii salutatus. Vel at +nisl primis, eum ex aperiri noluisse reformidans. Ad veri velit +utroque vis, ex equidem detraxit temporibus has. + +Inermis appareat usu ne. Eros placerat periculis mea ad, in dictas +pericula pro. Errem postulant at usu, ea nec amet ornatus mentitum. Ad +mazim graeco eum, vel ex percipit volutpat iudicabit, sit ne delicata +interesset. Mel sapientem prodesset abhorreant et, oblique suscipit +eam id. + +An maluisset disputando mea, vidit mnesarchum pri et. Malis insolens +inciderint no sea. 
Ea persius maluisset vix, ne vim appellantur +instructior, consul quidam definiebas pri id. Cum integre feugiat +pericula in, ex sed persius similique, mel ne natum dicit percipitur. + +Primis discere ne pri, errem putent definitionem at vis. Ei mel dolore +neglegentur, mei tincidunt percipitur ei. Pro ad simul integre +rationibus. Eu vel alii honestatis definitiones, mea no nonumy +reprehendunt. + +Dicta appareat legendos est cu. Eu vel congue dicunt omittam, no vix +adhuc minimum constituam, quot noluisse id mel. Eu quot sale mutat +duo, ex nisl munere invenire duo. Ne nec ullum utamur. Pro alterum +debitis nostrum no, ut vel aliquid vivendo. + +Aliquip fierent praesent quo ne, id sit audiam recusabo delicatissimi. +Usu postulant incorrupte cu. At pro dicit tibique intellegam, cibo +dolore impedit id eam, et aeque feugait assentior has. Quando sensibus +nec ex. Possit sensibus pri ad, unum mutat periculis cu vix. + +Mundi tibique vix te, duo simul partiendo qualisque id, est at vidit +sonet tempor. No per solet aeterno deseruisse. Petentium salutandi +definiebas pri cu. Munere vivendum est in. Ei justo congue eligendi +vis, modus offendit omittantur te mel. + +Integre voluptaria in qui, sit habemus tractatos constituam no. Utinam +melius conceptam est ne, quo in minimum apeirian delicata, ut ius +porro recusabo. Dicant expetenda vix no, ludus scripserit sed ex, eu +his modo nostro. Ut etiam sonet his, quodsi inciderint philosophia te +per. Nullam lobortis eu cum, vix an sonet efficiendi repudiandae. Vis +ad idque fabellas intellegebat. + +Eum commodo senserit conclusionemque ex. Sed forensibus sadipscing ut, +mei in facer delicata periculis, sea ne hinc putent cetero. Nec ne +alia corpora invenire, alia prima soleat te cum. Eleifend posidonium +nam at. + +Dolorum indoctum cu quo, ex dolor legendos recteque eam, cu pri zril +discere. Nec civibus officiis dissentiunt ex, est te liber ludus +elaboraret. Cum ea fabellas invenire. Ex vim nostrud eripuit +comprehensam, nam te inermis delectus, saepe inermis senserit. +` diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 000000000..a3067f8de --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. 
The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. + if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). 
+ want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/databuffer_test.go b/vendor/golang.org/x/net/http2/databuffer_test.go new file mode 100644 index 000000000..ca227b528 --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer_test.go @@ -0,0 +1,155 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "reflect" + "testing" +) + +func fmtDataChunk(chunk []byte) string { + out := "" + var last byte + var count int + for _, c := range chunk { + if c != last { + if count > 0 { + out += fmt.Sprintf(" x %d ", count) + count = 0 + } + out += string([]byte{c}) + last = c + } + count++ + } + if count > 0 { + out += fmt.Sprintf(" x %d", count) + } + return out +} + +func fmtDataChunks(chunks [][]byte) string { + var out string + for _, chunk := range chunks { + out += fmt.Sprintf("{%q}", fmtDataChunk(chunk)) + } + return out +} + +func testDataBuffer(t *testing.T, wantBytes []byte, setup func(t *testing.T) *dataBuffer) { + // Run setup, then read the remaining bytes from the dataBuffer and check + // that they match wantBytes. We use different read sizes to check corner + // cases in Read. + for _, readSize := range []int{1, 2, 1 * 1024, 32 * 1024} { + t.Run(fmt.Sprintf("ReadSize=%d", readSize), func(t *testing.T) { + b := setup(t) + buf := make([]byte, readSize) + var gotRead bytes.Buffer + for { + n, err := b.Read(buf) + gotRead.Write(buf[:n]) + if err == errReadEmpty { + break + } + if err != nil { + t.Fatalf("error after %v bytes: %v", gotRead.Len(), err) + } + } + if got, want := gotRead.Bytes(), wantBytes; !bytes.Equal(got, want) { + t.Errorf("FinalRead=%q, want %q", fmtDataChunk(got), fmtDataChunk(want)) + } + }) + } +} + +func TestDataBufferAllocation(t *testing.T) { + writes := [][]byte{ + bytes.Repeat([]byte("a"), 1*1024-1), + []byte{'a'}, + bytes.Repeat([]byte("b"), 4*1024-1), + []byte{'b'}, + bytes.Repeat([]byte("c"), 8*1024-1), + []byte{'c'}, + bytes.Repeat([]byte("d"), 16*1024-1), + []byte{'d'}, + bytes.Repeat([]byte("e"), 32*1024), + } + var wantRead bytes.Buffer + for _, p := range writes { + wantRead.Write(p) + } + + testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer { + b := &dataBuffer{} + for _, p := range writes { + if n, err := b.Write(p); n != len(p) || err != nil { + t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p)) + } + } + want := [][]byte{ + bytes.Repeat([]byte("a"), 1*1024), + bytes.Repeat([]byte("b"), 4*1024), + bytes.Repeat([]byte("c"), 8*1024), + bytes.Repeat([]byte("d"), 16*1024), + bytes.Repeat([]byte("e"), 16*1024), + bytes.Repeat([]byte("e"), 16*1024), + } + if !reflect.DeepEqual(b.chunks, want) { + t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want)) + } + return b + }) +} + +func TestDataBufferAllocationWithExpected(t *testing.T) { + writes := [][]byte{ + bytes.Repeat([]byte("a"), 1*1024), // allocates 16KB + bytes.Repeat([]byte("b"), 
14*1024), + bytes.Repeat([]byte("c"), 15*1024), // allocates 16KB more + bytes.Repeat([]byte("d"), 2*1024), + bytes.Repeat([]byte("e"), 1*1024), // overflows 32KB expectation, allocates just 1KB + } + var wantRead bytes.Buffer + for _, p := range writes { + wantRead.Write(p) + } + + testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer { + b := &dataBuffer{expected: 32 * 1024} + for _, p := range writes { + if n, err := b.Write(p); n != len(p) || err != nil { + t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p)) + } + } + want := [][]byte{ + append(bytes.Repeat([]byte("a"), 1*1024), append(bytes.Repeat([]byte("b"), 14*1024), bytes.Repeat([]byte("c"), 1*1024)...)...), + append(bytes.Repeat([]byte("c"), 14*1024), bytes.Repeat([]byte("d"), 2*1024)...), + bytes.Repeat([]byte("e"), 1*1024), + } + if !reflect.DeepEqual(b.chunks, want) { + t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want)) + } + return b + }) +} + +func TestDataBufferWriteAfterPartialRead(t *testing.T) { + testDataBuffer(t, []byte("cdxyz"), func(t *testing.T) *dataBuffer { + b := &dataBuffer{} + if n, err := b.Write([]byte("abcd")); n != 4 || err != nil { + t.Fatalf("Write(\"abcd\")=%v,%v want 4,nil", n, err) + } + p := make([]byte, 2) + if n, err := b.Read(p); n != 2 || err != nil || !bytes.Equal(p, []byte("ab")) { + t.Fatalf("Read()=%q,%v,%v want \"ab\",2,nil", p, n, err) + } + if n, err := b.Write([]byte("xyz")); n != 3 || err != nil { + t.Fatalf("Write(\"xyz\")=%v,%v want 3,nil", n, err) + } + return b + }) +} diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0bf..000000000 --- a/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/golang.org/x/net/http2/fixed_buffer_test.go b/vendor/golang.org/x/net/http2/fixed_buffer_test.go deleted file mode 100644 index f5432f8d8..000000000 --- a/vendor/golang.org/x/net/http2/fixed_buffer_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "reflect" - "testing" -) - -var bufferReadTests = []struct { - buf fixedBuffer - read, wn int - werr error - wp []byte - wbuf fixedBuffer -}{ - { - fixedBuffer{[]byte{'a', 0}, 0, 1}, - 5, 1, nil, []byte{'a'}, - fixedBuffer{[]byte{'a', 0}, 0, 0}, - }, - { - fixedBuffer{[]byte{0, 'a'}, 1, 2}, - 5, 1, nil, []byte{'a'}, - fixedBuffer{[]byte{0, 'a'}, 0, 0}, - }, - { - fixedBuffer{[]byte{'a', 'b'}, 0, 2}, - 1, 1, nil, []byte{'a'}, - fixedBuffer{[]byte{'a', 'b'}, 1, 2}, - }, - { - fixedBuffer{[]byte{}, 0, 0}, - 5, 0, errReadEmpty, []byte{}, - fixedBuffer{[]byte{}, 0, 0}, - }, -} - -func TestBufferRead(t *testing.T) { - for i, tt := range bufferReadTests { - read := make([]byte, tt.read) - n, err := tt.buf.Read(read) - if n != tt.wn { - t.Errorf("#%d: wn = %d want %d", i, n, tt.wn) - continue - } - if err != tt.werr { - t.Errorf("#%d: werr = %v want %v", i, err, tt.werr) - continue - } - read = read[:n] - if !reflect.DeepEqual(read, tt.wp) { - t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp) - } - if !reflect.DeepEqual(tt.buf, tt.wbuf) { - t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf) - } - } -} - -var bufferWriteTests = []struct { - buf fixedBuffer - write, wn int - werr error - wbuf fixedBuffer -}{ - { - buf: fixedBuffer{ - buf: []byte{}, - }, - wbuf: fixedBuffer{ - buf: []byte{}, - }, - }, - { - buf: fixedBuffer{ - buf: []byte{1, 'a'}, - }, - write: 1, - wn: 1, - wbuf: fixedBuffer{ - buf: []byte{0, 'a'}, - w: 1, - }, - }, - { - buf: fixedBuffer{ - buf: []byte{'a', 1}, - r: 1, - w: 1, - }, - write: 2, - wn: 2, - wbuf: fixedBuffer{ - buf: []byte{0, 0}, - w: 2, - }, - }, - { - buf: fixedBuffer{ - buf: []byte{}, - }, - write: 5, - werr: errWriteFull, - wbuf: fixedBuffer{ - buf: []byte{}, - }, - }, -} - -func TestBufferWrite(t *testing.T) { - for i, tt := range bufferWriteTests { - n, err := tt.buf.Write(make([]byte, tt.write)) - if n != tt.wn { - t.Errorf("#%d: wrote %d bytes; want %d", i, n, tt.wn) - continue - } - if err != tt.werr { - t.Errorf("#%d: error = %v; want %v", i, err, tt.werr) - continue - } - if !reflect.DeepEqual(tt.buf, tt.wbuf) { - t.Errorf("#%d: buf = %+v; want %+v", i, tt.buf, tt.wbuf) - } - } -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 957358837..3b1489072 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, @@ -323,6 +323,8 @@ type Framer struct { debugFramerBuf *bytes.Buffer debugReadLoggerf func(string, ...interface{}) debugWriteLoggerf func(string, ...interface{}) + + frameCache *frameCache // nil if frames aren't reused (default) } func (fr *Framer) maxHeaderListSize() uint32 { @@ -398,6 +400,27 @@ const ( maxFrameSize = 1<<24 - 1 ) +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. 
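+// Only DATA frames are currently cached for reuse; other frame types are
+// still allocated fresh on every call to ReadFrame.
+//
+// A minimal usage sketch (hypothetical caller code, not part of this package;
+// process is a placeholder for whatever consumes each frame):
+//
+//	fr := NewFramer(w, r)
+//	fr.SetReuseFrames()
+//	for {
+//		f, err := fr.ReadFrame()
+//		if err != nil {
+//			break
+//		}
+//		process(f) // f must not be retained past the next ReadFrame
+//	}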
+func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + // NewFramer returns a Framer that writes frames to w and reads them from r. func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ @@ -477,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } - f, err := typeFrameParser(fh.Type)(fh, payload) + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) @@ -565,7 +588,7 @@ func (f *DataFrame) Data() []byte { return f.data } -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier @@ -574,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // PROTOCOL_ERROR. return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } - f := &DataFrame{ - FrameHeader: fh, - } + f := fc.getDataFrame() + f.FrameHeader = fh + var padSize byte if fh.Flags.Has(FlagDataPadded) { var err error @@ -672,7 +695,7 @@ type SettingsFrame struct { p []byte } -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the // SETTINGS frame MUST be empty. 
Receipt of a @@ -774,7 +797,7 @@ type PingFrame struct { func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } -func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -814,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte { return f.debugData } -func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } @@ -854,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte { return f.p } -func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } @@ -865,7 +888,7 @@ type WindowUpdateFrame struct { Increment uint32 // never read with high bit set } -func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -930,7 +953,7 @@ func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } @@ -1067,7 +1090,7 @@ func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } -func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } @@ -1114,7 +1137,7 @@ type RSTStreamFrame struct { ErrCode ErrCode } -func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -1144,7 +1167,7 @@ type ContinuationFrame struct { headerFragBuf []byte } -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } @@ -1194,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } diff --git a/vendor/golang.org/x/net/http2/frame_test.go b/vendor/golang.org/x/net/http2/frame_test.go index 311f86f1c..37266bc58 100644 --- a/vendor/golang.org/x/net/http2/frame_test.go +++ b/vendor/golang.org/x/net/http2/frame_test.go @@ -1096,6 +1096,95 @@ func TestMetaFrameHeader(t *testing.T) { } } +func TestSetReuseFrames(t *testing.T) { + fr, buf := testFramer() + fr.SetReuseFrames() + + // Check that DataFrames are reused. Note that + // SetReuseFrames only currently implements reuse of DataFrames. 
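+	// Reuse is observed through pointer identity: with SetReuseFrames enabled,
+	// each subsequent DATA frame read should return the same *DataFrame value.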
+ firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t) + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t) + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf) + } + } + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("", 0, fr, buf, t) + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf) + } + } + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("HHH", 3, fr, buf, t) + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf) + } + } +} + +func TestSetReuseFramesMoreThanOnce(t *testing.T) { + fr, buf := testFramer() + fr.SetReuseFrames() + + firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t) + fr.SetReuseFrames() + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t) + // SetReuseFrames should be idempotent + fr.SetReuseFrames() + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf) + } + } +} + +func TestNoSetReuseFrames(t *testing.T) { + fr, buf := testFramer() + const numNewDataFrames = 10 + dfSoFar := make([]interface{}, numNewDataFrames) + + // Check that DataFrames are not reused if SetReuseFrames wasn't called. + // SetReuseFrames only currently implements reuse of DataFrames. + for i := 0; i < numNewDataFrames; i++ { + df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t) + for _, item := range dfSoFar { + if df == item { + t.Errorf("Expected Framer to return new DataFrames since SetNoReuseFrames not set.") + } + } + dfSoFar[i] = df + } +} + +func readAndVerifyDataFrame(data string, length byte, fr *Framer, buf *bytes.Buffer, t *testing.T) *DataFrame { + var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4 + fr.WriteData(streamID, true, []byte(data)) + wantEnc := "\x00\x00" + string(length) + "\x00\x01\x01\x02\x03\x04" + data + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + df, ok := f.(*DataFrame) + if !ok { + t.Fatalf("got %T; want *DataFrame", f) + } + if !bytes.Equal(df.Data(), []byte(data)) { + t.Errorf("got %q; want %q", df.Data(), []byte(data)) + } + if f.Header().Flags&1 == 0 { + t.Errorf("didn't see END_STREAM flag") + } + return df +} + func encodeHeaderRaw(t *testing.T, pairs ...string) []byte { var he hpackEncoder return he.encodeHeaderRaw(t, pairs...) diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 6b3b9f8b4..54726c2a3 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -39,6 +39,7 @@ func NewEncoder(w io.Writer) *Encoder { tableSizeUpdate: false, w: w, } + e.dynTab.table.init() e.dynTab.setMaxSize(initialHeaderTableSize) return e } @@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error { // only name matches, i points to that index and nameValueMatch // becomes false. 
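// For example (see encode_test.go below), {":method", "GET"} returns (2, true),
// while the same field marked Sensitive can only match by name and returns
// the newest ":method" entry, (3, false).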
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - for idx, hf := range staticTable { - if !constantTimeStringCompare(hf.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(idx + 1) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(hf.Value, f.Value) { - continue - } - i = uint64(idx + 1) - nameValueMatch = true - return + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true } - j, nameValueMatch := e.dynTab.search(f) + j, nameValueMatch := e.dynTab.table.search(f) if nameValueMatch || (i == 0 && j != 0) { - i = j + uint64(len(staticTable)) + return j + uint64(staticTable.len()), nameValueMatch } - return + + return i, false } // SetMaxDynamicTableSize changes the dynamic header table size to v. diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go index 92286f3ba..05f12db9c 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode_test.go +++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go @@ -7,6 +7,8 @@ package hpack import ( "bytes" "encoding/hex" + "fmt" + "math/rand" "reflect" "strings" "testing" @@ -101,17 +103,20 @@ func TestEncoderSearchTable(t *testing.T) { wantMatch bool }{ // Name and Value match - {pair("foo", "bar"), uint64(len(staticTable) + 3), true}, - {pair("blake", "miz"), uint64(len(staticTable) + 2), true}, + {pair("foo", "bar"), uint64(staticTable.len()) + 3, true}, + {pair("blake", "miz"), uint64(staticTable.len()) + 2, true}, {pair(":method", "GET"), 2, true}, - // Only name match because Sensitive == true - {HeaderField{":method", "GET", true}, 2, false}, + // Only name match because Sensitive == true. This is allowed to match + // any ":method" entry. The current implementation uses the last entry + // added in newStaticTable. + {HeaderField{":method", "GET", true}, 3, false}, // Only Name matches - {pair("foo", "..."), uint64(len(staticTable) + 3), false}, - {pair("blake", "..."), uint64(len(staticTable) + 2), false}, - {pair(":method", "..."), 2, false}, + {pair("foo", "..."), uint64(staticTable.len()) + 3, false}, + {pair("blake", "..."), uint64(staticTable.len()) + 2, false}, + // As before, this is allowed to match any ":method" entry. + {pair(":method", "..."), 3, false}, // None match {pair("foo-", "bar"), 0, false}, @@ -328,3 +333,54 @@ func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { func removeSpace(s string) string { return strings.Replace(s, " ", "", -1) } + +func BenchmarkEncoderSearchTable(b *testing.B) { + e := NewEncoder(nil) + + // A sample of possible header fields. + // This is not based on any actual data from HTTP/2 traces. + var possible []HeaderField + for _, f := range staticTable.ents { + if f.Value == "" { + possible = append(possible, f) + continue + } + // Generate 5 random values, except for cookie and set-cookie, + // which we know can have many values in practice. + num := 5 + if f.Name == "cookie" || f.Name == "set-cookie" { + num = 25 + } + for i := 0; i < num; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + for k := 0; k < 10; k++ { + f := HeaderField{ + Name: fmt.Sprintf("x-header-%d", k), + Sensitive: rand.Int()%2 == 0, + } + for i := 0; i < 5; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + + // Add a random sample to the dynamic table. This very loosely simulates + // a history of 100 requests with 20 header fields per request. 
+ for r := 0; r < 100*20; r++ { + f := possible[rand.Int31n(int32(len(possible)))] + // Skip if this is in the staticTable verbatim. + if _, has := staticTable.search(f); !has { + e.dynTab.add(f) + } + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for _, f := range possible { + e.searchTable(f) + } + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 007bc7f45..176644acd 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod emit: emitFunc, emitEnabled: true, } + d.dynTab.table.init() d.dynTab.allowedMaxSize = maxDynamicTableSize d.dynTab.setMaxSize(maxDynamicTableSize) return d @@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // ents is the FIFO described at // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 + table headerFieldTable + size uint32 // in bytes maxSize uint32 // current maxSize allowedMaxSize uint32 // maxSize may go up to this, inclusive } @@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) { dt.evict() } -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. - func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) + dt.table.addEntry(f) dt.size += f.Size() dt.evict() } -// If we're too big, evict old stuff (front of the slice) +// If we're too big, evict old stuff. func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].Size() - dt.ents = dt.ents[1:] - } - - // Shift slice contents down if we evicted things. - if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ } -} - -// constantTimeStringCompare compares string a and b in a constant -// time manner. -func constantTimeStringCompare(a, b string) bool { - if len(a) != len(b) { - return false - } - - c := byte(0) - - for i := 0; i < len(a); i++ { - c |= a[i] ^ b[i] - } - - return c == 0 -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. If there is name match or name/value match, i is the -// index of that entry (1-based). If both name and value match, -// nameValueMatch becomes true. -func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if !constantTimeStringCompare(ent.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(ent.Value, f.Value) { - continue - } - i = uint64(l - j) - nameValueMatch = true - return - } - return + dt.table.evictOldest(n) } func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) + // This should never overflow. 
RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() } func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { + // See Section 2.3.3. + if i == 0 { return } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } if i > uint64(d.maxTableIndex()) { return } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true } // Decode decodes an entire block. diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go index 4c7b17bfb..c2f8fd102 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack_test.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go @@ -5,117 +5,16 @@ package hpack import ( - "bufio" "bytes" "encoding/hex" "fmt" "math/rand" "reflect" - "regexp" - "strconv" "strings" "testing" "time" ) -func TestStaticTable(t *testing.T) { - fromSpec := ` - +-------+-----------------------------+---------------+ - | 1 | :authority | | - | 2 | :method | GET | - | 3 | :method | POST | - | 4 | :path | / | - | 5 | :path | /index.html | - | 6 | :scheme | http | - | 7 | :scheme | https | - | 8 | :status | 200 | - | 9 | :status | 204 | - | 10 | :status | 206 | - | 11 | :status | 304 | - | 12 | :status | 400 | - | 13 | :status | 404 | - | 14 | :status | 500 | - | 15 | accept-charset | | - | 16 | accept-encoding | gzip, deflate | - | 17 | accept-language | | - | 18 | accept-ranges | | - | 19 | accept | | - | 20 | access-control-allow-origin | | - | 21 | age | | - | 22 | allow | | - | 23 | authorization | | - | 24 | cache-control | | - | 25 | content-disposition | | - | 26 | content-encoding | | - | 27 | content-language | | - | 28 | content-length | | - | 29 | content-location | | - | 30 | content-range | | - | 31 | content-type | | - | 32 | cookie | | - | 33 | date | | - | 34 | etag | | - | 35 | expect | | - | 36 | expires | | - | 37 | from | | - | 38 | host | | - | 39 | if-match | | - | 40 | if-modified-since | | - | 41 | if-none-match | | - | 42 | if-range | | - | 43 | if-unmodified-since | | - | 44 | last-modified | | - | 45 | link | | - | 46 | location | | - | 47 | max-forwards | | - | 48 | proxy-authenticate | | - | 49 | proxy-authorization | | - | 50 | range | | - | 51 | referer | | - | 52 | refresh | | - | 53 | retry-after | | - | 54 | server | | - | 55 | set-cookie | | - | 56 | strict-transport-security | | - | 57 | transfer-encoding | | - | 58 | user-agent | | - | 59 | vary | | - | 60 | via | | - | 61 | www-authenticate | | - +-------+-----------------------------+---------------+ -` - bs := bufio.NewScanner(strings.NewReader(fromSpec)) - re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) - for bs.Scan() { - l := bs.Text() - if !strings.Contains(l, "|") { - continue - } - m := re.FindStringSubmatch(l) - if m == nil { - continue - } - i, err := strconv.Atoi(m[1]) - if err != nil { - t.Errorf("Bogus integer on line %q", l) - continue - } - if i < 1 || i > len(staticTable) { - t.Errorf("Bogus index %d on line %q", i, l) - continue - } - if got, want := 
staticTable[i-1].Name, m[2]; got != want { - t.Errorf("header index %d name = %q; want %q", i, got, want) - } - if got, want := staticTable[i-1].Value, m[3]; got != want { - t.Errorf("header index %d value = %q; want %q", i, got, want) - } - } - if err := bs.Err(); err != nil { - t.Error(err) - } -} - func (d *Decoder) mustAt(idx int) HeaderField { if hf, ok := d.at(uint64(idx)); !ok { panic(fmt.Sprintf("bogus index %d", idx)) @@ -132,10 +31,10 @@ func TestDynamicTableAt(t *testing.T) { } d.dynTab.add(pair("foo", "bar")) d.dynTab.add(pair("blake", "miz")) - if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want { + if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want { t.Errorf("at(dyn 1) = %v; want %v", got, want) } - if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want { + if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want { t.Errorf("at(dyn 2) = %v; want %v", got, want) } if got, want := at(3), (pair(":method", "POST")); got != want { @@ -143,41 +42,6 @@ func TestDynamicTableAt(t *testing.T) { } } -func TestDynamicTableSearch(t *testing.T) { - dt := dynamicTable{} - dt.setMaxSize(4096) - - dt.add(pair("foo", "bar")) - dt.add(pair("blake", "miz")) - dt.add(pair(":method", "GET")) - - tests := []struct { - hf HeaderField - wantI uint64 - wantMatch bool - }{ - // Name and Value match - {pair("foo", "bar"), 3, true}, - {pair(":method", "GET"), 1, true}, - - // Only name match because of Sensitive == true - {HeaderField{"blake", "miz", true}, 2, false}, - - // Only Name matches - {pair("foo", "..."), 3, false}, - {pair("blake", "..."), 2, false}, - {pair(":method", "..."), 1, false}, - - // None match - {pair("foo-", "bar"), 0, false}, - } - for _, tt := range tests { - if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { - t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) - } - } -} - func TestDynamicTableSizeEvict(t *testing.T) { d := NewDecoder(4096, nil) if want := uint32(0); d.dynTab.size != want { @@ -196,7 +60,7 @@ func TestDynamicTableSizeEvict(t *testing.T) { if want := uint32(6 + 32); d.dynTab.size != want { t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) } - if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want { + if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want { t.Errorf("at(dyn 1) = %v; want %v", got, want) } add(pair("long", strings.Repeat("x", 500))) @@ -255,9 +119,9 @@ func TestDecoderDecode(t *testing.T) { } func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { - hf = make([]HeaderField, len(dt.ents)) + hf = make([]HeaderField, len(dt.table.ents)) for i := range hf { - hf[i] = dt.ents[len(dt.ents)-1-i] + hf[i] = dt.table.ents[len(dt.table.ents)-1-i] } return } diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go index b9283a023..870159244 100644 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -4,73 +4,199 @@ package hpack +import ( + "fmt" +) + +// headerFieldTable implements a list of HeaderFields. +// This is used to implement the static and dynamic tables. +type headerFieldTable struct { + // For static tables, entries are never evicted. + // + // For dynamic tables, entries are evicted from ents[0] and added to the end. 
+ // Each entry has a unique id that starts at one and increments for each + // entry that is added. This unique id is stable across evictions, meaning + // it can be used as a pointer to a specific entry. As in hpack, unique ids + // are 1-based. The unique id for ents[k] is k + evictCount + 1. + // + // Zero is not a valid unique id. + // + // evictCount should not overflow in any remotely practical situation. In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + t.byName[f.Name] = 0 + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + t.byNameValue[p] = 0 + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
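+// For example, in a dynamic table with evictCount=3 and three live entries,
+// the newest entry is ents[2] with id 6, so k = 6-3-1 = 2 and the returned
+// HPACK index is len()-k = 1.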
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + func pair(name, value string) HeaderField { return HeaderField{Name: name, Value: value} } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), +var staticTable = newStaticTable() + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + t.addEntry(pair(":authority", "")) + t.addEntry(pair(":method", "GET")) + t.addEntry(pair(":method", "POST")) + t.addEntry(pair(":path", "/")) + t.addEntry(pair(":path", "/index.html")) + t.addEntry(pair(":scheme", "http")) + t.addEntry(pair(":scheme", "https")) + t.addEntry(pair(":status", "200")) + t.addEntry(pair(":status", "204")) + t.addEntry(pair(":status", "206")) + t.addEntry(pair(":status", "304")) + t.addEntry(pair(":status", "400")) + t.addEntry(pair(":status", "404")) + t.addEntry(pair(":status", "500")) + t.addEntry(pair("accept-charset", "")) + t.addEntry(pair("accept-encoding", "gzip, deflate")) + t.addEntry(pair("accept-language", "")) + t.addEntry(pair("accept-ranges", "")) + t.addEntry(pair("accept", "")) + t.addEntry(pair("access-control-allow-origin", "")) + t.addEntry(pair("age", "")) + t.addEntry(pair("allow", "")) + t.addEntry(pair("authorization", "")) + t.addEntry(pair("cache-control", "")) + t.addEntry(pair("content-disposition", "")) + t.addEntry(pair("content-encoding", "")) + t.addEntry(pair("content-language", "")) + t.addEntry(pair("content-length", "")) + t.addEntry(pair("content-location", "")) + t.addEntry(pair("content-range", "")) + t.addEntry(pair("content-type", "")) + 
t.addEntry(pair("cookie", "")) + t.addEntry(pair("date", "")) + t.addEntry(pair("etag", "")) + t.addEntry(pair("expect", "")) + t.addEntry(pair("expires", "")) + t.addEntry(pair("from", "")) + t.addEntry(pair("host", "")) + t.addEntry(pair("if-match", "")) + t.addEntry(pair("if-modified-since", "")) + t.addEntry(pair("if-none-match", "")) + t.addEntry(pair("if-range", "")) + t.addEntry(pair("if-unmodified-since", "")) + t.addEntry(pair("last-modified", "")) + t.addEntry(pair("link", "")) + t.addEntry(pair("location", "")) + t.addEntry(pair("max-forwards", "")) + t.addEntry(pair("proxy-authenticate", "")) + t.addEntry(pair("proxy-authorization", "")) + t.addEntry(pair("range", "")) + t.addEntry(pair("referer", "")) + t.addEntry(pair("refresh", "")) + t.addEntry(pair("retry-after", "")) + t.addEntry(pair("server", "")) + t.addEntry(pair("set-cookie", "")) + t.addEntry(pair("strict-transport-security", "")) + t.addEntry(pair("transfer-encoding", "")) + t.addEntry(pair("user-agent", "")) + t.addEntry(pair("vary", "")) + t.addEntry(pair("via", "")) + t.addEntry(pair("www-authenticate", "")) + return t } var huffmanCodes = [256]uint32{ diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go new file mode 100644 index 000000000..7f40d9a42 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables_test.go @@ -0,0 +1,188 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bufio" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestHeaderFieldTable(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // Tests will be run twice: once before evicting anything, and + // again after evicting the three oldest entries. + tests := []struct { + f HeaderField + beforeWantStaticI uint64 + beforeWantMatch bool + afterWantStaticI uint64 + afterWantMatch bool + }{ + {HeaderField{"key1", "value1-1", false}, 1, true, 0, false}, + {HeaderField{"key1", "value1-2", false}, 3, true, 0, false}, + {HeaderField{"key1", "value1-3", false}, 3, false, 0, false}, + {HeaderField{"key2", "value2-1", false}, 2, true, 3, false}, + {HeaderField{"key2", "value2-2", false}, 6, true, 3, true}, + {HeaderField{"key2", "value2-3", false}, 6, false, 3, false}, + {HeaderField{"key4", "value4-1", false}, 5, true, 2, true}, + // Name match only, because sensitive. + {HeaderField{"key4", "value4-1", true}, 5, false, 2, false}, + // Key not found. 
+ {HeaderField{"key5", "value5-x", false}, 0, false, 0, false}, + } + + staticToDynamic := func(i uint64) uint64 { + if i == 0 { + return 0 + } + return uint64(table.len()) - i + 1 // dynamic is the reversed table + } + + searchStatic := func(f HeaderField) (uint64, bool) { + old := staticTable + staticTable = table + defer func() { staticTable = old }() + return staticTable.search(f) + } + + searchDynamic := func(f HeaderField) (uint64, bool) { + return table.search(f) + } + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.beforeWantStaticI) + if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } + + table.evictOldest(3) + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.afterWantStaticI) + if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } +} + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| 
(\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > staticTable.len() { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable.ents[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable.ents[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 3c641a8c2..550427dda 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -110,11 +110,38 @@ type Server struct { // activity for the purposes of IdleTimeout. IdleTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + // NewWriteScheduler constructs a write scheduler for a connection. // If nil, a default scheduler is chosen. NewWriteScheduler func() WriteScheduler } +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + func (s *Server) maxReadFrameSize() uint32 { if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { return v @@ -255,27 +282,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { defer cancel() sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - wantStartPushCh: make(chan startPushRequest, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + 
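The two new Server fields above (MaxUploadBufferPerConnection and MaxUploadBufferPerStream) are the public knobs for the larger receive windows this change introduces; out-of-range or zero values fall back to the 1<<20 defaults chosen by initialConnRecvWindowSize and initialStreamRecvWindowSize. For reference, a minimal caller-side sketch (not part of this patch; the address, handler and certificate paths are placeholders):

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	h2 := &http2.Server{
		// Connection-level receive window; out-of-range values fall back
		// to the 1<<20 default.
		MaxUploadBufferPerConnection: 1 << 21,
		// Per-stream receive window; zero means "use the 1<<20 default".
		MaxUploadBufferPerStream: 1 << 20,
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) {
		n, _ := io.Copy(ioutil.Discard, r.Body)
		log.Printf("received %d bytes", n)
	})
	srv := &http.Server{Addr: ":8443", Handler: mux}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}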
wantStartPushCh: make(chan startPushRequest, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, } // The net/http package sets the write deadline from the @@ -294,6 +321,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.writeSched = NewRandomWriteScheduler() } + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) @@ -387,34 +417,34 @@ type serverConn struct { writeSched WriteScheduler // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - idleTimerCh <-chan time.Time // nil if unused + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimerCh <-chan time.Time // nil until used + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + idleTimerCh <-chan time.Time // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -463,10 +493,9 @@ type stream struct { numTrailerValues int64 weight uint8 state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - reqBuf []byte // if non-nil, body pipe buffer to return later at EOF + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -696,15 +725,17 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) sc.unackedSettings++ + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return @@ -1395,9 +1426,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { // adjust the size of all stream flow control windows that it // maintains by the difference between the new value and the // old value." 
- old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative for _, st := range sc.streams { if !st.flow.add(growth) { // 6.9.2 Initial Flow Control Window Size @@ -1719,9 +1750,9 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream } st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) sc.streams[id] = st sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) @@ -1785,16 +1816,14 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, err } if bodyOpen { - st.reqBuf = getRequestBodyBuf() - req.Body.(*requestBody).pipe = &pipe{ - b: &fixedBuffer{buf: st.reqBuf}, - } - if vv, ok := rp.header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } } return rw, req, nil } @@ -1890,24 +1919,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r return rw, req, nil } -var reqBodyCache = make(chan []byte, 8) - -func getRequestBodyBuf() []byte { - select { - case b := <-reqBodyCache: - return b - default: - return make([]byte, initialWindowSize) - } -} - -func putRequestBodyBuf(b []byte) { - select { - case reqBodyCache <- b: - default: - } -} - // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true @@ -2003,12 +2014,6 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { case <-sc.doneServing: } } - if err == io.EOF { - if buf := st.reqBuf; buf != nil { - st.reqBuf = nil // shouldn't matter; field unused by other - putRequestBodyBuf(buf) - } - } } func (sc *serverConn) noteBodyRead(st *stream, n int) { diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go index c2e51e67a..407fafc6d 100644 --- a/vendor/golang.org/x/net/http2/server_test.go +++ b/vendor/golang.org/x/net/http2/server_test.go @@ -80,6 +80,7 @@ type serverTesterOpt string var optOnlyServer = serverTesterOpt("only_server") var optQuiet = serverTesterOpt("quiet_logging") +var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { resetHooks() @@ -91,7 +92,7 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} NextProtos: []string{NextProtoTLS}, } - var onlyServer, quiet bool + var onlyServer, quiet, framerReuseFrames bool h2server := new(Server) for _, opt := range opts { switch v := opt.(type) { @@ -107,6 +108,8 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} onlyServer = true case optQuiet: quiet = true + case optFramerReuseFrames: + framerReuseFrames = true } case func(net.Conn, http.ConnState): ts.Config.ConnState = v @@ -149,6 +152,9 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} } st.cc = cc st.fr = NewFramer(cc, cc) + if framerReuseFrames { + st.fr.SetReuseFrames() + } if !logFrameReads && !logFrameWrites { st.fr.debugReadLoggerf = func(m string, v ...interface{}) { m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" @@ -254,11 +260,52 @@ func (st *serverTester) Close() { // greet initiates the client's HTTP/2 connection into a state where // frames may be sent. func (st *serverTester) greet() { + st.greetAndCheckSettings(func(Setting) error { return nil }) +} + +func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) { st.writePreface() st.writeInitialSettings() - st.wantSettings() + st.wantSettings().ForeachSetting(checkSetting) st.writeSettingsAck() - st.wantSettingsAck() + + // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order. 
+ var gotSettingsAck bool + var gotWindowUpdate bool + + for i := 0; i < 2; i++ { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *SettingsFrame: + if !f.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + gotSettingsAck = true + + case *WindowUpdateFrame: + if f.FrameHeader.StreamID != 0 { + st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID, 0) + } + incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize) + if f.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) + } + gotWindowUpdate = true + + default: + st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f) + } + } + + if !gotSettingsAck { + st.t.Fatalf("Didn't get a settings ACK") + } + if !gotWindowUpdate { + st.t.Fatalf("Didn't get a window update") + } } func (st *serverTester) writePreface() { @@ -578,12 +625,7 @@ func TestServer(t *testing.T) { server sends in the HTTP/2 connection. `) - st.writePreface() - st.writeInitialSettings() - st.wantSettings() - st.writeSettingsAck() - st.wantSettingsAck() - + st.greet() st.writeHeaders(HeadersFrameParam{ StreamID: 1, // clients send odd numbers BlockFragment: st.encodeHeader(), @@ -2595,11 +2637,9 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) { defer st.Close() // shake hands - st.writePreface() - st.writeInitialSettings() frameSize := defaultMaxReadFrameSize var advHeaderListSize *uint32 - st.wantSettings().ForeachSetting(func(s Setting) error { + st.greetAndCheckSettings(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: if s.Val < minMaxFrameSize { @@ -2614,8 +2654,6 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) { } return nil }) - st.writeSettingsAck() - st.wantSettingsAck() if advHeaderListSize == nil { t.Errorf("server didn't advertise a max header list size") @@ -2994,6 +3032,89 @@ func BenchmarkServerPosts(b *testing.B) { } } +// Send a stream of messages from server to client in separate data frames. +// Brings up performance issues seen in long streams. +// Created to show problem in go issue #18502 +func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) { + benchmarkServerToClientStream(b) +} + +// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8 +// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer. +func BenchmarkServerToClientStreamReuseFrames(b *testing.B) { + benchmarkServerToClientStream(b, optFramerReuseFrames) +} + +func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msgLen = 1 + // default window size + const windowSize = 1<<16 - 1 + + // next message to send from the server and for the client to expect + nextMsg := func(i int) []byte { + msg := make([]byte, msgLen) + msg[0] = byte(i) + if len(msg) != msgLen { + panic("invalid test setup msg length") + } + return msg + } + + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + // Consume the (empty) body from th peer before replying, otherwise + // the server will sometimes (depending on scheduling) send the peer a + // a RST_STREAM with the CANCEL error code. + if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) + } + for i := 0; i < b.N; i += 1 { + w.Write(nextMsg(i)) + w.(http.Flusher).Flush() + } + }, newServerOpts...) 
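The benchmark above opts the test Framer into frame reuse via SetReuseFrames, which is what the Change-Id in the comment is justifying. A minimal standalone sketch (not part of this patch) of that Framer API, using a bytes.Buffer as both the write and read side; with reuse enabled, a returned Frame is only valid until the next ReadFrame call, so it must be consumed or copied first:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	// Write a small DATA frame on stream 1 with END_STREAM set.
	if err := fr.WriteData(1, true, []byte("hello")); err != nil {
		log.Fatal(err)
	}

	// Opt in to frame reuse: the Framer may recycle Frame values between
	// ReadFrame calls, trading safety of retained frames for fewer allocations.
	fr.SetReuseFrames()

	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	if df, ok := f.(*http2.DataFrame); ok {
		fmt.Printf("DATA stream=%d len=%d end_stream=%v\n",
			df.StreamID, len(df.Data()), df.StreamEnded())
	}
}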
+ defer st.Close() + st.greet() + + const id = uint32(1) + + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + + st.writeData(id, true, nil) + st.wantHeaders() + + var pendingWindowUpdate = uint32(0) + + for i := 0; i < b.N; i += 1 { + expected := nextMsg(i) + df := st.wantData() + if bytes.Compare(expected, df.data) != 0 { + b.Fatalf("Bad message received; want %v; got %v", expected, df.data) + } + // try to send infrequent but large window updates so they don't overwhelm the test + pendingWindowUpdate += uint32(len(df.data)) + if pendingWindowUpdate >= windowSize/2 { + if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + pendingWindowUpdate = 0 + } + } + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } +} + // go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 // Verify we don't hang. func TestIssue53(t *testing.T) { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index fef839686..84d042d46 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1528,8 +1528,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return res, nil } - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} cs.bytesRemain = res.ContentLength res.Body = transportResponseBody{cs} go cs.awaitRequestCancel(cs.req) diff --git a/vendor/golang.org/x/net/internal/netreflect/socket.go b/vendor/golang.org/x/net/internal/netreflect/socket.go index e82e51c44..1495b65f5 100644 --- a/vendor/golang.org/x/net/internal/netreflect/socket.go +++ b/vendor/golang.org/x/net/internal/netreflect/socket.go @@ -2,8 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.9 + // Package netreflect implements run-time reflection for the // facilities of net package. +// +// This package works only for Go 1.8 or below. package netreflect import ( diff --git a/vendor/golang.org/x/net/internal/netreflect/socket_19.go b/vendor/golang.org/x/net/internal/netreflect/socket_19.go new file mode 100644 index 000000000..74df52e1a --- /dev/null +++ b/vendor/golang.org/x/net/internal/netreflect/socket_19.go @@ -0,0 +1,37 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package netreflect + +import ( + "errors" + "net" +) + +var ( + errInvalidType = errors.New("invalid type") + errOpNoSupport = errors.New("operation not supported") +) + +// SocketOf returns the socket descriptor of c. +func SocketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn, *net.UnixConn: + return 0, errOpNoSupport + default: + return 0, errInvalidType + } +} + +// PacketSocketOf returns the socket descriptor of c. 
+func PacketSocketOf(c net.PacketConn) (uintptr, error) { + switch c.(type) { + case *net.UDPConn, *net.IPConn, *net.UnixConn: + return 0, errOpNoSupport + default: + return 0, errInvalidType + } +} diff --git a/vendor/golang.org/x/net/internal/netreflect/socket_posix.go b/vendor/golang.org/x/net/internal/netreflect/socket_posix.go index df475a2b2..410c0924d 100644 --- a/vendor/golang.org/x/net/internal/netreflect/socket_posix.go +++ b/vendor/golang.org/x/net/internal/netreflect/socket_posix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.9 // +build darwin dragonfly freebsd linux netbsd openbsd solaris windows package netreflect diff --git a/vendor/golang.org/x/net/internal/netreflect/socket_stub.go b/vendor/golang.org/x/net/internal/netreflect/socket_stub.go index 85adb4b7f..17b20c478 100644 --- a/vendor/golang.org/x/net/internal/netreflect/socket_stub.go +++ b/vendor/golang.org/x/net/internal/netreflect/socket_stub.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.9 // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package netreflect diff --git a/vendor/golang.org/x/net/internal/netreflect/socket_test.go b/vendor/golang.org/x/net/internal/netreflect/socket_test.go index 49b97ed54..b3aad0d92 100644 --- a/vendor/golang.org/x/net/internal/netreflect/socket_test.go +++ b/vendor/golang.org/x/net/internal/netreflect/socket_test.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + package netreflect_test import ( diff --git a/vendor/golang.org/x/net/ipv4/go19_test.go b/vendor/golang.org/x/net/ipv4/go19_test.go new file mode 100644 index 000000000..82a27b113 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/go19_test.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +func init() { + disableTests = true +} diff --git a/vendor/golang.org/x/net/ipv4/ipv4_test.go b/vendor/golang.org/x/net/ipv4/ipv4_test.go new file mode 100644 index 000000000..917299283 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/ipv4_test.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "fmt" + "os" + "testing" +) + +var disableTests = false + +func TestMain(m *testing.M) { + if disableTests { + fmt.Fprintf(os.Stderr, "ipv4 tests disabled in Go 1.9 until netreflect is fixed. (Issue 19051)\n") + os.Exit(0) + } + // call flag.Parse() here if TestMain uses flags + os.Exit(m.Run()) +} diff --git a/vendor/golang.org/x/net/ipv6/go19_test.go b/vendor/golang.org/x/net/ipv6/go19_test.go new file mode 100644 index 000000000..c7cb057d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/go19_test.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package ipv6 + +func init() { + disableTests = true +} diff --git a/vendor/golang.org/x/net/ipv6/ipv6_test.go b/vendor/golang.org/x/net/ipv6/ipv6_test.go new file mode 100644 index 000000000..8d2d23542 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/ipv6_test.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "os" + "testing" +) + +var disableTests = false + +func TestMain(m *testing.M) { + if disableTests { + fmt.Fprintf(os.Stderr, "ipv6 tests disabled in Go 1.9 until netreflect is fixed (Issue 19051)\n") + os.Exit(0) + } + // call flag.Parse() here if TestMain uses flags + os.Exit(m.Run()) +} diff --git a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s index 1ebca3739..39d76af79 100644 --- a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s +++ b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s @@ -6,6 +6,3 @@ TEXT ·sysvicall6(SB),NOSPLIT,$0-88 JMP syscall·sysvicall6(SB) - -TEXT ·keepAlive(SB),NOSPLIT,$0 - RET diff --git a/vendor/golang.org/x/net/lif/syscall.go b/vendor/golang.org/x/net/lif/syscall.go index 5fe073620..aadab2e14 100644 --- a/vendor/golang.org/x/net/lif/syscall.go +++ b/vendor/golang.org/x/net/lif/syscall.go @@ -19,13 +19,8 @@ var procIoctl uintptr func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) -// TODO: replace with runtime.KeepAlive when available -//go:noescape -func keepAlive(p unsafe.Pointer) - func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) - keepAlive(arg) if errno != 0 { return error(errno) } diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go index 9b9628239..973f57f19 100644 --- a/vendor/golang.org/x/net/proxy/socks5.go +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -72,24 +72,28 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { if err != nil { return nil, err } - closeConn := &conn - defer func() { - if closeConn != nil { - (*closeConn).Close() - } - }() + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} - host, portStr, err := net.SplitHostPort(addr) +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
+func (s *socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) if err != nil { - return nil, err + return err } port, err := strconv.Atoi(portStr) if err != nil { - return nil, errors.New("proxy: failed to parse port number: " + portStr) + return errors.New("proxy: failed to parse port number: " + portStr) } if port < 1 || port > 0xffff { - return nil, errors.New("proxy: port number out of range: " + portStr) + return errors.New("proxy: port number out of range: " + portStr) } // the size here is just an estimate @@ -103,17 +107,17 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { } if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if buf[0] != 5 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) } if buf[1] == 0xff { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") } if buf[1] == socks5AuthPassword { @@ -125,15 +129,15 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, s.password...) if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if buf[1] != 0 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") } } @@ -150,7 +154,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, ip...) 
} else { if len(host) > 255 { - return nil, errors.New("proxy: destination hostname too long: " + host) + return errors.New("proxy: destination hostname too long: " + host) } buf = append(buf, socks5Domain) buf = append(buf, byte(len(host))) @@ -159,11 +163,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, byte(port>>8), byte(port)) if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } failure := "unknown error" @@ -172,7 +176,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { } if len(failure) > 0 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) } bytesToDiscard := 0 @@ -184,11 +188,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { case socks5Domain: _, err := io.ReadFull(conn, buf[:1]) if err != nil { - return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } bytesToDiscard = int(buf[0]) default: - return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) } if cap(buf) < bytesToDiscard { @@ -197,14 +201,13 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = buf[:bytesToDiscard] } if _, err := io.ReadFull(conn, buf); err != nil { - return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } // Also need to discard the port number if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } - closeConn = nil - return conn, nil + return nil } diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go index bf20c036b..5db1e6986 100644 --- a/vendor/golang.org/x/net/publicsuffix/table.go +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's public_suffix_list.dat, git revision 915565885d0fbd25caf7d8b339cd3478f558da94 (2016-10-19T08:16:09Z)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision 45a2bf8ef3e22000fbe4bfa5f9252db41d777001 (2017-01-18T01:04:06Z)" const ( nodesBitsChildren = 9 @@ -23,446 +23,447 @@ const ( ) // numTLD is the number of top level domains. -const numTLD = 1553 +const numTLD = 1554 // Text is the combined text of all labels. 
-const text = "biellaakesvuemieleccebieszczadygeyachimatainaircraftraeumtgerade" + - "alstahaugesunderseaportsinfolldalaskanittedallasalleasinglesango" + - "ppdalinzaintuitateshinanomachintaifun-dnsaliaskimitsubatamicable" + - "-modembetsukuinuyamanouchikuhokuryugasakitaurayasudabievatmallor" + - "cadaquesanjotateyamabifukagawalmartatsunobihorologyuzhno-sakhali" + - "nskaszubybikedagestangebilbaogakievenesannaninomiyakonojoshkar-o" + - "lawabillustrationirasakinvestmentsannohelplfinancialipetskatowic" + - "ebiobirdartcenterprisesakikuchikuseikarugapartmentsanokatsushika" + - "beeldengeluidunloppacificasinore-og-uvdalivornobirkenesoddtangen" + - "ovarabirthplacebjarkoybjerkreimdbalatinorddalillyonagoyastronomy" + - "asustor-elvdalwaysdatabaseballangenoamishirasatochigiessenebakke" + - "shibechambagriculturennebudapest-a-la-masionativeamericanantique" + - "s3-ap-northeast-2bjugnieznordlandunsantabarbarablockbusternidupo" + - "ntariobloombergbauernrtattoolsztynsettlersantacruzsantafedextras" + - "pace-to-rentalstomakomaibarabloxcmsanukis-a-candidatebluedaplier" + - "neustarhubalestrandabergamoarekeymachineues3-us-west-1bmoattachm" + - "entsaotomeloyalistjordalshalsenishiazais-a-catererbmsapodhalewis" + - "millerbmweirbnpparibaselburgloppenzaogashimadachicagoboatsapporo" + - "bnrwfarmsteadurbanamexhibitionishigotsukisosakitagawabomloanswat" + - "ch-and-clockerbondurhamburgmbhartiffanybonnishiharabookingminaka" + - "michiharabootsaratovalleaostatoilomzaporizhzheguris-a-celticsfan" + - "ishiizunazukis-a-chefarsundvrcambridgestonewyorkshirecreationish" + - "ikatakayamatsuzakis-a-conservativefsncfdvrdnsiskinkyotobetsumida" + - "tlanticateringebudejjuedischesapeakebayernurembergmodenakanotodd" + - "enishikatsuragithubusercontentaxihuanishikawazukanazawaboschaeff" + - "lerdalorenskogmxfinitybostikatsuyamaseratis-a-cpadoval-daostaval" + - "leybostonakijinsekikogentingrimstadwgripebotanicalgardenishimera" + - "botanicgardenishinomiyashironobotanybouncemerckmsdnipropetrovskl" + - "eppalmspringsakerbounty-fullensakerrypropertiesardegnamsosnowiec" + - "atholicheltenham-radio-openair-traffic-controlleyboutiquebecngri" + - "wataraidyndns-ipamperedchefashionishinoomotegovtgorybozentsujiie" + - "bradescorporationishinoshimatta-varjjatjeldsundyndns-mailotenkaw" + - "abrandywinevalleybrasiliabresciabrindisibenikebristolgaulardalot" + - "tebritishcolumbialowiezaganquannefrankfurtjmaxxxjaworznowtvalled" + - "-aostavangerbroadcastleclerchelyabinskypescaravantaabroadwaybrok" + - "e-itjometlifeinsurancebrokerbronnoysundyndns-office-on-the-webca" + - "mpobassociatesardiniabrothermesaverdeatnuorockartuzybrowsersafet" + - "ymarketsarlottokorozawabrumunddalouvreitjxn--0trq7p7nnishiokoppe" + - "gardyndns-picsarpsborgroks-thisayamanashiibaghdadultkmaxxn--11b4" + - "c3dyndns-remotegildeskalmykiabrunelblagdenesnaaseralingenkainana" + - "ejrietisalatinabenoboribetsucksarufutsunomiyawakasaikaitakoelnis" + - "hitosashimizunaminamiashigarabrusselsasayamabruxellesaseboknowsi" + - "tallowiczest-le-patrondheimperiabryanskodjeepostfoldnavyatkakami" + - "gaharabrynewhampshirebungoonordreisa-geekaufenishiwakis-a-cubicl" + - "e-slavellinotteroybuskerudinewhollandyndns-servercellikes-piedmo" + - "ntblancomeeresaskatchewanggouvicenzabuzenissandnessjoenissayokos" + - "hibahikariwanumatakazakis-a-democratmpanamabuzzgradyndns-weberli" + - "ncolnissedalucaniabwhalingrondarbzhitomirkutskydivingrongacomput" + - "erhistoryofscience-fictioncomsecuritytacticschulezajskddielddanu" + - 
"orrikuzentakatajirissagamiharacondoshichinohealth-carereformitak" + - "eharaconferenceconstructionconsuladoharuhrconsultanthropologycon" + - "sultingvollutskfhappoumuenchencontactoyotomiyazakis-a-geekgalaxy" + - "contemporaryarteducationalchikugojomedio-campidano-mediocampidan" + - "omediocontractorskenconventureshinodesashibetsuikinderoycookingc" + - "hannelveruminamibosogndaluxembourgujolstercoolkuszippodlasiellak" + - "asamatsudovre-eikercoopencraftoyotsukaidownloadcopenhagencyclope" + - "dichernovtsykkylvenetogakushimotoganewmexicoldwarmiamiastalowa-w" + - "oladbrokesassaris-a-designerimarumorimachidacorsicagliaridagawal" + - "tercorvettenrightathomegoodschwarzgwangjuifminamidaitomangotemba" + - "ixadacosenzamamibuilderschmidtre-gauldaluxurycostumedizinhistori" + - "scheschweizjcbnluzerncouchpotatofriesciencecentersciencehistoryc" + - "ouncilvivano-frankivskhabarovskhakassiacouponscientistockholmest" + - "randcoursescjohnsoncq-acranbrookuwanalyticscotlandcreditcardcred" + - "itunioncremonashorokanaiecrewiiheyaizuwakamatsubushikusakadogawa" + - "cricketrzyncrimeacrotonewspapercrownprovidercrsvparaglidingulenc" + - "ruisescrapper-sitecryptonomichigangwoncuisinellajollamericanexpr" + - "essexyculturalcentertainmentoyouracuneocupcakecxn--1ctwolominama" + - "takkofuefukihabororostrowwlkpmgunmarnardalcymruovatoystre-slidre" + - "ttozawacyonabarussiacyouthdfcbankzlguovdageaidnufcfanfieldfiguer" + - "estaurantozsdefilateliafilminamiechizenfinalfinancefineartservef" + - "tparisor-fronfinlandfinnoyfirebaseapparliamentranbyfirenzefirest" + - "onexus-east-1firmdaleirfjordfishingolffanservegame-serverisignfi" + - "tjarqhachiojiyahikobeatservehalflifestylefitnessettlementrani-an" + - "dria-barletta-trani-andriafjalerflesbergflickragerotikamakurazak" + - "irkeneservehttparmaflightservehumourflirumansionserveirchiryukyu" + - "ragifuchukotkakegawassamukawataricohdavvenjargausdaluccapitalone" + - "wjerseyflogintogurafloraflorencefloridafloristanohatakaharulvikh" + - "arkovalledaostavernflorokunohealthcareerserveminecraftraniandria" + - "barlettatraniandriaflowerservemp3utilitiesquarezzoologicalvinkle" + - "in-addrammenuernbergdyniabogadocscbggfareastcoastaldefence-burgj" + - "emnes3-ap-northeast-1kappleaseating-organicbcg12000emmafanconaga" + - "wakayamadridvagsoyericsson-aptibleangaviikadenaamesjevuemielno-i" + - "p6flynnhubalsfjordiscountysnes3-us-west-2fndfoodnetworkshoppingf" + - "or-ourfor-someetnedalfor-theaterforexrothruheredstoneforgotdnser" + - "vep2parocherkasyzrankoshigayaltaijis-a-greenforli-cesena-forlice" + - "senaforlikescandyndns-at-workinggrouparservepicservequakeforsale" + - "irvikhersonforsandasuoloftranoyfortmissoulan-udefenseljordfortwo" + - "rthachirogatakamoriokamikitayamatotakadaforuminamifuranofosneser" + - "vesarcasmatartanddesignfotaruis-a-gurunzenfoxfordegreefreeboxost" + - "rowiechitachinakagawatchandclockazimierz-dolnyfreemasonryfreibur" + - "gfreightcmwildlifedjejuegoshikiminokamoenairlinedre-eikerfreseni" + - "uscountryestateofdelawaredumbrellanbibaidarfribourgfriuli-v-giul" + - "iafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-ven" + - "eziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiu" + - "liafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfrogan" + - "servicesettsurgeonshalloffamemergencyberlevagangaviikanonjis-a-h" + - "ard-workerfrognfrolandfrom-akrehamnfrom-alfrom-arfrom-azpartis-a" + - "-hunterfrom-capebretonamiasakuchinotsuchiurakawarszawashingtondc" + - "lkhmelnitskiyamasfjordenfrom-collectionfrom-ctransportransurlfro" + - 
"m-dchitosetogitsuldalucernefrom-dell-ogliastrakhanawafrom-flande" + - "rsevastopolefrom-gafrom-higashiagatsumagoirminamiiselectrapaniim" + - "imatakatoris-a-knightpointtokamachippubetsubetsugaruslivinghisto" + - "ryfrom-iafrom-idfrom-ilfrom-incheonfrom-ksevenassisicilyfrom-kyo" + - "wariasahikawafrom-lancashireggio-calabriafrom-manxn--1qqw23afrom" + - "-mdfrom-meguromskoguchikuzenfrom-microsoftbankhmelnytskyivallee-" + - "aosteroyfrom-mnfrom-mochizukirovogradoyfrom-msewilliamhillfrom-m" + - "tnfrom-nchloefrom-ndfrom-nefrom-nhktravelchannelfrom-njcpartners" + - "franziskanerdpolicefrom-nminamiizukamitondabayashiogamagoriziafr" + - "om-nvalleeaosteigenfrom-nyfrom-ohkurafrom-oketohmaorivnefrom-orf" + - "rom-paderbornfrom-pratohnoshoooshikamaishimofusartshangrilangeva" + - "grarboretumbriafrom-ris-a-landscaperugiafrom-schoenbrunnfrom-sdf" + - "rom-tnfrom-txn--2m4a15efrom-utazuerichardlillehammerfest-mon-blo" + - "gueurovisionfrom-vaksdalfrom-vtravelersinsurancefrom-wafrom-wiel" + - "unnerfrom-wvanylvenicefrom-wyfrosinonefrostalbansharis-a-lawyerf" + - "royahabadajozoraholtalenvironmentalconservationfstavropolitienda" + - "fujiiderafujikawaguchikonefujiminohtawaramotoineppubolognakaniik" + - "awatanagurafujinomiyadafujiokayamapartsharpartyfujisatoshonairpo" + - "rtland-4-salernogatagajobojis-a-liberalfujisawafujishiroishidaka" + - "biratoridellogliastraderfujitsurugashimamateramodalenfujixeroxn-" + - "-30rr7yfujiyoshidafukayabeardubaiduckdnshomebuiltrdfukuchiyamada" + - "fukudominichocolatemasekazofukuis-a-libertarianfukumitsubishigak" + - "iryuohadanoshiroomurafukuokazakisarazurewebsiteshikagamiishibuka" + - "wafukuroishikarikaturindalfukusakishiwadafukuyamagatakahashimama" + - "kisofukushimarburgfunabashiriuchinadafunagatakahatakaishimoichin" + - "osekigaharafunahashikamiamakusatsumasendaisennangonohejis-a-linu" + - "x-useranishiaritabashikaoizumizakitchenfundaciofuoiskujukuriyama" + - "rcheapasadenaklodzkodairafuosskoczowinbaltimore-og-romsdalimited" + - "iscoveryonaguniversityoriikashibatakashimarylhurstjohnaval-d-aos" + - "ta-valleyukibestadishakotankashiharaukraanghkepnord-frontierepai" + - "rbusantiquest-a-la-maisondre-landebusinessebyklefrakkestadds3-ap" + - "-southeast-2furnitureggio-emilia-romagnakanojohanamakinoharafuru" + - "biraquarellebesbyglandfurudonostiafurukawairtelecityeatshawaiiji" + - "marugame-hostingfusodegaurafussaintlouis-a-anarchistoireggiocala" + - "briafutabayamaguchinomigawafutboldlygoingnowhere-for-moregontrai" + - "lroadfuttsurugiminamimakis-a-llamarylandfuturemailingfvgfyis-a-m" + - "usicianfylkesbiblackfridayfyresdalhannanmokuizumodernhannovarese" + - "rveblogspotrentino-a-adigehanyuzenhapmirhareidsbergenharstadharv" + - "estcelebrationhasamarahasaminami-alpssells-itrentino-aadigehashb" + - "anghasudahasura-appassenger-associationhasviklabudhabikinokawaba" + - "rthaebaruminamiminowahatogayahoohatoyamazakitahiroshimarriottren" + - "tino-alto-adigehatsukaichikaiseis-a-painteractivegarsheis-a-pats" + - "fanhattfjelldalhayashimamotobuildinghazuminobusellsyourhomeipavi" + - "ancargodaddyndns-at-homednshimonosekikawahboehringerikehelsinkit" + - "akamiizumisanofidelitysvardollshimosuwalkis-a-personaltrainerhem" + - "bygdsforbundhemneshimotsukehemsedalhepforgeherokussldheroyhgtvsh" + - "imotsumahigashichichibungotakadatinghigashihiroshimanehigashiizu" + - "mozakitakatakanezawahigashikagawahigashikagurasoedahigashikawaki" + - "taaikitakyushuaiahigashikurumeiwamarshallstatebankmpspbamblebtim" + - "netz-2higashimatsushimarinehigashimatsuyamakitaakitadaitoigawahi" + - 
"gashimurayamalatvuopmidoris-a-photographerokuappfizerhigashinaru" + - "sembokukitamidsundhigashinehigashiomihachimanchesterhigashiosaka" + - "sayamamotorcycleshinichinanhigashishirakawamatakaokamikoaniikapp" + - "ugliahigashisumiyoshikawaminamiaikitamotosumitakaginankokubunjis" + - "-a-playerhigashitsunotogawahigashiurausukitanakagusukumoduminami" + - "ogunicomcastresistancehigashiyamatokoriyamanakakogawahigashiyodo" + - "gawahigashiyoshinogaris-a-republicancerresearchaeologicalifornia" + - "hiraizumisatohobby-sitehirakatashinagawahiranairtraffichofunator" + - "ientexpressatxn--1ck2e1balsanagochihayaakasakawaharavennagasakik" + - "onaikawachinaganoharamcoalaheadjudaicaaarborteaches-yogasawaraci" + - "ngroks-theatreemersongdalenviknakamuratakahamannortonsbergladelm" + - "enhorstackspacekitagataiwanairguardigitalimanowarudaugustowadaeg" + - "ubs3-ap-southeast-1hirarahiratsukagawahirayaitakarazukamiminersh" + - "injournalismailillesandefjordhistorichouseshinjukumanohitachiomi" + - "yaginowaniihamatamakawajimaritimodellinghitachiotagooglecodespot" + - "rentino-altoadigehitoyoshimifunehitradinghjartdalhjelmelandholec" + - "kobierzyceholidayhomelinuxn--32vp30hagakhanamigawahomesecurityma" + - "ceratakasagoperaunitextileitungsenhomesecuritypccwindmillhomesen" + - "seminehomeunixn--3bst00minamisanrikubetsupplyhondahoneywellbeing" + - "zonehongorgehonjyoitakasakitashiobarahornindalhorseoulminamitane" + - "hortendofinternetrentino-s-tirollagrigentomologyhoteleshinkamigo" + - "toyohashimototalhotmailhoyangerhoylandetroitskokonoehumanitieshi" + - "nshinotsurgeryhurdalhurumajis-a-rockstarachowicehyllestadhyogori" + - "s-a-socialistmeindianapolis-a-bloggerhyugawarahyundaiwafunehzcho" + - "nanbugattipschlesischesaudajgorajlchoshibuyachiyodavvesiidazaifu" + - "daigodoesntexistanbullensvanguardyndns-wikindleikangerjlljmpharm" + - "acienshiojirishirifujiedajnjelenia-gorajoyentrentino-sued-tirolj" + - "oyokaichibahcavuotnagaraumalselvendrelljpmorganjpnchoyodobashich" + - "ikashukujitawarajprshioyamemorialjuniperjurkristiansundkrodshera" + - "dkrokstadelvaldaostarnbergkryminamiyamashirokawanabelgorodeokuma" + - "torinokumejimassa-carrara-massacarraramassabunkyonanaoshimageand" + - "soundandvisionkumenanyokkaichirurgiens-dentistes-en-francekunisa" + - "kis-an-artistcgroupgfoggiakunitachiarailwaykunitomigusukumamotoy" + - "amasoykunneppulawykunstsammlungkunstunddesignkuokgrouphdkureisen" + - "kurgankurobelaudibleborkdalkurogimilitarykuroisoftwarendalenugku" + - "romatsunais-an-engineeringkurotakikawasakis-an-entertainerkursko" + - "mmunalforbundkushirogawakustanais-bykusupplieshiranukaniepcekutc" + - "hanelkutnokuzbassnillfjordkuzumakis-certifiedogawarabikomaezakir" + - "unorthwesternmutualkvafjordkvalsundkvamfamberkeleykvanangenkvine" + - "sdalkvinnheradkviteseidskogkvitsoykwpspjelkavikommunemitourismol" + - "anciamitoyoakemiuramiyazumiyotamanomjondalenmlbfanmonmouthagebos" + - "tadmonstermonticellombardiamondshiraois-into-carshintomikasahara" + - "montrealestatefarmequipmentrentino-suedtirolmonza-brianzaporizhz" + - "hiamonza-e-della-brianzapposhiraokanmakiyokawaramonzabrianzaptok" + - "yotangotpantheonsitemonzaebrianzaramonzaedellabrianzamoparachuti" + - "ngmordoviajessheiminanomoriyamatsunomoriyoshiokamitsuemormoneymo" + - "royamatsusakahoginozawaonsenmortgagemoscowindowshiratakahagivest" + - "bytomaritimekeepingmoseushistorymosjoenmoskeneshishikuis-into-ca" + - "rtoonshinyoshitomiokaneyamaxunusualpersonmosshisognemosvikomorot" + - "sukamisunagawamoviemovistargardmtpchristmasakikugawatchesauherad" + - 
"yndns-workisboringrossetouchijiwadeloittevadsoccertificationissh" + - "ingugemtranakatsugawamuenstermugithubcloudusercontentrentinoa-ad" + - "igemuikamogawamukochikushinonsenergymulhouservebeermunakatanemun" + - "cieszynmuosattemuphiladelphiaareadmyblogsitemurmanskomvuxn--3ds4" + - "43gmurotorcraftrentinoaadigemusashimurayamatsushigemusashinohara" + - "museetrentinoalto-adigemuseumverenigingmutsuzawamutuellevangermy" + - "dissentrentinoaltoadigemydrobofagemydshisuifuelmyeffectrentinos-" + - "tirolmyfritzwinnershitaramamyftphilatelymykolaivarggatrentinosti" + - "rolmymediapchromedicaltanissettaishinomakimobetsuliguriamyokoham" + - "amatsudamypepsonyoursidedyn-o-saurecipesaro-urbino-pesarourbinop" + - "esaromamurogawawioshizukuishimogosenmypetshizuokannamiharumyphot" + - "oshibahccavuotnagareyamalvikongsbergmypsxn--3e0b707emysecurityca" + - "merakermyshopblockshoujis-into-gamessinashikiwakunigamihamadamyt" + - "is-a-bookkeepermincommbankomonomyvnchryslerpictetrentinosud-tiro" + - "lpictureshowtimeteorapphoenixn--3oq18vl8pn36apiemontepilotshrira" + - "mlidlugolekagaminogiftsienaplesigdalpimientaketomisatomskongsvin" + - "gerpinkoninjamisonpioneerpippuphonefosshowapiszpittsburghofastly" + - "piwatepizzapkonskowolayangroupharmacyshirahamatonbetsurgutsiracu" + - "saitoshimaplanetariuminnesotaketakatsukis-foundationplantationpl" + - "antsilkonsulatrobeepilepsydneyplatformintelligenceplaystationpla" + - "zaplchungbukazunoplombardyndns-blogdnsimbirskonyvelolplumbingopm" + - "npodzonepohlpoivronpokerpokrovskooris-a-techietis-a-soxfanpolkow" + - "icepoltavalle-aostarostwodzislawitdkopervikomforbananarepublicar" + - "toonartdecoffeedbackplaneappalacemreviewskrakoweddinglassassinat" + - "ionalheritagematsubarakawagoeu-1pomorzeszowithgoogleapisa-hockey" + - "nutrentinosudtirolpordenonepornporsangerporsanguideltajimicrolig" + - "htingporsgrunnanpoznanpraxis-a-bruinsfanprdpreservationpresidiop" + - "rgmrprimelhusgardenprincipeprivatizehealthinsuranceprochowicepro" + - "ductionsimple-urlprofauskedsmokorsetagayasells-for-ulsandoyprogr" + - "essivegasiaprojectrentinosued-tirolpromombetsurfbsbxn--1lqs03npr" + - "opertyprotectionprotonetrentinosuedtirolprudentialpruszkowithyou" + - "tubeneventoeidsvollprzeworskogptplusterptzpvtrentoyonakagyokutoy" + - "akokamishihoronobeokaminoyamatsuris-leetrentino-stirolpwchungnam" + - "dalseidfjordynnsavannahgapzqldqponqslgbtrevisohughesirdalquicksy" + - "teslingqvchurchaseljeffersoniyodogawastoragestordalstorenburgsto" + - "rfjordstpetersburgstreamsterdamnserverbaniastudiostudyndns-homef" + - "tpaccessnoasakakinokiastuff-4-salestufftoread-booksnesnzstuttgar" + - "trogstadsurreysusakis-not-certifieducatorahimeshimakanegasakinko" + - "bayashikshacknethnologysusonosuzakanrasuzukanumazurysuzukis-save" + - "dunetbankolobrzegersundsvalbardudinkakudamatsuesveiosvelvikoseis" + - "-a-therapistoiasvizzeraswedenswidnicarrierswiebodzindianmarketin" + - "gswiftcoveronaritakurashikis-slickomaganeswinoujscienceandhistor" + - "yswisshikis-uberleetrentino-sud-tirolturystykarasjohkamiokaminok" + - "awanishiaizubangetuscanytushuissier-justicetuvalle-daostatichuva" + - "shiatuxfamilyversicherungvestfoldvestnesolutionslupskoryolasitev" + - "estre-slidreamhostersomavestre-totennishiawakuravestvagoyvevelst" + - "advibo-valentiavibovalentiavideovillaskoyabearalvahkijobserverda" + - "lvdalcesomnarashinovinnicartiervinnytsiavipsinaapphotographysiov" + - "irginiavirtualvirtueeldomeindustriesteambulancevirtuelvisakegawa" + - "vistaprinternationalfirearmsooviterboltromsakatakinouevivoldavla" + - 
"dikavkazanvladimirvladivostokaizukarasuyamazoevlogoipiagetmyiphi" + - "lipsyvolkenkundenvolkswagentsopotritonvologdanskoshunantokonameg" + - "atakasugais-an-accountantshinshirovolvolgogradvolyngdalvoronezhy" + - "tomyrvossevangenvotevotingvotoyonezawavrnworldworse-thanggliding" + - "wowiwatsukiyonowruzhgorodoywritesthisblogsytewroclawloclawekostr" + - "omahachijorpelandwtcirclegnicafederationwtfbx-oslodingenwuozuwww" + - "mflabsor-odalwzmiuwajimaxn--4gq48lf9jeonnamerikawauexn--4it168dx" + - "n--4it797kotohiradomainsurehabmerxn--4pvxsor-varangerxn--54b7fta" + - "0ccitichernigovernmentoyookanzakiyosatokigawaxn--55qw42gxn--55qx" + - "5dxn--5js045dxn--5rtp49civilaviationxn--5rtq34kotouraxn--5su34j9" + - "36bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn-" + - "-7t0a264civilisationxn--80adxhksorfoldxn--80ao21axn--80aqecdr1ax" + - "n--80asehdbarclaycardsakuraibigawaurskog-holandroverhalla-spezia" + - "grocerybnikahokutobishimaizurubtsovskiervaapsteiermarkariyakumol" + - "dev-myqnapcloudcontrolappagefrontappagespeedmobilizerobiraeropor" + - "talabamagasakishimabarackmaze12xn--80aswgxn--80audnedalnxn--8ltr" + - "62kouhokutamakis-an-actorxn--8pvr4uxn--8y0a063axn--90a3academyac" + - "tivedirectoryazannakadomari-elasticbeanstalkounosunndalxn--90ais" + - "hobaraomoriguchiharahkkeravjudygarlandxn--90azhaibarakitahatakan" + - "abeautydalxn--9dbhblg6dietcimmobilienxn--9dbq2axn--9et52uxn--9kr" + - "t00axn--andy-iraxn--aroport-byanagawaxn--asky-iraxn--aurskog-hla" + - "nd-jnbarclaysakyotanabellunordkappgafanpachigasakidsmynasushioba" + - "ragusaarlandiskstationavigationavuotnakayamatsuuraustevollavagis" + - "kebinagisochildrensgardenaturalhistorymuseumcenterepbodyndns-fre" + - "ebox-oskolegokasells-for-less3-eu-central-1xn--avery-yuasakuhokk" + - "aidontexisteingeekouyamashikis-an-actresshintokushimaxn--b-5gaxn" + - "--b4w605ferdxn--bck1b9a5dre4civilizationxn--bdddj-mrabdxn--beara" + - "lvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn-" + - "-bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-" + - "ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-graingerxn--bod-2n" + - "aroyxn--brnny-wuaccident-investigationjukudoyamagadancebetsukuba" + - "bia-goracleaningatlantabusebastopologyeonggiehtavuoatnadexeterim" + - "o-i-ranagahamaroygardendoftheinternetflixilovecollegefantasyleag" + - "uernseyxn--brnnysund-m8accident-preventionlineat-urlxn--brum-voa" + - "gatromsojavald-aostaplesokanoyakagexn--btsfjord-9zaxn--c1avgxn--" + - "c2br7gxn--c3s14misasaguris-into-animelbournexn--cck2b3barefootba" + - "llooningliwiceventsalangenayoroddaustinnaturalsciencesnaturelles" + - "3-eu-west-1xn--cg4bkis-very-badaddjamalborkangerxn--ciqpnxn--clc" + - "hc0ea0b2g2a9gcdn77-sslattumisawaxn--comunicaes-v6a2oxn--correios" + - "-e-telecomunicaes-ghc29axn--czr694bargainstitutelemarkashiwaraus" + - "traliaisondriodejaneirochestereportarantours3-external-1xn--czrs" + - "0trusteexn--czru2dxn--czrw28barreauctionflfanfshostrodawaraustrh" + - "eimatunduhrennesoyokotebinorilskarlsoyokozebizenakamagayachts3-e" + - "xternal-2xn--d1acj3barrel-of-knowledgeologyukuhashimojibmditchyo" + - "uripalanakhodkanagawauthordalandroidgcahcesuolocalhistoryggeelvi" + - "nckarmoyomitanobninskarpaczeladz-1xn--d1alfaromeoxn--d1atrvbarce" + - "lonagasukeu-2xn--d5qv7z876civilwarmanagementoyosatoyokawaxn--dav" + - "venjrga-y4axn--djrs72d6uyxn--djty4kouzushimashikokuchuoxn--dnna-" + - "grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4claimsaves-th" + - "e-whalessandria-trani-barletta-andriatranibarlettaandriaxn--eckv" + - 
"dtc9dxn--efvn9sorreisahayakawakamiichikawamisatottoris-lostre-to" + - "teneis-a-studentalxn--efvy88hair-surveillancexn--ehqz56nxn--elqq" + - "16hakatanotaireshimokawaxn--estv75gxn--eveni-0qa01gaxn--f6qx53ax" + - "n--fct429kozagawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsortlandx" + - "n--fiq64barrell-of-knowledgeometre-experts-comptablesalondonetsk" + - "ashiwazakiyosemiteverbankasukabedzin-the-bandaioiraseeklogesuran" + - "certmgretachikawakkanaibetsubamericanfamilydscloudcontrolledekaf" + - "jordivtasvuodnagatorogersaltdalimoliserniautomotivecodynaliascol" + - "i-picenoipirangamvikaruizawamusementaobaokinawashirosatochiokino" + - "shimakeupowiathletajimabariakembuchikumagayagawakuyabukihokumako" + - "gengerdalipayekaterinburgjerdrumckinseyokosukareliance164xn--fiq" + - "s8sorumisakis-gonexn--fiqz9southcarolinazawaxn--fjord-lraxn--fjq" + - "720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde-grand" + - "rapidsouthwestfalenxn--frna-woaraisaijosoyrovigorlicexn--frya-hr" + - "axn--fzc2c9e2clickchristiansburgroundhandlingroznyxn--fzys8d69uv" + - "gmailxn--g2xx48clinichernihivanovosibirskautokeinoxn--gckr3f0fbx" + - "ostrolekaluganskharkivgucciprianiigataitogliattirescrappingushik" + - "amifuranosegawaxn--gecrj9cliniquenoharaxn--ggaviika-8ya47hakodat" + - "exn--gildeskl-g0axn--givuotna-8yandexn--3pxu8kosugexn--gjvik-wua" + - "xn--gk3at1exn--gls-elacaixaxn--gmq050is-very-evillagexn--gmqw5ax" + - "n--h-2failxn--h1aeghakonexn--h2brj9clintonoshoesavonamsskoganeis" + - "-a-doctorayxn--hbmer-xqaxn--hcesuolo-7ya35bashkiriautoscanadaeje" + - "onbukarumaifarmerseinextdirectargets-itargivingjesdalavangenatur" + - "bruksgymnaturhistorisches3-fips-us-gov-west-1xn--hery-iraxn--hge" + - "bostad-g3axn--hmmrfeasta-s4acctrysiljan-mayenxn--hnefoss-q1axn--" + - "hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn" + - "--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyaotsurnadal" + - "xn--io0a7is-very-goodhandsonxn--j1aefermobilyxn--j1amhakubankhva" + - "olbia-tempio-olbiatempioolbialystokkemerovodkagoshimalopolskanla" + - "ndxn--j6w193gxn--jlq61u9w7basilicataniaveroykeniwaizumiotsukumiy" + - "amazonawsabaerobaticketsaritsynologyeongnamegawakeisenbahnatuurw" + - "etenschappenaumburgjovikasaokamisatokashikiwienaustdalazioceanog" + - "raphics3-sa-east-1xn--jlster-byaroslavlaanderenxn--jrpeland-54ax" + - "n--jvr189misconfusedxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77" + - "d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn" + - "--klty5xn--42c2d9axn--koluokta-7ya57hakuis-a-nascarfanxn--kprw13" + - "dxn--kpry57dxn--kpu716ferraraxn--kput3is-very-nicexn--krager-gya" + - "sakaiminatoyonoxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxax" + - "n--krjohka-hwab49jetztrentino-sudtirolxn--ksnes-uuaxn--kvfjord-n" + - "xaxn--kvitsy-fyasugis-very-sweetpepperxn--kvnangen-k0axn--l-1fai" + - "rwindsowaxn--l1accentureklamborghiniizaxn--laheadju-7yasuokarate" + - "xn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52basket" + - "ballfinanzgoravocatanzarowebhopocznoceanographiquehimeji234xn--l" + - "esund-huaxn--lgbbat1ad8jevnakershuscultureggioemiliaromagnakasat" + - "sunais-a-teacherkassymantechnologyxn--lgrd-poacoachampionshiphop" + - "tobamagazinebraskaunjargallupinbatodayurihonjournalisteinkjerusa" + - "lembroideryusuharavoues3-us-gov-west-1xn--lhppi-xqaxn--linds-pra" + - "mericanartulansokndalxn--lns-qlanxesspreadbettingxn--loabt-0qaxn" + - "--lrdal-sraxn--lrenskog-54axn--lt-liaclothingrpanasonichernivtsi" + - "ciliaxn--lten-granexn--lury-iraxn--mely-iraxn--merker-kuaxn--mgb" + - 
"2ddespydebergxn--mgb9awbferrarittogoldpoint2thisamitsukexn--mgba" + - "3a3ejtunesolarssonxn--mgba3a4f16axn--mgba3a4franamizuholdingsmil" + - "eksvikozakis-an-anarchistoricalsocietyumenxn--mgba7c0bbn0axn--mg" + - "baakc7dvferreroticanonoichinomiyakexn--mgbaam7a8hakusandiegoodye" + - "arthadselfipassagenshellaspeziaxn--mgbab2bdxn--mgbai9a5eva00bats" + - "fjordivttasvuotnaharimaniwakuratexascolipicenord-aurdalpha-myqna" + - "pcloudappspotagerhcloudfunctionsalvadordalibabaikaliszczytnord-o" + - "dalindasdaburyatiaarpaleomutashinaiinetarnobrzegyptianhlfanhsalz" + - "burglobalashovhachinohedmarkasumigaurawa-mazowszexboxenapponazur" + - "e-mobilevje-og-hornnesamegawaxasnesoddenmarkhangelskjervoyagemol" + - "ogicallyngenglanddnskingjerstadotsuruokamchatkameokameyamashinat" + - "sukigatakamatsukawaetnagaivuotnagaokakyotambabydgoszczecinemagen" + - "tositelekommunikationthewifiat-band-campaniamallamaintenanceobih" + - "irosakikamijimattelefonicarbonia-iglesias-carboniaiglesiascarbon" + - "iabruzzoologyeongbuk-uralsk12xn--mgbai9azgqp6jewelryxn--mgbayh7g" + - "paduaxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca" + - "7dzdoxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbi4ecexposedxn--m" + - "gbpl2fhskpnxn--mgbqly7c0a67fbcloudnsdojoetsuwanouchikujogaszkola" + - "hppiacenzakopanerairforcexn--mgbqly7cvafredrikstadtverranzanxn--" + - "mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhausposts-and-telecommun" + - "icationsnasadodgeorgeorgiaxn--mgbx4cd0abbottunkosherbrookegawaxn" + - "--mix082fetsundxn--mix891fgxn--1lqs71dxn--mjndalen-64axn--mk0axi" + - "nfinitis-with-thebandoomdnsfor-better-thandaxn--mk1bu44cnsaxoxn-" + - "-mkru45isleofmandalxn--mlatvuopmi-s4axn--mli-tlapyatigorskppspie" + - "gelxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuragawaxn--mosjen-ey" + - "atominamiawajikissmarterthanyoustkarasjokomakiyosumycdn77-secure" + - "chtrainingxn--mot-tlaquilancasterxn--mre-og-romsdal-qqbbcasadela" + - "monedatsunanjoburglobodoes-itvedestrandiyusuisserveexchangexn--m" + - "sy-ula0haldenxn--mtta-vrjjat-k7afamilycompanycntoyotaris-a-finan" + - "cialadvisor-aurdalukowhoswhokksundynv6xn--muost-0qaxn--mxtq1mish" + - "imatsumaebashimodatexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45brj9ci" + - "rcus-2xn--nit225krasnodarxn--nmesjevuemie-tcbajddarchaeologyxn--" + - "nnx388axn--nodexn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--nts" + - "q17gxn--nttery-byaeservecounterstrikexn--nvuotna-hwaxn--nyqy26ax" + - "n--o1achattanooganorfolkebiblegallocus-1xn--o3cw4halsaitamatsuku" + - "ris-a-nurservebbshimokitayamaxn--od0algxn--od0aq3bbtarumizusawax" + - "n--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyatsukaratsuginam" + - "ikatagamihoboleslawiecolonialwilliamsburgruexn--osyro-wuaxn--p1a" + - "cfhvalerxn--p1aiwchoseirouterxn--pbt977coloradoplateaudioxn--pgb" + - "s0dhlxn--porsgu-sta26fidonnakaiwamizawaxn--pssu33lxn--pssy2uxn--" + - "q9jyb4columbusheyxn--qcka1pmcdonaldsrlxn--qqqt11missilelxn--qxam" + - "urskjakdnepropetrovskiptveterinairealtorlandxn--rady-iraxn--rdal" + - "-poaxn--rde-ularvikrasnoyarskomitamamuraxn--rdy-0nabarixn--renne" + - "sy-v1axn--rhkkervju-01aflakstadaokagakibichuoxn--rholt-mragowood" + - "sidexn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn" + - "--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byatsushiroxn--" + - "rny31hammarfeastafricapetownnews-stagingxn--rovu88bbvacationsupd" + - "atelevisionikiitatebayashijonawatexn--rros-granvindafjordxn--rsk" + - "og-uuaxn--rst-0narutomobellevuelosangelesjaguarchitecturealtychy" + - "attorneyagawalbrzycharternopilawalesundxn--rsta-francaiseharaxn-" + - 
"-ryken-vuaxn--ryrvik-byawaraxn--s-1faitheguardianxn--s9brj9commu" + - "nitysfjordyroyrvikinguitarsbschokoladenxn--sandnessjen-ogbizhevs" + - "kredirectmeldalxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gra" + - "tangenxn--skierv-utazaskvolloabathsbcomobaraxn--skjervy-v1axn--s" + - "kjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narviikananporovnox" + - "n--slt-elabbvieeexn--smla-hraxn--smna-gratis-a-bulls-fanxn--snas" + - "e-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr-au" + - "rdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbentleyu" + - "uconnectatamotorsamnangerxn--srfold-byawatahamaxn--srreisa-q1axn" + - "--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbep" + - "publishproxyzgorzeleccolognewportlligatewayuzawaxn--stre-toten-z" + - "cbsrtroandinosaurlandesmolenskosaigawaxn--t60b56axn--tckweatherc" + - "hannelxn--tiq49xqyjewishartgalleryxn--tjme-hraxn--tn0agrinet-fre" + - "aksrvaroyxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r1ax" + - "n--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvdonskoshimizumaki" + - "zunokunimilanoxn--uc0ay4axn--uist22hamurakamigoriginshimonitayan" + - "agitlaborxn--uisz3gxn--unjrga-rtambovenneslaskerrylogisticsologn" + - "exn--unup4yxn--uuwu58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn" + - "--vermgensberater-ctberndnpalermomasvuotnakatombetsupportatarsta" + - "nikkoebenhavnikolaevennodessaikiraxn--vermgensberatung-pwbeskidy" + - "nathomedepotenzachpomorskienikonantanangerxn--vestvgy-ixa6oxn--v" + - "g-yiabcn-north-1xn--vgan-qoaxn--vgsy-qoa0jfkomatsushimashikexn--" + - "vgu402comparemarkerryhotelscholarshipschooluroyxn--vhquversaille" + - "solundbeckosakaerodromegalsacechirealminamiuonumasudaxn--vler-qo" + - "axn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bestbuysh" + - "ousesamsclubindalindesnesamsunglogowegroweibolzanordre-landrange" + - "dalinkasuyakutiaxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wg" + - "bh1compute-1xn--wgbl6axn--xhq521betainaboxfusejnynysagaeroclubme" + - "decincinnationwidealerxn--xkc2al3hye2axn--xkc2dl3a5ee0hangoutsys" + - "temscloudfrontdoorxn--y9a3aquariumisugitokuyamatsumotofukexn--ye" + - "r-znarvikristiansandcatshirakoenigxn--yfro4i67oxn--ygarden-p1axn" + - "--ygbi2ammxn--45q11citadeliveryokamikawanehonbetsurutaharaxn--ys" + - "tre-slidre-ujbieigersundrivelandrobaknoluoktaikicks-assedicaseih" + - "ichisobetsuitaipeiheijiiyamanobeauxartsandcraftsandvikcoromantov" + - "alle-d-aostathellexusdecorativeartsanfranciscofreakunemurorangei" + - "seiyoichiropracticasertairaxn--zbx025dxn--zf0ao64axn--zf0avxn--4" + - "gbriminingxn--zfr164bielawallonieruchomoscienceandindustryninohe" + - "kinannestadrudmurtiaxperiaxz" +const text = "bikedagestangeorgeorgiaxagrocerybnikahokutobishimaizuruhreportar" + + "nobrzegyptianaturalhistorymuseumcentereviewskrakoweddinggfarmers" + + "einexus-2bilbaogakievenesalangenikiiyamanouchikuhokuryugasakitau" + + "rayasudabillustrationikkoebenhavnikolaevennodessagamiharabiomuta" + + "shinainfinitintuitattoolsztynsettlersalondonetskarpaczeladzjcbre" + + "mangerbirdartcenterprisesakikuchikuseikarugapartmentsaltdalimoli" + + "serniabirkenesoddtangenovaravennagasukeverbankaruizawabirthplace" + + "vje-og-hornnesalvadordalibabajddarchaeologyusuisserveexchangebja" + + "rkoyuufcfanikonantanangerbjerkreimbalsanagochihayaakasakawaharau" + + "malopolskanlandds3-us-west-1bjugninohekinannestadrangedalindasda" + + "burblockbusternidray-dnsupdaterbloombergbauerninomiyakonojosoyro" + + "rosalzburgjovikarumaifarmsteadraydnsamegawabloxcmsamnangerblueda" + + 
"ncebmoattachmentsamsclubindalindesnesamsungladell-ogliastraderbm" + + "sandvikcoromantovalle-d-aostatic-accessanfranciscofreakunemurora" + + "ngeiseiyoichiropracticasinordre-landrivelandrobaknoluoktabuseekl" + + "ogesurancertmgretachikawakkanaibetsubamericanfamilydscloudcontro" + + "lledekafjordrudunsangoppdalivornobmweirbnpparibaselburglassassin" + + "ationalheritagematsubarakawagoebnrwfarsundupontariobonnirasakinu" + + "yamashinashikitchenishiazainvestmentsanjournalismailillesandefjo" + + "rdurbanamexhibitionishigobookingliwicebootsannanishiharaboschaef" + + "flerdalomzaporizhzhegurinzais-a-bulls-fanishiizunazukis-a-candid" + + "atebostikasaokamiminersannohelplfinancialorenskoglobalashovhachi" + + "nohedmarkashibatakasakiyokawarabostonakijinsekikogentinglobodoes" + + "-itvedestrandurhamburglogowhalingloppenzaogashimadachicagoboatsa" + + "nokashiharabotanicalgardenishikatakayamatta-varjjataxihuanishika" + + "tsuragithubusercontentgoryuzawabotanicgardenishikawazukamitondab" + + "ayashiogamagoriziabotanybouncemerckmsdnipropetrovskjakdnepropetr" + + "ovskiervaapsteiermarkashiwarabounty-fullensakerrypropertiesantab" + + "arbaraboutiquebecngmbhartiffanybozentsujiiebradescorporationishi" + + "merabrandywinevalleybrasiliabresciabrindisibenikebristoloslocalh" + + "istoryggeelvinckashiwazakiyosatokashikiyosemitebritishcolumbialo" + + "wiezachpomorskienishinomiyashironobroadcastlefrakkestadvrcambrid" + + "gestonextdirectjeldsundvrdnsantacruzsantafedextraspacekitagataji" + + "rittogoldpoint2thisamitsukebroadwaybroke-itjmaxxxboxenapponazure" + + "-mobilebrokerbronnoysundwgminakamichiharabrothermesaverdeatnurem" + + "bergmodellingmxfinitybrowsersafetymarketsanukis-a-catererbrumund" + + "dalotenkawabrunelasticbeanstalkasukabedzin-the-bandaikawachinaga" + + "noharamcoalaskanittedallasalleasinglest-mon-blogueurovisionthewi" + + "fiat-band-campaniabrusselsaotomemergencyberlevagangaviikanonjis-" + + "a-celticsfanishinoomotegobruxellesapodlasiellakasamatsudovre-eik" + + "erbryanskjervoyagebrynewhampshirebungoonordlandyndns-at-workingg" + + "roupalacebuskerudinewjerseybuzenishinoshimattelefonicarbonia-igl" + + "esias-carboniaiglesiascarboniabuzzlgrimstadyndns-blogdnsapporobw" + + "hoswhokksundyndns-freebox-ostrowiecateringebuilderschmidtre-gaul" + + "dalottebzhitomirumalselvendrellottokonamegatakasugais-a-chefashi" + + "onishiokoppegardyndns-homednsardegnamsskoganeis-a-conservativefs" + + "nillfjordyndns-ipaleocondoshichinohealth-carereformitakeharaconf" + + "erenceconstructionconsuladoesntexistanbullensvanguardyndns-wikin" + + "dlegokasells-for-lessaudaconsultanthropologyconsultingvolluxuryc" + + "ontactoyookanmakiwakunigamifunecontemporaryarteducationalchikugo" + + "doharuovatoyosatoyakokonoecontractorskenconventureshinodesashibe" + + "tsuikinderoycookingchannelblagdenesnaaseralingenkainanaejrietisa" + + "latinabenonichernihivanovodkagoshimalvikasumigaurawa-mazowszexjc" + + "palermomahachijorpelandyndns-mailouvreisenishitosashimizunaminam" + + "iashigaracoolkuszkoladbrokesauheradyndns-workisboringrpamperedch" + + "efastlylbaltimore-og-romsdalwaysdatabaseballangenoamishirasatoch" + + "igiessenebakkeshibechambagriculturennebudejjudygarlandigitalavan" + + "genavigationavuotnaklodzkodairamusementarumizusawabruzzoologyeon" + + "gbuk12cooperaunitemasekatsushikabeeldengeluidyndns1copenhagencyc" + + "lopedichernivtsiciliacorsicagliarightathomeftpanamacorvettenriku" + + "zentakataitogliattiresavannahgacosenzaganquannakadomaritimekeepi" + + "ngatlantaijis-a-financialadvisor-aurdaluzerncosidnsfor-better-th" + + 
"anawawildlifedjeffersoncostumedio-campidano-mediocampidanomedioc" + + "ouchpotatofriesaves-the-whalessandria-trani-barletta-andriatrani" + + "barlettaandriacouncilvivano-frankivskatsuyamasfjordencouponsavon" + + "aplesaxocoursesbschokoladencq-acranbrookuwanalyticscholarshipsch" + + "oolcreditcardynnschulezajskydivingruecreditunioncremonashorokana" + + "iecrewilliamhillcricketrzyncrimeastcoastaldefencecrotonewyorkshi" + + "recipesaro-urbino-pesarourbinopesaromasvuotnaharimamurogawacrown" + + "providercrsvpanasonichernovtsykkylvenetogakushimotoganewportllig" + + "atjxn--0trq7p7nnishiwakis-a-cpadoval-daostavalleycruiseschwarzgw" + + "angjuegoshikiminokamoenairtraffichiryukyuragifuchungbukasuyaltak" + + "ashimaseratis-a-cubicle-slavellinowtvalleaostatoilowiczest-le-pa" + + "trondheimmobilienissandnessjoenissayokoshibahikariwanumatakazaki" + + "s-a-democratkmaxxn--11b4c3dyndns-office-on-the-webcampobassociat" + + "esardiniacryptonomichigangwoncuisinellahppiacenzakopanerairguard" + + "ynv6culturalcentertainmentoyotaris-a-geekgalaxycuneocupcakecxn--" + + "1ctwolominamatakkokaminokawanishiaizubangecymrussiacyonabarulsan" + + "doycyouthdfcbankaufenfiguerestaurantoyotomiyazakis-a-greenfilate" + + "liafilminamiawajikis-a-guruslivinghistoryfinalfinancefineartscie" + + "ntistoragefinlandfinnoyfirebaseapparliamentoyotsukaidownloadfire" + + "nzefirestonefirmdaleirfjordfishingolffanscjohnsonfitjarqhachioji" + + "yahikobeatscotlandfitnessettlementoyourafjalerflesbergushikamifu" + + "ranoshiroomuraflickragerotikakamigaharaflightscrapper-siteflirfl" + + "ogintogurafloraflorencefloridavvesiidazaifudaigojomedizinhistori" + + "schescrappingxn--1lqs71dfloristanohatakahamaniwakuratexascolipic" + + "enord-aurdalipayflorogerserveftparmaflowerservegame-serversaille" + + "servehalflifestyleflynnhubambleclercartoonartdecoldwarmiamibugat" + + "tipschlesisches3-us-west-2fndfoodnetworkshoppingfor-ourfor-somee" + + "thnologyfor-theaterforexrothruherecreationforgotdnservehttparoch" + + "erkasyno-dservehumourforli-cesena-forlicesenaforlikescandynamic-" + + "dnserveirchitachinakagawatchandclockaszubyforsaleirvikazoforsand" + + "asuoloftoystre-slidrettozawafortmissoulair-traffic-controlleyfor" + + "tworthachirogatakahatakaishimogosenforuminamibosogndalfosneserve" + + "minecraftozsdev-myqnapcloudcontrolappspotagerfotaruis-a-hard-wor" + + "kerfoxfordedyn-ip24freeboxoservemp3utilitiesquarezzoologicalvink" + + "lein-addrammenuernbergdyniabogadocscbnl-o-g-i-nativeamericananti" + + "ques3-ap-northeast-1kappchizippodhaleangaviikadenadexeterepbodyn" + + "athomebuilt3l3p0rtargets-itargiving12000emmafanconagawakayamadri" + + "dvagsoyericssonyoursidealerimo-i-ranaamesjevuemielno-ip6freemaso" + + "nryfreiburgfreightcminamidaitomangotsukisosakitagawafreseniuscou" + + "ntryestateofdelawaredstonefribourgfriuli-v-giuliafriuli-ve-giuli" + + "afriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-" + + "vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-" + + "giuliafriuliveneziagiuliafriulivgiuliafrlfroganservep2parservepi" + + "cservequakefrognfrolandfrom-akrehamnfrom-alfrom-arfrom-azwinbana" + + "narepublicasadelamonedatsunanjoburgjerstadotsuruokakegawasnesodd" + + "enmarkhangelskiptveterinairealtychyattorneyagawalmartatamotors3-" + + "ap-south-1from-capebretonamiastapleservesarcasmatartanddesignfro" + + "m-collectionfrom-ctrani-andria-barletta-trani-andriafrom-dchitos" + + "etogitsuldalucaniafrom-defenseljordfrom-flanderservicesettsurgeo" + + "nshalloffamemorialfrom-gausdalfrom-higashiagatsumagoizumizakiraf" + + 
"rom-iafrom-idfrom-ilfrom-incheonfrom-ksevastopolefrom-kyowariasa" + + "hikawafrom-lajollamericanexpressexyfrom-mannortonsbergfrom-mdfro" + + "m-megurokunohealthcareersevenassisicilyfrom-midoris-a-hunterfrom" + + "-mnfrom-mochizukirkenesewindmillfrom-msfranziskanerdpolicefrom-m" + + "tnfrom-nchloefrom-ndfrom-nefrom-nhktraniandriabarlettatraniandri" + + "afrom-njelenia-gorafrom-nminamiechizenfrom-nvalled-aostavangerfr" + + "om-nyfrom-ohkurafrom-oketohmansionshangrilanciafrom-orfrom-pader" + + "bornfrom-pratohnoshoooshikamaishimodatextileitungsenfrom-ris-a-k" + + "nightpointtokaizukameokameyamatotakadafrom-schoenbrunnfrom-sdfro" + + "m-tnfrom-txn--1qqw23afrom-utazuerichardlillehammerfeste-ipartis-" + + "a-landscaperfrom-vaksdalfrom-vtranoyfrom-wafrom-wielunnerfrom-wv" + + "alledaostavernfrom-wyfrosinonefrostalowa-wolawafroyahababyglandf" + + "stcgroupartnersharis-a-lawyerfujiiderafujikawaguchikonefujiminoh" + + "tawaramotoineppubolognakanotoddenfujinomiyadafujiokayamanxn--2m4" + + "a15efujisatoshonairportland-4-salernoboribetsucksharpartshawaiij" + + "imarugame-hostrodawarafujisawafujishiroishidakabiratoridegreefuj" + + "itsurugashimamateramodalenfujixeroxn--30rr7yfujiyoshidafukayabea" + + "rdubaiduckdnshellaspeziafukuchiyamadafukudominichocolatelevision" + + "issedaluccapitalonewmexicoffeedbackplaneapplinzis-a-designerimar" + + "umorimachidafukuis-a-liberalfukumitsubishigakirovogradoyfukuokaz" + + "akiryuohadanotaireshimojis-a-libertarianfukuroishikarikaturindal" + + "fukusakisarazurewebsiteshikagamiishibukawafukuyamagatakaharustka" + + "noyakagefunabashiriuchinadafunagatakahashimamakishiwadafunahashi" + + "kamiamakusatsumasendaisennangonohejis-a-linux-useranishiaritabas" + + "hijonawatefundaciofuoiskujukuriyamaoris-a-llamarylandfuosskoczow" + + "indowshimokawafurnituredumbrellanbibaidarfurubiraquarelleborkang" + + "erfurudonostiaarpartyfurukawairtelecityeatshimokitayamafusodegau" + + "rafussaikisofukushimapasadenamsosnowiechofunatorientexpressarluc" + + "ernefutabayamaguchinomigawafutboldlygoingnowhere-for-moregontrai" + + "lroadfuttsurugimperiafuturehostingfuturemailingfvgfyis-a-musicia" + + "nfylkesbiblackfridayfyresdalhangglidinghangoutsystemscloudfrontd" + + "oorhannanmokuizumodenakasatsunais-a-painteractivegarsheis-a-pats" + + "fanhannotteroyhanyuzenhapmirhareidsbergenharstadharvestcelebrati" + + "onhasamarnardalhasaminami-alpssells-itransportransurlhashbanghas" + + "udahasura-appassenger-associationhasvikazunohatogayahoohatoyamaz" + + "akitahiroshimarriottrapaniimimatakatoris-a-personaltrainerhatsuk" + + "aichikaiseis-a-photographerokuappaviancargodaddynaliascoli-picen" + + "oipirangamvikddielddanuorrissagaeroclubmedecincinnationwidealsta" + + "haugesunderseaportsinfolldalabamagasakishimabarackmazehattfjelld" + + "alhayashimamotobungotakadapliernewhollandhazuminobusellsyourhome" + + "goodshimotsumahboehringerikehelsinkitakamiizumisanofidelitysvard" + + "ollshinichinanhembygdsforbundhemneshinjournalistjohnhemsedalhepf" + + "orgeherokussldheroyhgtvallee-aosteroyhigashichichibunkyonanaoshi" + + "mageandsoundandvisionhigashihiroshimanehigashiizumozakitakatakam" + + "oriokalmykiahigashikagawahigashikagurasoedahigashikawakitaaikita" + + "kyushuaiahigashikurumeiwamarshallstatebankfhappouhigashimatsushi" + + "maritimodernhigashimatsuyamakitaakitadaitoigawahigashimurayamamo" + + "torcycleshinjukumanohigashinarusembokukitamidsundhigashinehigash" + + "iomihachimanchesterhigashiosakasayamanakakogawahigashishirakawam" + + "atakanabeautydalhigashisumiyoshikawaminamiaikitamotosumitakagild" + + 
"eskaliszhigashitsunowruzhgorodeohigashiurausukitanakagusukumodum" + + "inamiiselectravelchannelhigashiyamatokoriyamanashifteditchyourip" + + "fizerhigashiyodogawahigashiyoshinogaris-a-playerhiraizumisatohob" + + "by-sitehirakatashinagawahiranais-a-republicancerresearchaeologic" + + "aliforniahirarahiratsukagawahirayaitakanezawahistorichouseshinka" + + "migotoyohashimotoshimahitachiomiyaginankokubunjis-a-rockstaracho" + + "wicehitachiotagooglecodespotravelersinsurancehitraeumtgeradeloit" + + "tevadsoccertificationhjartdalhjelmelandholeckobierzyceholidayhom" + + "eipgfoggiahomelinkhakassiahomelinuxn--32vp30haebaruminamifuranoh" + + "omeofficehomesecuritymaceratakaokaluganskodjejuifminamiizukamiok" + + "amikitayamatsuris-a-socialistmein-vigorgehomesecuritypccwinnersh" + + "inshinotsurgeryhomesenseminehomeunixn--3bst00minamimakis-a-soxfa" + + "nhondahoneywellbeingzonehongopocznosegawahonjyoitakarazukamakura" + + "zakitashiobarahornindalhorseoulminamiminowahortendofinternet-dns" + + "hinshirohospitalhoteleshintokushimahotmailhoyangerhoylandetroits" + + "kolelhumanitieshintomikasaharahurdalhurumajis-a-studentalhyllest" + + "adhyogoris-a-teacherkassymantechnologyhyugawarahyundaiwafunehzch" + + "onanbuildingripescaravantaajlchoyodobashichikashukujitawarajlljm" + + "pharmacienshirakofuefukihaboromskoguchikuzenjnjeonnamerikawauejo" + + "yokaichibahcavuotnagaranzannefrankfurtrentino-alto-adigejpmorgan" + + "jpnjprshiranukamogawajuniperjurkoshunantokigawakosugekotohiradom" + + "ainsureggiocalabriakotourakouhokutamakis-an-artisteinkjerusalemb" + + "roiderykounosupplieshiraokanagawakouyamashikokuchuokouzushimasoy" + + "kozagawakozakis-an-engineeringkpnkppspdnshiratakahagivestbytomar" + + "idagawassamukawataricohdatingkrasnodarkredirectmeldalkristiansan" + + "dcatshishikuis-an-entertainerkristiansundkrodsheradkrokstadelval" + + "daostarostwodzislawioshisognekryminamisanrikubetsupportrentino-a" + + "ltoadigekumatorinokumejimasudakumenanyokkaichirurgiens-dentistes" + + "-en-francekunisakis-bykunitachiarailwaykunitomigusukumamotoyamas" + + "sa-carrara-massacarraramassabusinessebyklegallocus-1kunneppulawy" + + "kunstsammlungkunstunddesignkuokgrouphdkureggioemiliaromagnakayam" + + "atsumaebashikshacknetrentino-s-tirollagrigentomologyeonggiehtavu" + + "oatnagaivuotnagaokakyotambabia-goracleaningkurgankurobelaudibleb" + + "timnetzkurogimilanokuroisoftwarendalenugkuromatsunais-certifiedo" + + "gawarabikomaezakirunorthwesternmutualkurotakikawasakis-foundatio" + + "nkushirogawakusupplykutchanelkutnokuzumakis-gonekvafjordkvalsund" + + "kvamfamberkeleykvanangenkvinesdalkvinnheradkviteseidskogkvitsoyk" + + "wpspiegelkzmissilevangermisugitokorozawamitourismolancastermitoy" + + "oakemiuramiyazumiyotamanomjondalenmlbfanmonmouthagebostadmonster" + + "monticellombardiamondshisuifuelveruminamitanemontrealestatefarme" + + "quipmentrentino-stirolmonza-brianzaporizhzhiamonza-e-della-brian" + + "zapposhitaramamonzabrianzaptokuyamatsusakahoginowaniihamatamakaw" + + "ajimarburgmonzaebrianzaramonzaedellabrianzamoparachutingmordovia" + + "jessheiminamiuonumatsumotofukemoriyamatsushigemoriyoshimilitarym" + + "ormoneymoroyamatsuuramortgagemoscowitdkmpspbarcelonagasakijobser" + + "verisignieznord-odalaziobihirosakikamijimassnasaarlandd-dnshome-" + + "webservercellikes-piedmontblancomeeres3-ap-southeast-1moseushist" + + "orymosjoenmoskeneshizukuishimofusaitamatsukuris-into-gamessinats" + + "ukigatakasagotembaixadamosshizuokananporovigotpantheonsitemosvik" + + "nx-serveronakatsugawamoteginozawaonsenmoviemovistargardmtpchrist" + + 
"masakikugawatchesarufutsunomiyawakasaikaitakoelniyodogawamtranby" + + "muenstermugithubcloudusercontentrentino-sud-tirolmuikamisatokama" + + "chippubetsubetsugarumukochikushinonsenergymulhouservebeermunakat" + + "anemuncieszynmuosattemuphiladelphiaareadmyblogsitemurmanskolobrz" + + "egersundmurotorcraftrentino-sudtirolmusashimurayamatsuzakis-leet" + + "rdmusashinoharamuseetrentino-sued-tirolmuseumverenigingmutsuzawa" + + "mutuellewismillermy-vigorlicemy-wanggouvicenzamyactivedirectorym" + + "yasustor-elvdalmycdn77-securechtrainingmydissentrentino-suedtiro" + + "lmydrobofagemydshoujis-lostre-toteneis-a-techietis-a-therapistoi" + + "amyeffectrentinoa-adigemyfirewallonieruchomoscienceandindustrynm" + + "yfritzmyftpaccesshowamyfusionmyhome-serverrankoshigayamelhusgard" + + "enmykolaivaolbia-tempio-olbiatempioolbialystokkepnogiftshowtimet" + + "eorapphilatelymymediapchromedicaltanissettairamyokohamamatsudamy" + + "pepsongdalenviknakanojohanamakinoharamypetshriramlidlugolekagami" + + "nogatagajobojis-not-certifieducatorahimeshimakanegasakinkobayash" + + "ikaoirminamiogunicomcastresistancemyphotoshibahccavuotnagareyama" + + "lborkdalvdalcesienarashinomypsxn--3e0b707emysecuritycamerakermys" + + "hopblocksigdalmyvnchryslerpictetrentinoaadigepicturesimple-urlpi" + + "emontepilotsirdalpimientaketomisatolgapinkomakiyosunndalpioneerp" + + "ippuphoenixn--3oq18vl8pn36apiszpittsburghofauskedsmokorsetagayas" + + "ells-for-ulvikautokeinopiwatepizzapkomatsushimashikizunokunimiho" + + "boleslawiechristiansburgriwataraidyndns-picsarpsborgroks-thisaya" + + "manobeokakudamatsueplanetariuminamiyamashirokawanabellevuelosang" + + "elesjaguarchitecturealtorlandplantationplantslingplatforminanopl" + + "aystationplazaplchungnamdalseidfjordyndns-remotewdyndns-serverda" + + "luroyplombardynamisches-dnslupskomforbarclaycards3-website-ap-no" + + "rtheast-1plumbingopmnpodzonepohlpoivronpokerpokrovskommunalforbu" + + "ndpolitiendapolkowicepoltavalle-aostathellexusdecorativeartsnoas" + + "aitomobellunorddalpomorzeszowithgoogleapisa-hockeynutsiracusakat" + + "akinouepordenonepornporsangerporsanguidelmenhorstalbansokanazawa" + + "porsgrunnanpoznanpraxis-a-bookkeeperugiaprdpreservationpresidiop" + + "rgmrprimeloyalistockholmestrandprincipeprivatizehealthinsurancep" + + "rochowiceproductionsokndalprofbsbxn--1lqs03nprogressivegasiaproj" + + "ectrentinoalto-adigepromombetsurfbx-ostrowwlkpmgulenpropertyprot" + + "ectionprotonetrentinoaltoadigeprudentialpruszkowithyoutubentleyp" + + "rzeworskogptplusterpvtrentinos-tirolpwchurchaseljeepostfoldnavyp" + + "zqldqponqslgbtrentinostirolquicksytesolarssonqvcirclegnicafedera" + + "tionstufftoread-booksnesolundbeckommunestuttgartrentoyokawasusak" + + "is-slickharkovalleeaosteigensusonosuzakaneyamazoesuzukaniepcesuz" + + "ukis-uberleetrentino-a-adigesvalbardunloppacificircustomersveios" + + "velvikomvuxn--3ds443gsvizzeraswedenswidnicarrierswiebodzindianap" + + "olis-a-bloggerswiftcoversicherungswinoujscienceandhistoryswisshi" + + "kis-very-badaddjamisonsynology-dsolutionsolognetuscanytushuissie" + + "r-justicetuvalle-daostaticsootuxfamilyvenneslaskerrylogisticsopo" + + "trentinosud-tirolvestfoldvestnesor-odalvestre-slidreamhostersor-" + + "varangervestre-totennishiawakuravestvagoyvevelstadvibo-valentiav" + + "ibovalentiavideovillaskoyabearalvahkihokumakogengerdalpha-myqnap" + + "cloudapplebesbydgoszczecinemakeupowiathletajimabariakembuchikuma" + + "gayagawakuyabukicks-assedicitadeliveryvinnicartiervinnytsiavipsi" + + "naapphonefossilkomaganevirginiavirtualvirtueeldomeindianmarketin" + + 
"gvirtuelvisakegawavistaprinternationalfirearmsorfoldviterboltroa" + + "ndinosaurepaircraftrevisohughesomavivoldavlaanderenvladikavkazim" + + "ierz-dolnyvladimirvlogoiphotographysiovolkswagentsorreisahayakaw" + + "akamiichikawamisatotalvologdanskongsvingervolvolkenkundenvolyngd" + + "alvossevangenvotevotingvotoyonakagyokutoursortlandworldworse-tha" + + "ndawowiwatsukiyonowritesthisblogsytewroclawloclawekoninjavald-ao" + + "starnbergwtciticatholicheltenham-radio-opencraftranagatorodoywtf" + + "bxosciencecentersciencehistorywuozuwwwmflabsorumincommbanklabudh" + + "abikinokawabarthagakhanamigawawzmiuwajimaxn--4gq48lf9jetztrentin" + + "o-aadigexn--4it168dxn--4it797konsulatrobeepilepsydneyxn--4pvxsou" + + "thcarolinazawaxn--54b7fta0ccivilizationxn--55qw42gxn--55qx5dxn--" + + "5js045dxn--5rtp49civilwarmanagementmpalmspringsakerxn--5rtq34kon" + + "yvelolxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2r" + + "xn--6qq986b3xlxn--7t0a264claimsasayamaxn--80adxhksouthwestfalenx" + + "n--80ao21axn--80aqecdr1axn--80asehdbarefootballooningjesdalillyo" + + "mbondiscountysnes3-website-ap-southeast-2xn--80aswgxn--80audneda" + + "lnxn--8ltr62kooris-an-actorxn--8pvr4uxn--8y0a063axn--90a3academy" + + "-firewall-gatewayxn--90aishobaraomoriguchiharahkkeravjuedischesa" + + "peakebayernrtrogstadxn--90azhytomyrxn--9dbhblg6dietcimdbargainst" + + "itutelemarkaratsuginamikatagamiharuconnectatarantottoribestadisc" + + "overyomitanobirastronomy-gatewayokosukanzakiwienaturalsciencesna" + + "turelles3-ap-southeast-2xn--9dbq2axn--9et52uxn--9krt00axn--andy-" + + "iraxn--aroport-byanaizuxn--asky-iraxn--aurskog-hland-jnbarreauct" + + "ionayorovnobninskarelianceu-1xn--avery-yuasakuhokkaidontexistein" + + "geekopervikhmelnitskiyamashikexn--b-5gaxn--b4w605ferdxn--bck1b9a" + + "5dre4clickatowicexn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jx" + + "axn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn-" + + "-bievt-0qa2xn--bjarky-fyandexn--3pxu8konskowolayangroupharmacysh" + + "iraois-an-accountantshinyoshitomiokamitsuexn--bjddar-ptamayufuet" + + "tertdasnetzxn--blt-elabourxn--bmlo-graingerxn--bod-2naroyxn--brn" + + "ny-wuaccident-investigation-aptibleaseating-organicbcn-north-1xn" + + "--brnnysund-m8accident-prevention-webhopenairbusantiquest-a-la-m" + + "aisondre-landebudapest-a-la-masionionjukudoyamagazineat-urlxn--b" + + "rum-voagatromsakakinokiaxn--btsfjord-9zaxn--c1avgxn--c2br7gxn--c" + + "3s14mintelligencexn--cck2b3barrel-of-knowledgemologicallyngenvir" + + "onmentalconservationflfanfshostrolekamisunagawaugustowadaegubs3-" + + "ca-central-1xn--cg4bkis-very-evillagexn--ciqpnxn--clchc0ea0b2g2a" + + "9gcdn77-sslattumisakis-into-carshioyanagawaxn--comunicaes-v6a2ox" + + "n--correios-e-telecomunicaes-ghc29axn--czr694barrell-of-knowledg" + + "eologyonagoyaukraanghkeymachineustarhubalestrandabergamoareke164" + + "xn--czrs0tromsojaworznoxn--czru2dxn--czrw28bashkiriaurskog-holan" + + "droverhalla-speziaeroportalaheadjudaicaaarborteaches-yogasawarac" + + "ingroks-theatree12xn--d1acj3basilicataniaustevollarvikarasjokara" + + "suyamarylhurstjordalshalsenaturbruksgymnaturhistorisches3-eu-cen" + + "tral-1xn--d1alfaromeoxn--d1atrusteexn--d5qv7z876clinichernigover" + + "nmentjometlifeinsurancexn--davvenjrga-y4axn--djrs72d6uyxn--djty4" + + "koryokamikawanehonbetsurutaharaxn--dnna-grajewolterskluwerxn--dr" + + "bak-wuaxn--dyry-iraxn--e1a4cliniquenoharaxn--eckvdtc9dxn--efvn9s" + + "owaxn--efvy88haibarakitahatakamatsukawaxn--ehqz56nxn--elqq16hair" + + "-surveillancexn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429ko" + + 
"saigawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hspjelkavikomonoxn--" + + "fiq64basketballfinanzgoraustinnatuurwetenschappenaumburgjemnes3-" + + "eu-west-1xn--fiqs8spreadbettingxn--fiqz9spydebergxn--fjord-lraxn" + + "--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde-" + + "grandrapidsrlxn--frna-woaraisaijotrvarggatritonxn--frya-hraxn--f" + + "zc2c9e2clintonoshoesaseboknowsitallutskypexn--fzys8d69uvgmailxn-" + + "-g2xx48clothingrondarxn--gckr3f0fermobilyxn--gecrj9cloudnsdojoet" + + "suwanouchikujogaszczytnore-og-uvdaluxembourgrongaxn--ggaviika-8y" + + "a47hakatanotogawaxn--gildeskl-g0axn--givuotna-8yaotsurreyxn--gjv" + + "ik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-very-goodhandsonxn" + + "--gmqw5axn--h-2failxn--h1aeghakodatexn--h2brj9cnsaskatchewanxn--" + + "hbmer-xqaxn--hcesuolo-7ya35batodayonaguniversityoriikariyakumold" + + "eltaiwanairlinedre-eikerxn--hery-iraxn--hgebostad-g3axn--hmmrfea" + + "sta-s4acctrysiljan-mayenxn--hnefoss-q1axn--hobl-iraxn--holtlen-h" + + "xaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b" + + "6b1a6a2exn--imr513nxn--indery-fyasakaiminatoyonezawaxn--io0a7is-" + + "very-nicexn--j1aeferraraxn--j1amhakonexn--j6w193gxn--jlq61u9w7ba" + + "tsfjordishakotankarlsoyoshiokarasjohkamikoaniikappugliaustraliai" + + "sondriodejaneirochesterhcloudfunctions3-external-1xn--jlster-bya" + + "sugis-very-sweetpepperxn--jrpeland-54axn--jvr189misasaguris-into" + + "-cartoonshirahamatonbetsurnadalxn--k7yn95exn--karmy-yuaxn--kbrq7" + + "oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dx" + + "n--kltx9axn--klty5xn--42c2d9axn--koluokta-7ya57hakubadajozorahol" + + "taleniwaizumiotsukumiyamazonawsabaerobaticketshimonosekikawaxn--" + + "kprw13dxn--kpry57dxn--kpu716ferrarivnexn--kput3is-with-thebandoo" + + "mdnsiskinkyotobetsumidatlantichoseiroumuenchenisshingugexn--krag" + + "er-gyasuokanraxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn" + + "--krjohka-hwab49jevnakershuscultureggio-emilia-romagnakatombetsu" + + "my-routerxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatomitamamurax" + + "n--kvnangen-k0axn--l-1fairwindsrtrentinosudtirolxn--l1accenturek" + + "lamborghiniizaxn--laheadju-7yatsukanumazuryxn--langevg-jxaxn--lc" + + "vr32dxn--ldingen-q1axn--leagaviika-52bauhausposts-and-telecommun" + + "icationsncfdivtasvuodnakaiwamizawaustrheimatunduhrennesoyokotebi" + + "nagisochildrensgardenaustdalavagiskebinorfolkebibleikangerxn--le" + + "sund-huaxn--lgbbat1ad8jewelryxn--lgrd-poacoachampionshiphoptobam" + + "agentositelekommunikationlinebraskaunjargallupinbbcaseihichisobe" + + "tsuitainairforceoceanographics3-website-eu-west-1xn--lhppi-xqaxn" + + "--linds-pramericanartulangevagrarboretumbriamallamaintenancechir" + + "ealminnesotaketakatsukis-into-animelbournexn--lns-qlansrvareserv" + + "eblogspotrentinosued-tirolxn--loabt-0qaxn--lrdal-sraxn--lrenskog" + + "-54axn--lt-liacntoyonoxn--lten-granexn--lury-iraxn--mely-iraxn--" + + "merker-kuaxn--mgb2ddestordalxn--mgb9awbferreroticanonoichinomiya" + + "kexn--mgba3a3ejtunesomnaritakurashikis-savedunetbankharkivguccip" + + "rianiigataishinomakimobetsuliguriaxn--mgba3a4f16axn--mgba3a4fran" + + "amizuholdingsmileksvikosakaerodromegalsacebetsukubankhmelnytskyi" + + "vanylvenicexn--mgba7c0bbn0axn--mgbaakc7dvfetsundynvpnxn--mgbaam7" + + "a8hakuis-a-nascarfanxn--mgbab2bdxn--mgbai9a5eva00bbtateshinanoma" + + "chintaifun-dnsaliaskimitsubatamicable-modembetsukuibigawauthorda" + + "landroiddnskingjerdrumckinseyokozebizenakaniikawatanaguraetnagah" + + "amaroygardendoftheinternetflixilovecollegefantasyleaguernseyboml" + + 
"oans3-ap-northeast-2xn--mgbai9azgqp6jewishartgalleryxn--mgbayh7g" + + "padualstackspace-to-rentalstomakomaibaraxn--mgbb9fbpobanazawaxn-" + + "-mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--m" + + "gberp4a5d4arxn--mgbi4ecexposedxn--mgbpl2fhskleppiagetmyiphilipsy" + + "nology-diskstationxn--mgbqly7c0a67fbcolonialwilliamsburgrossetou" + + "chijiwadellogliastradingroundhandlingroznyxn--mgbqly7cvafredriks" + + "tadtvstoreitrentinosuedtirolxn--mgbt3dhdxn--mgbtf8flatangerxn--m" + + "gbtx2bbvacationswatch-and-clockerxn--mgbx4cd0abbottunkongsbergxn" + + "--mix082fgunmarcheaparisor-fronxn--mix891fhvalerxn--mjndalen-64a" + + "xn--mk0axindustriesteambulancexn--mk1bu44coloradoplateaudioxn--m" + + "kru45isleofmandalxn--mlatvuopmi-s4axn--mli-tlanxesstorfjordxn--m" + + "lselv-iuaxn--moreke-juaxn--mori-qsakuragawaxn--mosjen-eyatsushir" + + "oxn--mot-tlapyxn--mre-og-romsdal-qqbeppublishproxyzgorzeleccolog" + + "newspaperxn--msy-ula0hakusandiegoodyearthadselfipassagenshimonit" + + "ayanagitlaborxn--mtta-vrjjat-k7afamilycompanycolumbusheyxn--muos" + + "t-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45brj9ci" + + "vilaviationxn--nit225koseis-an-actresshiojirishirifujiedaxn--nme" + + "sjevuemie-tcbalatinord-frontierxn--nnx388axn--nodexn--nqv7fs00em" + + "axn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservecount" + + "erstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattanooganordreisa-ge" + + "ekosherbrookegawaxn--o3cw4haldenxn--od0algxn--od0aq3bernuorockar" + + "tuzyukibmdivttasvuotnakamagayachts3-website-sa-east-1xn--ogbpf8f" + + "lekkefjordxn--oppegrd-ixaxn--ostery-fyawaraxn--osyro-wuaxn--p1ac" + + "fidonnakamuratajimicrolightinguovdageaidnunzenxn--p1aissmarterth" + + "anyouxn--pbt977communitysfjordyndns-weberlincolnxn--pgbs0dhlxn--" + + "porsgu-sta26fieldyroyrvikinguitarschweizparaglidingujolsterxn--p" + + "ssu33lxn--pssy2uxn--q9jyb4comobaraxn--qcka1pmcdonaldstpetersburg" + + "xn--qqqt11misconfusedxn--qxamuneuestreamsterdamnserverbaniaxn--r" + + "ady-iraxn--rdal-poaxn--rde-ulaquilancashirehabmerxn--rdy-0nabari" + + "wchoshibuyachiyodavvenjargaulardalukowiiheyaizuwakamatsubushikus" + + "akadogawaxn--rennesy-v1axn--rhkkervju-01aflakstadaokagakibichuox" + + "n--rholt-mragowoodsidexn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn" + + "--risa-5narusawaxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmsk" + + "og-byawatahamaxn--rny31halsaintlouis-a-anarchistoireggio-calabri" + + "axn--rovu88beskidyn-vpncasertaipeiheijiinetnedalimanowarudautomo" + + "tivecodyn-o-saurlandes3-fips-us-gov-west-1xn--rros-granvindafjor" + + "dxn--rskog-uuaxn--rst-0narutokyotangovturystykannamihamadaxn--rs" + + "ta-francaiseharaxn--ryken-vuaxn--ryrvik-byaxn--s-1faitheguardian" + + "xn--s9brj9comparemarkerryhotelsassaris-a-doctorayxn--sandnessjen" + + "-ogbizxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratangenxn-" + + "-skierv-utazaskvolloabathsbcompute-1xn--skjervy-v1axn--skjk-soax" + + "n--sknit-yqaxn--sknland-fxaxn--slat-5narviikamishihoronobeauxart" + + "sandcraftstudioxn--slt-elabbvieeexn--smla-hraxn--smna-gratis-a-b" + + "ruinsfanxn--snase-nraxn--sndre-land-0cbstudyndns-at-homedepotenz" + + "amamicrosoftbankomorotsukaminoyamaxunusualpersonxn--snes-poaxn--" + + "snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-va" + + "ranger-ggbestbuyshouses3-website-us-east-1xn--srfold-byaxn--srre" + + "isa-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshal" + + "sen-sqbetainaboxfusejnynysadodgeometre-experts-comptables3-websi" + + "te-us-west-1xn--stre-toten-zcbieigersundiyukuhashimoichinosekiga" + + 
"harautoscanadaejeonbukaratehimeji234xn--t60b56axn--tckweathercha" + + "nnelxn--tiq49xqyjfkhersonxn--tjme-hraxn--tn0agrinet-freakstuff-4" + + "-salexn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn--t" + + "rna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvaroyxn--uc0ay4axn--ui" + + "st22hammarfeastafricapetownnews-stagingxn--uisz3gxn--unjrga-rtao" + + "baokinawashirosatochiokinoshimalatvuopmiasakuchinotsuchiurakawal" + + "brzycharternopilawalesundxn--unup4yxn--uuwu58axn--vads-jraxn--va" + + "rd-jraxn--vegrshei-c0axn--vermgensberater-ctbielawalterxn--vermg" + + "ensberatung-pwbiellaakesvuemielecceu-2xn--vestvgy-ixa6oxn--vg-yi" + + "abcgxn--vgan-qoaxn--vgsy-qoa0jgoraxn--vgu402computerhistoryofsci" + + "ence-fictionxn--vhquvbarclays3-website-ap-southeast-1xn--vler-qo" + + "axn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bieszczad" + + "ygeyachimataikikonaioirasebastopologyeongnamegawakeisenbahnhlfan" + + "hs3-website-us-west-2xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dx" + + "n--wgbh1comsecuritytacticsatxn--1ck2e1balsfjordgcahcesuolodingen" + + "aval-d-aosta-valleyolasitemrxn--wgbl6axn--xhq521bievatmallorcada" + + "quesakuraiitatebayashiibaghdadultateyamaveroykenglanddnss3-sa-ea" + + "st-1xn--xkc2al3hye2axn--xkc2dl3a5ee0hamurakamigoriginshimosuwalk" + + "is-a-nurservebbshimotsukexn--y9a3aquariumishimatsunoxn--yer-znar" + + "vikoshimizumakis-an-anarchistoricalsocietyxn--yfro4i67oxn--ygard" + + "en-p1axn--ygbi2ammxn--45q11civilisationxn--ystre-slidre-ujbifuka" + + "gawarszawashingtondclkarmoyurihonjoyentatsunoceanographiquevents" + + "akyotanabeneventoeidsvollimitednpagefrontappagespeedmobilizerodd" + + "avocatanzarowegroweibolzanordkappgafanpachigasakidsmynasushiobar" + + "agusarts3-us-east-2xn--zbx025dxn--zf0ao64axn--zf0avxn--4gbrimini" + + "ngxn--zfr164bihorologyusuharavoues3-us-gov-west-1xperiaxz" // nodes is the list of nodes. 
Each node is represented as a uint32, which // encodes the node's children, wildcard bit and node type (as an index into @@ -482,8141 +483,8127 @@ const text = "biellaakesvuemieleccebieszczadygeyachimatainaircraftraeumtgerade" // [15 bits] text index // [ 6 bits] text length var nodes = [...]uint32{ - 0x29e943, - 0x364444, - 0x28af46, - 0x371983, - 0x371986, - 0x394246, - 0x3a4103, - 0x202f04, - 0x24f607, - 0x28ab88, - 0x1a00882, - 0x309dc7, - 0x3533c9, - 0x2fb3ca, - 0x2fb3cb, - 0x22fe43, - 0x28cac6, - 0x2352c5, - 0x1e00702, - 0x211ac4, - 0x2c7a83, - 0x226bc5, - 0x2200d42, - 0x2a0f43, - 0x2707e44, - 0x368485, - 0x2a00c42, - 0x3797ce, - 0x24a483, - 0x38b406, - 0x2e04642, - 0x2a5907, - 0x237d46, - 0x3200a42, - 0x2ae043, - 0x2ae044, - 0x280f86, - 0x36f448, - 0x283a46, - 0x386144, - 0x3601002, - 0x326a09, - 0x363a07, - 0x3351c6, - 0x355049, - 0x293988, - 0x367104, - 0x3a6606, - 0x20e306, - 0x3a02e02, - 0x241d0f, - 0x33174e, - 0x212484, - 0x2bb945, - 0x202e05, - 0x2ea589, - 0x23e889, - 0x325747, - 0x221646, - 0x26b083, - 0x3e056c2, - 0x346fc3, - 0x207a4a, - 0x211e83, - 0x250585, - 0x2040c2, - 0x2830c9, - 0x4204802, - 0x209084, - 0x29e486, - 0x284b45, - 0x34c904, - 0x4a74a04, - 0x204803, - 0x234304, - 0x4e01842, - 0x364184, - 0x52e41c4, - 0x22410a, - 0x56009c2, - 0x334307, - 0x38e008, - 0x6201182, - 0x322847, - 0x2b7344, - 0x2b7347, - 0x383d05, - 0x370a47, - 0x325506, - 0x332a44, - 0x340c85, - 0x28df47, - 0x72046c2, - 0x349183, - 0x218782, - 0x366703, - 0x76108c2, - 0x2798c5, - 0x7a02d42, - 0x368ac4, - 0x277785, - 0x2123c7, - 0x2ddc0e, - 0x330a44, - 0x244744, - 0x20ca03, - 0x326ec9, - 0x30528b, - 0x30e148, - 0x31cd88, - 0x320888, - 0x20a588, - 0x354e8a, - 0x370947, - 0x217906, - 0x7e9c3c2, - 0x377d83, - 0x380c43, - 0x38bc84, - 0x250ac3, - 0x3a4143, - 0x1713b02, - 0x8203182, - 0x2484c5, - 0x30c046, - 0x2c9bc4, - 0x396e07, - 0x22eec6, - 0x280304, - 0x3a7dc7, - 0x203183, - 0x86bdb82, - 0x8a4f882, - 0x8e13702, - 0x213706, + 0x32f983, + 0x28a344, + 0x30e286, + 0x371b43, + 0x371b46, + 0x394646, + 0x3a5003, + 0x367844, + 0x260687, + 0x30dec8, + 0x1a04cc2, + 0x316e47, + 0x355d89, + 0x32228a, + 0x32228b, + 0x22eec3, + 0x28fac6, + 0x2327c5, + 0x1e04e02, + 0x217c04, + 0x2a90c3, + 0x3ac705, + 0x2203942, + 0x329e03, + 0x26957c4, + 0x368e05, + 0x2a10182, + 0x3787ce, + 0x253343, + 0x3a03c6, + 0x2e00142, + 0x30e407, + 0x23ae46, + 0x3200c42, + 0x22a343, + 0x254b04, + 0x325a86, + 0x35c208, + 0x28a706, + 0x21ad04, + 0x3601442, + 0x332309, + 0x207587, + 0x256286, + 0x339309, + 0x29d788, + 0x328d44, + 0x364906, + 0x36b606, + 0x3a02942, + 0x27144f, + 0x20f94e, + 0x2131c4, + 0x2c6085, + 0x367745, + 0x385989, + 0x241a89, + 0x368047, + 0x23c9c6, + 0x273a43, + 0x3e02342, + 0x2df283, + 0x205aca, + 0x221d83, + 0x303145, + 0x289c02, + 0x289c09, + 0x4200f82, + 0x203d84, + 0x2250c6, + 0x2eb205, + 0x34cbc4, + 0x4a04c04, + 0x205283, + 0x231ac4, + 0x4e02e02, + 0x209f04, + 0x52f4e04, + 0x24ae0a, + 0x5601342, + 0x303907, + 0x26b8c8, + 0x6202f82, + 0x31cf07, + 0x2c2e04, + 0x2c2e07, + 0x373d85, + 0x357a47, + 0x367e06, + 0x2e8384, + 0x39c0c5, + 0x294847, + 0x7206cc2, + 0x34f703, + 0x200582, + 0x200583, + 0x76125c2, + 0x221ec5, + 0x7a02302, + 0x27bd84, + 0x2810c5, + 0x213107, + 0x269f0e, + 0x224bc4, + 0x206a84, + 0x211503, + 0x2ceac9, + 0x2ed94b, + 0x3a6548, + 0x3148c8, + 0x318dc8, + 0x237908, + 0x33914a, + 0x357947, + 0x318146, + 0x7ea4fc2, + 0x35bc03, + 0x366c43, + 0x371144, + 0x3a5043, + 0x324cc3, + 0x171f542, + 0x8203682, + 0x252185, + 0x2a11c6, + 0x2d6d44, + 0x2f6e07, + 0x382986, + 0x319dc4, + 0x398247, + 0x20f7c3, + 0x86c8902, + 
0x8b124c2, + 0x8e1c502, + 0x21c506, 0x9200002, - 0x37f645, - 0x315043, - 0x205244, - 0x2dbe44, - 0x2dbe45, - 0x207043, - 0x9723ac3, - 0x9a093c2, - 0x2873c5, - 0x2873cb, - 0x22d086, - 0x20cbcb, - 0x26ff44, - 0x20d189, - 0x20ed44, - 0x9e0fd82, - 0x210c83, - 0x211183, - 0x1611302, - 0x23c343, - 0x21130a, - 0xa211d42, - 0x211d45, - 0x28ea8a, - 0x2cd704, - 0x212d43, - 0x213384, - 0x213cc3, - 0x213cc4, - 0x213cc7, - 0x214245, - 0x218dc5, - 0x219586, - 0x21a0c6, - 0x21aa43, - 0x21dc48, - 0x258e83, - 0xa615802, - 0x21f008, - 0x21580b, - 0x222d08, - 0x223586, - 0x224547, - 0x229748, - 0xb279a82, - 0xb693f02, - 0x20b608, - 0x2ad0c7, - 0x23b405, - 0x23b408, - 0x281888, - 0x2ada03, - 0x22eac4, - 0x38bcc2, - 0xba2f482, - 0xbe051c2, - 0xc62f802, - 0x22f803, - 0xca02ec2, - 0x202ec3, - 0x2fe704, - 0x21abc3, - 0x3670c4, - 0x24edcb, + 0x359b85, + 0x320a83, + 0x200004, + 0x2ee344, + 0x2ee345, + 0x203e43, + 0x9768883, + 0x9a07f42, + 0x28e245, + 0x28e24b, + 0x2d0686, + 0x20a0cb, + 0x225b04, + 0x20a7c9, + 0x20be84, + 0x9e0c0c2, + 0x20cf83, + 0x210843, + 0x1600802, + 0x260903, + 0x2109ca, + 0xa211cc2, + 0x217e85, + 0x2983ca, + 0x2dc184, + 0x369f03, + 0x315044, + 0x213643, + 0x213644, + 0x213647, + 0x213985, + 0x213e05, + 0x2150c6, + 0x2167c6, + 0x2182c3, + 0x21c188, + 0x221c43, + 0xa601082, + 0x21cac8, + 0x21ff0b, + 0x2216c8, + 0x2221c6, + 0x222a47, + 0x226488, + 0xb2413c2, + 0xb6c2fc2, + 0x326388, + 0x257ec7, + 0x22bac5, + 0x22bac8, + 0x2bf708, + 0x3871c3, + 0x22a784, + 0x371182, + 0xba2af82, + 0xbe5ba02, + 0xc62c1c2, + 0x22c1c3, + 0xca0e542, + 0x367803, + 0x2f8e04, + 0x218443, + 0x328d04, + 0x25fe4b, + 0x21fe43, + 0x2e4d06, + 0x226284, + 0x2a5a8e, + 0x361c05, + 0x3a04c8, + 0x282247, + 0x28224a, + 0x229d03, + 0x2b3447, + 0x2edb05, + 0x22ea44, + 0x272486, + 0x272487, + 0x32b704, + 0x304ac7, + 0x26a244, + 0x35bc84, + 0x35bc86, + 0x386904, + 0x20e546, + 0x223c83, + 0x22b888, + 0x30c3c8, + 0x24e2c3, + 0x2608c3, + 0x204f04, + 0x395943, + 0xce0d882, + 0xd2db442, + 0x20c043, + 0x201806, + 0x35c383, + 0x28bfc4, + 0xd6081c2, + 0x2081c3, + 0x358243, + 0x219942, + 0xda02ac2, + 0x2c55c6, + 0x2334c7, + 0x2f7a85, + 0x37da44, + 0x3723c5, + 0x367007, + 0x274205, + 0x2d3e89, + 0x2e0586, + 0x2e5488, + 0x2f7986, + 0xde0db42, + 0x362f48, + 0x2f8bc6, + 0x20db45, + 0x304687, + 0x30c2c4, + 0x30c2c5, + 0x28a8c4, + 0x28a8c8, + 0xe20a182, + 0xe60c502, + 0x313646, + 0x2c28c8, + 0x31f145, + 0x332c06, + 0x3356c8, + 0x33e1c8, + 0xea0fd45, + 0xee0c504, + 0x2958c7, + 0xf20bbc2, + 0xf61c402, + 0x1060d1c2, + 0x35aa45, + 0x2a8505, + 0x282606, + 0x3698c7, + 0x376ec7, + 0x10ed0783, + 0x2e3447, + 0x30dc88, + 0x3823c9, + 0x378987, + 0x390287, + 0x3a5b08, + 0x3aa206, + 0x22e546, + 0x22f18c, + 0x230b0a, + 0x230fc7, + 0x23268b, + 0x233307, + 0x23330e, + 0x236244, + 0x23a184, + 0x23b547, + 0x264f47, + 0x240d46, + 0x240d47, + 0x241207, + 0x18a20802, + 0x2420c6, + 0x2420ca, + 0x24294b, + 0x243407, + 0x244ec5, + 0x245203, + 0x246c46, + 0x246c47, + 0x241c43, + 0x18e38702, + 0x24b74a, + 0x19356fc2, + 0x196acdc2, + 0x19a4cec2, + 0x19e26982, + 0x24da85, + 0x24e0c4, + 0x1a604d02, + 0x209f85, + 0x294ac3, + 0x20bf85, + 0x237804, + 0x20a684, + 0x2ba5c6, + 0x26a546, + 0x28e443, + 0x3b1484, + 0x209383, + 0x1aa03b02, + 0x263544, + 0x263546, + 0x295e45, + 0x27fdc6, + 0x304788, + 0x20cb44, + 0x2a8e88, + 0x343fc5, + 0x24a108, + 0x2b1cc6, + 0x2bddc7, + 0x26cdc4, + 0x26cdc6, + 0x25e303, + 0x382e43, + 0x2c9388, + 0x318bc4, + 0x238d47, + 0x220246, + 0x2dad89, + 0x315108, + 0x319f08, + 0x349184, + 0x39b9c3, + 0x23e342, + 0x1ba34682, + 0x1be16102, + 0x3b2783, + 0x1c204a42, + 
0x2607c4, + 0x390a46, + 0x39a6c5, + 0x2a4383, + 0x232004, + 0x2b6f47, + 0x26aa43, + 0x250d08, + 0x20e8c5, + 0x370083, + 0x281045, + 0x281184, + 0x2fb8c6, + 0x210384, + 0x211a46, + 0x213046, + 0x263004, + 0x21fd83, + 0x2225c3, + 0x1c604e42, + 0x377905, + 0x222e03, + 0x1ca1c3c2, + 0x22f143, + 0x210085, + 0x231b83, + 0x231b89, + 0x1ce07d02, + 0x1d601142, + 0x28d9c5, + 0x21a746, + 0x2d6906, + 0x2b94c8, + 0x2b94cb, + 0x20538b, + 0x2f7c85, + 0x2e1045, + 0x2c9fc9, + 0x1600742, + 0x2631c8, + 0x206044, + 0x1de001c2, + 0x25fa83, + 0x1e665106, + 0x20f088, + 0x1ea04782, + 0x233ec8, + 0x1ee01742, + 0x225c4a, + 0x1f2d0e43, + 0x3b1e86, + 0x206988, + 0x207d48, + 0x39a9c6, + 0x36d5c7, + 0x271647, + 0x220b0a, + 0x2dc204, + 0x3423c4, + 0x355589, + 0x1fb8fc85, + 0x20fb46, + 0x208203, + 0x251944, + 0x201e44, + 0x201e47, + 0x22cec7, + 0x237044, + 0x220a45, + 0x2826c8, + 0x350707, + 0x3619c7, + 0x1fe041c2, + 0x226004, + 0x298cc8, + 0x381004, + 0x24f0c4, + 0x24fa45, + 0x24fb87, + 0x215809, + 0x2505c4, + 0x2510c9, + 0x251308, + 0x2516c4, + 0x2516c7, + 0x20251c43, + 0x252487, + 0x16101c2, + 0x179d442, + 0x253386, + 0x2539c7, + 0x253e84, + 0x255607, + 0x256907, + 0x257483, + 0x2abc02, + 0x229c02, + 0x258743, + 0x258744, + 0x25874b, + 0x3149c8, + 0x25f184, + 0x2594c5, + 0x25ba87, + 0x25d8c5, + 0x2c4dca, + 0x25f0c3, + 0x2060da42, + 0x22b484, + 0x264d09, + 0x268983, + 0x268a47, + 0x28ee89, + 0x37b5c8, + 0x2d8483, + 0x27ff47, + 0x280689, + 0x2316c3, + 0x288284, + 0x289389, + 0x28c6c6, + 0x28dc83, + 0x2023c2, + 0x24ca43, + 0x36ab47, + 0x2bfa85, + 0x35ba06, + 0x256bc4, + 0x2d1d45, + 0x205a83, + 0x218506, + 0x20a9c2, + 0x391084, + 0x22ad02, + 0x22ad03, + 0x20a00182, + 0x29b083, + 0x216c44, + 0x216c47, + 0x200306, + 0x201e02, + 0x20e01dc2, + 0x21e184, + 0x2123b882, + 0x21600502, + 0x2dfcc4, + 0x2dfcc5, + 0x2b9cc5, + 0x262746, + 0x21a0ca82, + 0x20ca85, + 0x210d85, + 0x225883, + 0x215c06, + 0x216dc5, + 0x21c482, + 0x33de05, + 0x21c484, + 0x2230c3, + 0x223303, + 0x21e097c2, + 0x294a47, + 0x221144, + 0x221149, + 0x251844, + 0x228903, + 0x341cc9, + 0x3777c8, + 0x2a8384, + 0x2a8386, + 0x210503, + 0x259b43, + 0x332ec3, + 0x222ebc02, + 0x30eb82, + 0x22600642, + 0x3238c8, + 0x35c588, + 0x394d86, + 0x24ed45, + 0x2b32c5, + 0x200647, + 0x228fc5, + 0x2630c2, + 0x22a9a502, + 0x1614502, + 0x38fe08, + 0x362e85, + 0x351d04, + 0x2f18c5, + 0x3836c7, + 0x24f5c4, + 0x246f82, + 0x22e01182, + 0x336f04, + 0x2173c7, + 0x28e9c7, + 0x357a04, + 0x298383, + 0x24e204, + 0x24e208, + 0x22e886, + 0x27230a, + 0x2156c4, + 0x298708, + 0x259784, + 0x222b46, + 0x29a4c4, + 0x35ad46, + 0x221409, + 0x25b287, + 0x234483, + 0x23274842, + 0x274843, + 0x20c2c2, + 0x23651b02, + 0x2f0b06, + 0x364148, + 0x2a9d87, + 0x395d09, + 0x297f09, + 0x2ab245, + 0x2ad3c9, + 0x2ae045, + 0x2ae189, + 0x2af5c5, + 0x2b0208, + 0x27f284, + 0x23a8d6c7, + 0x294403, + 0x2b0407, + 0x390646, + 0x2b08c7, + 0x2a6d45, + 0x2a7f83, + 0x23e00dc2, + 0x392604, + 0x2423b8c2, + 0x264403, + 0x24618d82, + 0x307646, + 0x26b845, + 0x2b2bc7, + 0x37f183, + 0x324c44, + 0x2138c3, + 0x2386c3, + 0x24a0a3c2, + 0x25201a02, + 0x394744, + 0x2abbc3, + 0x38c985, + 0x226d85, + 0x25602282, + 0x25e00bc2, + 0x280286, + 0x318d04, + 0x2491c4, + 0x2491ca, + 0x26601d42, + 0x37324a, + 0x204148, + 0x26a964c4, + 0x201d43, + 0x25ff43, + 0x318f09, + 0x2a8909, + 0x2b7046, + 0x26e04303, + 0x328145, + 0x30664d, + 0x204306, + 0x21268b, + 0x27203482, + 0x295048, + 0x27e1c282, + 0x282051c2, + 0x37aa85, + 0x28600b02, + 0x2a3147, + 0x212b87, + 0x201503, + 0x22f848, + 0x28a02f02, + 0x202f04, + 0x217043, + 0x38d045, + 0x386fc3, + 0x2eb106, + 
[... diff hunk elided: several thousand regenerated lookup-table constants (hex values added and removed) in what appears to be an autogenerated vendored table file; the original one-value-per-line diff formatting was lost in extraction and the hunk contains no hand-written changes ...]
0x200642, + 0x2bf72245, + 0x2c293149, + 0x2c71db88, + 0x2cab2085, + 0x2cf363c7, + 0x2d219b08, + 0x2d6e8605, + 0x2da5b4c6, + 0x2de346c9, + 0x2e2b8648, + 0x2e6c4b48, + 0x2ea9cf0a, + 0x2ee52104, + 0x2f2d6585, + 0x2f6becc8, + 0x2fb51245, + 0x2184c2, + 0x2fe63b83, + 0x302a7786, + 0x3064ea48, + 0x30a24f06, + 0x30ecec88, + 0x3132d1c6, + 0x316e4444, + 0x202082, + 0x31b630c7, + 0x31eaeb84, + 0x3227dc47, + 0x327a1087, + 0x200382, + 0x32aa0685, + 0x32e03bc4, + 0x332d1807, + 0x3362adc7, + 0x33a87406, + 0x33e36085, + 0x3429b807, + 0x346d2688, + 0x34a37f07, + 0x34eb0689, + 0x3538e6c5, + 0x35719c07, + 0x35a92e46, + 0x35e62d48, + 0x2460cd, + 0x24cf09, + 0x2f484b, + 0x25534b, + 0x27de4b, + 0x2aa88b, + 0x30f20b, + 0x30f4cb, + 0x30fd49, + 0x310f0b, + 0x3111cb, + 0x311ccb, + 0x31284a, + 0x312d8a, + 0x31338c, + 0x31608b, + 0x3166ca, + 0x327eca, + 0x3328ce, + 0x333a4e, + 0x333dca, + 0x335d8a, + 0x3369cb, + 0x336c8b, + 0x337a4b, + 0x34c7cb, + 0x34cdca, + 0x34da8b, + 0x34dd4a, + 0x34dfca, + 0x34e24a, + 0x373ecb, + 0x37a2cb, + 0x37c38e, + 0x37c70b, + 0x383ecb, + 0x38500b, + 0x38984a, + 0x389ac9, + 0x389d0a, + 0x38b38a, + 0x39e50b, + 0x39fe8b, + 0x3a09ca, + 0x3a28cb, + 0x3a588b, + 0x3b44cb, + 0x36285b88, + 0x3668c289, + 0x36aa3a49, + 0x36ee0bc8, + 0x33c685, + 0x202943, + 0x212944, + 0x206885, + 0x247806, + 0x25b245, + 0x28adc4, + 0x21a1c8, + 0x30af85, + 0x297a44, + 0x209907, + 0x2a280a, + 0x361d8a, + 0x3101c7, + 0x211f47, + 0x2fdec7, + 0x255b47, + 0x2fad45, + 0x343d06, + 0x22cb47, + 0x26fec4, + 0x2e6b46, + 0x2e6a46, + 0x208305, + 0x3492c4, + 0x38ec86, + 0x2a1647, + 0x22d046, + 0x351b47, + 0x26a783, + 0x2b4846, + 0x232045, + 0x283f07, + 0x270e0a, + 0x26dfc4, + 0x218ec8, + 0x2affc9, + 0x2cb147, + 0x334646, + 0x255908, + 0x200a49, + 0x3b23c4, + 0x2210c4, + 0x278285, + 0x22c848, + 0x2c7f47, + 0x2a7109, + 0x2f9cc8, + 0x347a86, + 0x24c646, + 0x29de88, + 0x354c46, + 0x202e85, + 0x2874c6, + 0x27e108, + 0x254b86, + 0x25d14b, + 0x29dac6, + 0x29f50d, + 0x3b1785, + 0x2aea46, + 0x20f505, + 0x349909, + 0x2abe87, + 0x3195c8, + 0x292986, + 0x29e709, + 0x364546, + 0x270d85, + 0x2a4dc6, + 0x2c99c6, + 0x2cdb89, + 0x200846, + 0x253087, + 0x277885, + 0x202383, + 0x25d2c5, + 0x29f7c7, + 0x358e06, + 0x3b1689, + 0x3177c6, + 0x287706, + 0x215ec9, + 0x286ec9, + 0x2a5607, + 0x2cf688, + 0x377f89, + 0x284a88, + 0x379386, + 0x2d9dc5, + 0x23cb4a, + 0x287786, + 0x3a8506, + 0x2cbbc5, + 0x272188, + 0x215587, + 0x22e68a, + 0x251746, + 0x24d345, + 0x329cc6, + 0x2d6347, 0x334507, - 0x277283, - 0x36a1c8, - 0x334d86, - 0x334e89, - 0x2c6a06, - 0x2c2747, - 0x294889, - 0x381f48, - 0x2a3207, - 0x2fee83, - 0x338885, - 0x3b2005, - 0x328b0b, - 0x216d44, - 0x2c5044, - 0x273486, - 0x2ff447, - 0x39794a, - 0x245807, - 0x3b0407, - 0x2786c5, - 0x205285, - 0x2196c9, - 0x2d3d46, - 0x24568d, - 0x358045, - 0x2a2d43, - 0x205d03, - 0x349205, - 0x350005, - 0x29f988, - 0x276707, - 0x228b46, - 0x29a646, - 0x22d905, - 0x234687, - 0x2035c7, - 0x36ec47, - 0x2c948a, - 0x268508, - 0x328cc4, - 0x2ade47, - 0x277cc7, - 0x32d086, - 0x264607, - 0x2b2a08, - 0x226708, - 0x26b786, - 0x367f08, - 0x2c32c4, - 0x2fb0c6, - 0x3a9146, - 0x2c0a86, - 0x349c06, - 0x29abc4, - 0x3626c6, - 0x2b76c6, - 0x293606, - 0x2293c6, - 0x205bc6, - 0x2b2846, - 0x228a48, - 0x39de08, - 0x2c9cc8, - 0x259348, - 0x323bc6, - 0x210785, - 0x275386, - 0x2abec5, - 0x388807, - 0x28ae45, - 0x213d43, - 0x364605, - 0x22fd84, - 0x205d05, - 0x210143, - 0x39a7c7, - 0x31c448, - 0x2c7f86, - 0x2c514d, - 0x274dc6, - 0x292b85, - 0x2afd43, - 0x2b5789, - 0x212bc6, - 0x231246, - 0x29c2c4, - 0x237747, - 0x235b06, - 0x243245, - 0x216ec3, 
- 0x378a44, - 0x277e86, - 0x2b15c4, - 0x30d748, - 0x324a89, - 0x2f2209, - 0x29c0ca, - 0x23f88d, - 0x29da47, - 0x330a46, - 0x20ec44, - 0x27b109, - 0x283588, - 0x285106, - 0x263bc6, - 0x264607, - 0x2c3c06, - 0x21b606, - 0x38c346, - 0x333a8a, - 0x218948, - 0x22bc45, - 0x27d649, - 0x279c8a, - 0x2c54c8, - 0x2979c8, - 0x292108, - 0x2a864c, - 0x2dc7c5, - 0x29a8c8, - 0x39e106, - 0x2d1d06, - 0x37af07, - 0x245705, - 0x27cb45, - 0x2f20c9, - 0x212747, - 0x2b2ec5, - 0x21e9c7, - 0x205d03, - 0x2bd705, - 0x366548, - 0x2d1687, - 0x297889, - 0x2d7985, - 0x2f4504, - 0x2a1788, - 0x2cf787, - 0x2a33c8, - 0x2740c8, - 0x32c005, - 0x334c86, - 0x257706, - 0x2e7649, - 0x315b87, - 0x2ac986, - 0x30e947, - 0x217d43, - 0x24b084, - 0x298c45, - 0x2ae0c4, - 0x236844, - 0x27adc7, - 0x3affc7, - 0x239a04, - 0x2976d0, - 0x3056c7, - 0x205285, - 0x22ae8c, - 0x2018c4, - 0x2bee08, - 0x236fc9, - 0x2ffb86, - 0x2a03c8, - 0x25a7c4, - 0x25a7c8, - 0x231ec6, - 0x229248, - 0x298946, - 0x2c828b, - 0x205705, - 0x2c3248, - 0x21a3c4, - 0x27c04a, - 0x297889, - 0x2e0d06, - 0x2160c8, - 0x258645, - 0x301944, - 0x2bed06, - 0x36eb08, - 0x27ab48, - 0x345d06, - 0x31d6c4, - 0x30a3c6, - 0x2adb07, - 0x2745c7, - 0x26460f, - 0x2074c7, - 0x2f03c7, - 0x2d1bc5, - 0x2ed845, - 0x29c6c9, - 0x28ae86, - 0x278fc5, - 0x27c6c7, - 0x37b188, - 0x293705, - 0x295506, - 0x213e88, - 0x29e2ca, - 0x282988, - 0x287947, - 0x22e306, - 0x27d606, - 0x21f283, - 0x2042c3, - 0x279e49, - 0x39ed09, - 0x2bec06, - 0x2d7985, - 0x2a84c8, - 0x2160c8, - 0x387808, - 0x38c3cb, - 0x2c5387, - 0x2fd189, - 0x264888, - 0x33c7c4, - 0x2c2a08, - 0x2895c9, - 0x2acc85, - 0x334407, - 0x24b105, - 0x27aa48, - 0x28c3cb, - 0x291910, - 0x2a8105, - 0x21a30c, - 0x228d05, - 0x2032c3, - 0x2a2c06, - 0x2b6e04, - 0x231606, - 0x298387, - 0x213f04, - 0x2415c8, - 0x36a08d, - 0x2d9685, - 0x23fd84, - 0x219904, - 0x27d0c9, - 0x297408, - 0x30afc7, - 0x231f48, - 0x27b548, - 0x275fc5, - 0x331547, - 0x275f47, - 0x20af07, - 0x267109, - 0x235989, - 0x23f346, - 0x2b39c6, - 0x264846, - 0x25b8c5, - 0x3af344, - 0x204506, - 0x204a46, - 0x276008, - 0x26580b, - 0x2630c7, - 0x20ec44, - 0x317d86, - 0x203107, - 0x348b45, - 0x318f05, - 0x201e84, - 0x235906, - 0x204588, - 0x27b109, - 0x257c86, - 0x282f08, - 0x243306, - 0x33ce48, - 0x2d8a8c, - 0x275e86, - 0x29284d, - 0x292ccb, - 0x279305, - 0x203707, - 0x306306, - 0x3aea88, - 0x23f3c9, - 0x2e7288, - 0x205285, - 0x2ecc07, - 0x27a208, - 0x384789, - 0x2a05c6, - 0x33bfca, - 0x3ae808, - 0x2e70cb, - 0x2c608c, - 0x25a8c8, - 0x277506, - 0x334708, - 0x29df47, - 0x2cfa09, - 0x28ba8d, - 0x2961c6, - 0x3017c8, - 0x39dcc9, - 0x2b6188, - 0x27de08, - 0x2b7f8c, - 0x2b9347, - 0x2b9f07, - 0x266f45, - 0x3a4d47, - 0x37b048, - 0x2bed86, - 0x257b0c, - 0x2e4988, - 0x2c44c8, - 0x24b5c6, - 0x3b1d87, - 0x23f544, - 0x259348, - 0x356e4c, - 0x3a1a0c, - 0x227f05, - 0x393d47, - 0x31d646, - 0x3b1d06, - 0x297e48, - 0x38c284, - 0x225d4b, - 0x22844b, - 0x22e306, - 0x369f07, - 0x307d45, - 0x26ca85, - 0x225e86, - 0x258605, - 0x216d05, - 0x3accc7, - 0x273a89, - 0x233504, - 0x2722c5, - 0x2d7645, - 0x254448, - 0x22b4c5, - 0x2a7809, - 0x2af2c7, - 0x2af2cb, - 0x2d1046, - 0x228789, - 0x2804c8, - 0x271c05, - 0x20b008, - 0x2359c8, - 0x207ec7, - 0x27d4c7, - 0x27ae49, - 0x229187, - 0x32d3c9, - 0x2aaf4c, - 0x312a08, - 0x2b9b49, - 0x2be7c7, - 0x27b609, - 0x3b0107, - 0x2c6188, - 0x3afac5, - 0x2fb046, - 0x2b8788, - 0x2f8b88, - 0x279b49, - 0x216d47, - 0x26cb45, - 0x20e3c9, - 0x2c4086, - 0x28b884, - 0x2e6f46, - 0x233908, - 0x2426c7, - 0x265a08, - 0x367fc9, - 0x261a47, - 0x299886, - 0x2037c4, - 0x364689, - 0x3313c8, - 0x24b487, - 0x306e46, - 
0x3b20c6, - 0x330b44, + 0x2c4145, + 0x270f45, + 0x2b2f86, + 0x351746, + 0x387046, + 0x2b8bc4, + 0x286209, + 0x28d146, + 0x30e50a, + 0x222848, + 0x309148, + 0x361d8a, + 0x2145c5, + 0x2a1585, + 0x37f588, + 0x2b6348, + 0x21b507, + 0x293846, + 0x320d48, + 0x3674c7, + 0x285188, + 0x2b9206, + 0x2885c8, + 0x29ad46, + 0x227c87, + 0x272b06, + 0x38ec86, + 0x25d9ca, + 0x384706, + 0x2d9dc9, + 0x2b5446, + 0x2e3d8a, + 0x2e4449, + 0x362586, + 0x2ba844, + 0x237c4d, + 0x28c507, + 0x3268c6, + 0x2c4a05, + 0x3645c5, + 0x375846, + 0x2d1649, + 0x2b4287, 0x27f886, - 0x205c83, - 0x308149, - 0x2056c6, - 0x2a61c5, - 0x29a646, - 0x2a3505, - 0x27a688, - 0x25a607, - 0x362446, - 0x355c86, - 0x32b888, - 0x29c847, - 0x296205, - 0x29ab48, - 0x39ea48, - 0x3ae808, - 0x228bc5, - 0x2fb0c6, - 0x2f1fc9, - 0x257584, - 0x3760cb, - 0x21b30b, - 0x22bb49, - 0x205d03, - 0x256385, - 0x205986, - 0x229908, - 0x22de44, - 0x2c7f86, - 0x2c95c9, - 0x2c5b05, - 0x3acc06, - 0x2cf786, - 0x2160c4, - 0x2a1b4a, - 0x2a6108, - 0x2f8b86, - 0x368a05, - 0x204887, - 0x301547, - 0x334c84, - 0x21b547, - 0x2b0044, - 0x2c0a06, - 0x202e03, - 0x267105, - 0x373445, - 0x207708, - 0x2ae005, - 0x275bc9, - 0x259187, - 0x25918b, - 0x2a2d8c, - 0x2a3a0a, - 0x30bc07, - 0x200a83, - 0x2d3948, - 0x228d85, - 0x293785, - 0x338944, - 0x2c6086, - 0x236fc6, - 0x27f8c7, - 0x3656cb, - 0x29abc4, - 0x3821c4, - 0x26b904, - 0x2c25c6, - 0x213f04, - 0x2faec8, - 0x338745, - 0x23fec5, - 0x387747, - 0x203809, - 0x350005, - 0x375a4a, - 0x37b2c9, - 0x290f8a, - 0x333bc9, - 0x353144, - 0x2cda05, - 0x2c3d08, - 0x37fdcb, - 0x2f9685, - 0x38d4c6, - 0x2159c4, - 0x276106, - 0x2618c9, - 0x317e47, - 0x30b308, - 0x23fc06, - 0x2ada87, - 0x27ab48, - 0x38f586, - 0x280204, - 0x35eb87, - 0x34e905, - 0x360c07, - 0x204604, - 0x306286, - 0x218bc8, - 0x292e88, - 0x3a4ac7, - 0x217d88, - 0x2b3c45, - 0x205b44, - 0x3604c8, - 0x217e84, - 0x207ec5, - 0x2ed984, - 0x202c87, - 0x286207, - 0x27b748, - 0x2a3546, - 0x2adf85, - 0x2759c8, - 0x282b88, - 0x29c009, - 0x21b606, - 0x231948, - 0x27beca, - 0x348bc8, - 0x2d8045, - 0x275586, - 0x26d888, - 0x2eccca, - 0x341107, - 0x283985, - 0x28ef48, - 0x2b1184, - 0x24d606, - 0x2ba688, - 0x205bc6, - 0x380dc8, - 0x2573c7, - 0x3b3e86, - 0x37c504, - 0x29ce07, - 0x2fac04, - 0x261887, - 0x23108d, - 0x22bbc5, - 0x2d148b, - 0x298a46, - 0x24cb48, - 0x241584, - 0x272086, - 0x277e86, - 0x334a47, - 0x29250d, - 0x25fd07, - 0x300248, - 0x29fb05, - 0x284008, - 0x2bd146, - 0x2b3cc8, - 0x20e886, - 0x367707, - 0x368189, - 0x33f9c7, - 0x2853c8, - 0x26f705, - 0x21ed88, - 0x3b1c45, - 0x23b2c5, - 0x333e45, - 0x226743, - 0x27ca44, - 0x27d645, - 0x347b09, - 0x31b646, - 0x2b2b08, - 0x2ecec5, - 0x312447, - 0x249dca, - 0x3acb49, - 0x2a8d8a, - 0x2c9d48, - 0x21e80c, - 0x27c74d, - 0x2f86c3, - 0x380cc8, - 0x378a05, - 0x29e086, - 0x381dc6, - 0x2e3985, - 0x30ea49, - 0x310045, - 0x2759c8, - 0x279146, - 0x33f4c6, - 0x2a1649, - 0x38ed87, - 0x28c686, - 0x249d48, - 0x2c0988, - 0x2d0047, - 0x2293ce, - 0x2bd385, - 0x384685, - 0x205ac8, - 0x322d07, - 0x214782, - 0x2b7b04, - 0x23150a, - 0x24b548, - 0x203206, - 0x2949c8, - 0x257706, - 0x335988, - 0x2ac988, - 0x23b284, - 0x328945, - 0x683c04, - 0x683c04, - 0x683c04, - 0x203983, - 0x3b1f46, - 0x275e86, - 0x29924c, - 0x205b03, - 0x279c86, - 0x213f84, - 0x212b48, - 0x2c9405, - 0x231606, - 0x2b5ec8, - 0x2cb0c6, - 0x3623c6, - 0x29f788, - 0x298cc7, - 0x228f49, - 0x2e96ca, - 0x20abc4, - 0x28ae45, - 0x2a9fc5, - 0x2128c6, - 0x29da86, - 0x299c86, - 0x2ec386, - 0x229084, - 0x22908b, - 0x233904, - 0x204905, - 0x2ab5c5, - 0x203b86, - 0x359288, - 0x27c607, - 0x30b0c4, - 0x259cc3, - 
0x2b0c85, - 0x2e6e07, - 0x2a4449, - 0x27c50b, - 0x27f8c7, - 0x207607, - 0x2b5dc8, - 0x312587, - 0x2a4686, - 0x242e88, - 0x299e8b, - 0x327086, - 0x213a89, - 0x29a005, - 0x2fee83, - 0x3acc06, - 0x2572c8, - 0x20e943, - 0x2e6f03, - 0x27ab46, - 0x257706, - 0x38ac8a, - 0x277545, - 0x277ccb, - 0x29a58b, - 0x240a03, - 0x20f943, - 0x2affc4, - 0x367b47, - 0x257344, - 0x2039c4, - 0x39df84, - 0x348ec8, - 0x368948, - 0x30e389, + 0x2c9546, + 0x28ae49, + 0x264a04, + 0x2d4a44, + 0x3ac808, + 0x245c46, + 0x277308, + 0x2e66c8, + 0x202fc7, + 0x3a80c9, + 0x387247, + 0x2b500a, + 0x2498cf, + 0x250b0a, + 0x233c45, + 0x27e345, + 0x218745, + 0x303b07, + 0x20e183, + 0x2cf888, + 0x3028c6, + 0x3029c9, + 0x2d4006, + 0x3aeb47, + 0x29e4c9, + 0x3194c8, + 0x2cbc87, + 0x30d803, + 0x33c705, + 0x20e105, + 0x2b8a0b, + 0x351304, + 0x257984, + 0x27cbc6, + 0x30e887, + 0x38b10a, + 0x2757c7, + 0x38c807, + 0x283305, + 0x200045, + 0x240909, + 0x38ec86, + 0x27564d, + 0x35af05, + 0x29f4c3, + 0x20ad83, + 0x34f785, + 0x347845, + 0x255908, + 0x280047, + 0x2d47c6, + 0x2a36c6, + 0x2296c5, + 0x231e47, + 0x202ac7, + 0x33f1c7, + 0x2d660a, + 0x2b4908, + 0x2b8bc4, + 0x254907, + 0x281607, + 0x3400c6, + 0x26f8c7, + 0x2eaa08, + 0x2e9e88, + 0x2abd86, + 0x2d1ec8, + 0x2008c4, + 0x22cb46, + 0x247d86, + 0x216646, + 0x3a8c46, + 0x22d9c4, + 0x255c06, + 0x2c31c6, + 0x29d406, + 0x235ec6, + 0x20ac46, + 0x2ea846, + 0x2d46c8, + 0x3af1c8, + 0x2d6e48, + 0x25b448, + 0x37f506, + 0x212485, + 0x2e2006, + 0x2b2105, + 0x388c87, + 0x216605, + 0x2136c3, + 0x203ec5, + 0x33fb44, + 0x20ad85, + 0x2266c3, + 0x338007, + 0x34bc88, + 0x351c06, + 0x32250d, + 0x27e306, + 0x29c985, + 0x2d9743, + 0x2be689, + 0x264b86, + 0x23c0c6, + 0x2a4ec4, + 0x250a87, + 0x233006, + 0x2b4545, + 0x234a83, + 0x207ac4, + 0x2817c6, + 0x2ded04, + 0x32b8c8, + 0x39ba49, + 0x24d849, + 0x2a4cca, + 0x387acd, + 0x208d07, + 0x224bc6, + 0x20a684, + 0x28b309, + 0x28a088, + 0x28c106, + 0x23dfc6, + 0x26f8c7, + 0x2b9a46, + 0x21f706, + 0x3ac246, + 0x3a110a, + 0x219b08, + 0x2464c5, + 0x26fd09, + 0x28568a, + 0x2fa988, + 0x2a0ec8, + 0x29bd48, + 0x2af08c, + 0x316305, + 0x2a3948, + 0x2e8e06, + 0x319746, + 0x3aea07, + 0x2756c5, + 0x287645, + 0x24d709, + 0x213487, + 0x302985, + 0x227487, + 0x20ad83, + 0x2c8485, + 0x20b8c8, + 0x25d647, + 0x2a0d89, + 0x2de405, + 0x307784, + 0x2a6508, + 0x363207, + 0x2cbe48, + 0x368c48, + 0x2dc805, + 0x304286, + 0x278686, + 0x2ac1c9, + 0x31c407, + 0x2b29c6, + 0x3b3907, + 0x221d03, + 0x247ac4, + 0x2a7885, + 0x231f84, + 0x383c84, + 0x286947, + 0x35bdc7, + 0x27fa44, + 0x2a0bd0, + 0x367c87, + 0x200045, + 0x2536cc, + 0x225344, + 0x2b1588, + 0x227b89, + 0x2b4e06, + 0x220d88, + 0x247344, + 0x247348, + 0x22ec86, + 0x235d48, + 0x2a1c06, + 0x2d328b, + 0x202385, + 0x2cb988, + 0x216ac4, + 0x39be8a, + 0x2a0d89, + 0x381346, + 0x218808, + 0x25ebc5, + 0x2b69c4, + 0x2b1486, + 0x33f088, + 0x285b88, + 0x340bc6, + 0x31d104, + 0x23cac6, + 0x3872c7, + 0x27db47, + 0x26f8cf, + 0x205547, + 0x362647, + 0x38eb45, + 0x352245, + 0x2a52c9, + 0x30e1c6, + 0x284045, + 0x2871c7, + 0x2c1108, + 0x29d505, + 0x272b06, + 0x222688, + 0x224f0a, + 0x2e13c8, + 0x28f187, + 0x249d06, + 0x26fcc6, + 0x20df43, + 0x218303, + 0x285849, + 0x377e09, + 0x2b0586, + 0x2de405, + 0x2163c8, + 0x218808, + 0x354dc8, + 0x3ac2cb, + 0x322747, + 0x30b249, + 0x26fb48, + 0x335844, + 0x349588, + 0x291409, + 0x2b2cc5, + 0x303a07, + 0x247b45, + 0x285a88, + 0x293e8b, + 0x29b550, + 0x2ae605, + 0x216a0c, + 0x2d4985, + 0x283383, + 0x29f386, + 0x2c0984, + 0x203cc6, + 0x2a1647, + 0x222704, + 0x24b388, + 0x2cf74d, + 0x35e245, + 0x208d44, + 0x233984, + 0x287bc9, + 
0x2990c8, + 0x317647, + 0x22ed08, + 0x2862c8, + 0x27fb85, + 0x20f747, + 0x27fb07, + 0x238287, + 0x270f49, + 0x232e89, + 0x242d86, + 0x2bf606, + 0x26fb06, + 0x289845, + 0x39b744, + 0x3b0e86, + 0x3b5306, + 0x27fbc8, + 0x2d600b, + 0x26de87, + 0x20a684, + 0x364a46, + 0x367a47, + 0x34f0c5, + 0x263645, + 0x212dc4, + 0x232e06, + 0x3b0f08, + 0x28b309, + 0x252f86, + 0x289a48, + 0x2b4606, + 0x342708, + 0x34c34c, + 0x27fa46, + 0x29c64d, + 0x29cacb, + 0x253145, + 0x202c07, + 0x200946, + 0x3343c8, + 0x242e09, + 0x393c88, + 0x200045, + 0x2e2a87, + 0x284b88, + 0x358649, + 0x344106, + 0x252e8a, + 0x334148, + 0x393acb, + 0x3298cc, + 0x247448, + 0x280e46, + 0x303d08, + 0x3a8347, + 0x363489, + 0x29304d, + 0x29f986, + 0x21e608, + 0x3af089, + 0x2bfd08, + 0x2886c8, + 0x2c3a0c, + 0x2c5047, + 0x2c5507, + 0x270d85, + 0x31e5c7, 0x2c0fc8, - 0x3065c7, - 0x2293c6, - 0x2b274f, - 0x2bd4c6, - 0x2c9384, - 0x36878a, - 0x2e6d07, - 0x3a37c6, - 0x28b8c9, - 0x30e305, - 0x207845, - 0x30e446, - 0x21eec3, - 0x2b11c9, - 0x218ac6, - 0x367d89, - 0x397946, - 0x267105, - 0x228305, - 0x2074c3, - 0x367c88, - 0x2df587, - 0x334d84, - 0x2129c8, - 0x2d3ac4, - 0x2d4646, - 0x2a2c06, - 0x23e7c6, - 0x2c3109, - 0x293705, - 0x2d3d46, - 0x264ac9, - 0x3ac846, - 0x2b2846, - 0x387c46, - 0x2119c5, - 0x2ed986, - 0x367704, - 0x3afac5, - 0x2b8784, - 0x309246, - 0x358004, - 0x202c83, - 0x283645, - 0x2356c8, - 0x21e007, - 0x2b4549, - 0x283888, - 0x294191, - 0x2cf80a, - 0x22e247, - 0x2ee8c6, - 0x213f84, - 0x2b8888, - 0x239748, - 0x29434a, - 0x2a75cd, - 0x29c1c6, - 0x29f886, - 0x29cec6, - 0x2bab47, - 0x300305, - 0x250ec7, - 0x212a85, - 0x2af404, - 0x2a7006, - 0x27f707, - 0x2b0ecd, - 0x26d7c7, - 0x26bbc8, - 0x275cc9, - 0x275486, - 0x2a0545, - 0x210184, - 0x233a06, - 0x334b86, - 0x24b6c6, - 0x297088, - 0x211883, - 0x203b43, - 0x323585, - 0x3112c6, - 0x2ac945, - 0x23fe08, - 0x29854a, - 0x2f5cc4, - 0x212b48, - 0x292108, - 0x2039c7, - 0x2ecf89, - 0x2b5ac8, - 0x27b187, - 0x264fc6, - 0x205bca, - 0x233a88, - 0x2c5ec9, - 0x2974c8, - 0x21adc9, - 0x2e7387, - 0x2d9005, - 0x226986, - 0x2bec08, - 0x24ccc8, - 0x30bec8, - 0x22e408, - 0x204905, - 0x200884, - 0x2df288, - 0x20bdc4, - 0x3339c4, - 0x267105, - 0x28d947, - 0x2035c9, - 0x334847, - 0x231985, - 0x273686, - 0x346d46, - 0x213bc4, - 0x2a1986, - 0x2addc4, - 0x283f06, - 0x3b0586, - 0x2150c6, - 0x205285, - 0x23fcc7, - 0x200a83, - 0x3334c9, - 0x32b688, - 0x2129c4, - 0x27b00d, - 0x292f88, - 0x2f0848, - 0x2c5e46, - 0x368289, - 0x3acb49, - 0x2615c5, - 0x29864a, - 0x2863ca, - 0x28b24c, - 0x28b3c6, - 0x274446, - 0x2bd646, - 0x269509, - 0x29e2c6, - 0x250f06, - 0x310106, - 0x259348, - 0x217d86, - 0x2c344b, - 0x28dac5, - 0x23fec5, - 0x2746c5, - 0x202606, - 0x205b83, - 0x23e746, - 0x26d747, - 0x2b8745, - 0x379c85, - 0x3a62c5, - 0x2eb2c6, - 0x261684, - 0x311e06, - 0x28f789, - 0x20248c, - 0x2af148, - 0x28f8c4, - 0x2ed746, - 0x298b46, - 0x2572c8, - 0x2160c8, - 0x202389, - 0x204887, - 0x238509, - 0x24c346, - 0x22f904, - 0x20edc4, - 0x27a944, - 0x27ab48, - 0x20340a, - 0x34ff86, - 0x353d47, - 0x2c7687, - 0x228885, - 0x2a9f84, - 0x289586, - 0x300346, - 0x235bc3, - 0x32b4c7, - 0x273fc8, - 0x26170a, - 0x30fa88, - 0x29ddc8, - 0x358045, - 0x279405, - 0x2631c5, - 0x228c46, - 0x229d06, - 0x3aff05, - 0x308389, - 0x2a9d8c, - 0x263287, - 0x2943c8, - 0x258945, - 0x683c04, - 0x2e3d84, - 0x2d17c4, - 0x214b06, - 0x29b10e, - 0x2078c7, - 0x2bad45, - 0x25750c, - 0x2c0847, - 0x27f687, - 0x2806c9, - 0x218189, - 0x283985, - 0x32b688, - 0x2f1fc9, - 0x2f3d05, - 0x2b8688, - 0x2c2c06, - 0x360746, - 0x2f6284, - 0x33c1c8, - 0x248283, - 0x3630c4, - 0x2b0d05, - 
0x305547, - 0x201ec5, - 0x27bd89, - 0x38040d, - 0x2a1f86, - 0x2e9644, - 0x2650c8, - 0x2738ca, - 0x21fe87, - 0x23a245, - 0x203c43, - 0x29a74e, - 0x25770c, - 0x2f99c7, - 0x29b2c7, - 0x204643, - 0x29e305, - 0x2d17c5, - 0x294d88, - 0x291f49, - 0x36e986, - 0x257344, - 0x22e186, - 0x32ffcb, - 0x3a694c, - 0x35dc47, - 0x2c90c5, - 0x39e948, - 0x2cfe05, - 0x368787, - 0x33b7c7, - 0x248285, - 0x205b83, - 0x371284, - 0x2041c5, - 0x383505, - 0x383506, - 0x28e848, - 0x27f707, - 0x3820c6, - 0x200a06, - 0x333d86, - 0x265689, - 0x331647, - 0x378186, - 0x3a6ac6, - 0x248346, - 0x2a8405, - 0x399a86, - 0x398f45, - 0x22b548, - 0x29154b, - 0x289386, - 0x2c76c4, - 0x2eca89, - 0x259184, - 0x2c2b88, - 0x2aab47, - 0x27dd04, - 0x2b4e88, - 0x2b9904, - 0x2a8444, - 0x3a26c5, - 0x2d96c6, - 0x348e07, - 0x23fd43, - 0x299945, - 0x316144, - 0x3846c6, - 0x261648, - 0x323ac5, - 0x28d3c9, - 0x20e5c5, - 0x2d6288, - 0x34a5c7, - 0x388948, - 0x2b4387, - 0x2f0489, - 0x362546, - 0x336186, - 0x310104, - 0x264f05, - 0x2f490c, - 0x2746c7, - 0x274cc7, - 0x2c7548, - 0x2a1f86, - 0x26d684, - 0x31b184, - 0x27acc9, - 0x2bd746, - 0x219747, - 0x349b84, - 0x31b746, - 0x27f285, - 0x2a3087, - 0x2c33c6, - 0x33be89, - 0x28b087, - 0x264607, - 0x2a14c6, - 0x23f785, - 0x278e48, - 0x218948, - 0x23acc6, - 0x323b05, - 0x251a46, - 0x206583, - 0x294c09, - 0x299a0e, - 0x2b3188, - 0x2d3bc8, - 0x23aacb, - 0x28d606, - 0x209c84, - 0x27c344, - 0x299b0a, - 0x21a207, - 0x378245, - 0x213a89, - 0x2b7785, - 0x333a07, - 0x2ff984, - 0x324c07, - 0x2d6a88, - 0x2ce546, - 0x34a889, - 0x2b5bca, - 0x21a186, - 0x292ac6, - 0x2ab545, - 0x37e405, - 0x3261c7, - 0x244208, - 0x27f1c8, - 0x23b286, - 0x228385, - 0x29d80e, - 0x328cc4, - 0x23ac45, - 0x273009, - 0x28ac88, - 0x287886, - 0x296d0c, - 0x298150, - 0x29ad4f, - 0x29c5c8, - 0x30bc07, - 0x205285, - 0x27d645, - 0x348c89, - 0x28f149, - 0x30a4c6, - 0x2f9707, - 0x393cc5, - 0x320d49, - 0x32d106, - 0x29e10d, - 0x27a809, - 0x2039c4, - 0x2b2f08, - 0x2df349, - 0x350146, - 0x273785, - 0x336186, - 0x30b1c9, - 0x38e148, - 0x210785, - 0x27bfc4, - 0x296ecb, - 0x350005, - 0x226786, - 0x27ca86, - 0x25f1c6, - 0x38c5cb, - 0x28d4c9, - 0x3b0245, - 0x388707, - 0x2cf786, - 0x231346, - 0x27bc48, - 0x2d97c9, - 0x26b98c, - 0x2e6c08, - 0x350246, - 0x345d03, - 0x334606, - 0x27d305, - 0x278008, - 0x227d86, - 0x2a32c8, - 0x245885, - 0x294505, - 0x2a1d48, - 0x301687, - 0x381d07, - 0x27f8c7, - 0x2a03c8, - 0x30bd48, - 0x262286, - 0x309087, - 0x24af47, - 0x27d1ca, - 0x24c243, - 0x202606, - 0x203545, - 0x231504, - 0x275cc9, - 0x2f0404, - 0x21e084, - 0x2989c4, - 0x29b2cb, - 0x2df4c7, - 0x29da45, - 0x2913c8, - 0x273686, - 0x273688, - 0x277486, - 0x287d45, - 0x288685, - 0x28a0c6, - 0x28b548, - 0x28b808, - 0x275e86, - 0x29120f, - 0x2946d0, - 0x358a45, - 0x200a83, - 0x24a985, - 0x2fd0c8, - 0x28f049, - 0x3ae808, - 0x34a708, - 0x330608, - 0x2df587, - 0x273349, - 0x2a34c8, - 0x2785c4, - 0x298848, - 0x254509, - 0x30aac7, - 0x296144, - 0x334908, - 0x23fa8a, - 0x2c2446, - 0x29c1c6, - 0x21b4c9, - 0x298387, - 0x2c2f88, - 0x332348, - 0x349a08, - 0x353885, - 0x37f385, - 0x23fec5, - 0x2d1785, - 0x371dc7, - 0x205b85, - 0x2b8745, - 0x36fd86, - 0x3ae747, - 0x37fd07, - 0x23fd86, - 0x2ca285, - 0x226786, - 0x25a685, - 0x2c06c8, - 0x31b5c4, - 0x3ac8c6, - 0x358844, - 0x301948, - 0x22534a, - 0x27670c, - 0x3658c5, - 0x2bac06, - 0x26bb46, - 0x323946, - 0x2fd2c4, - 0x27f545, - 0x2772c7, - 0x298409, - 0x2a4547, - 0x683c04, - 0x683c04, - 0x30af45, - 0x20f5c4, - 0x2966ca, - 0x273506, - 0x2e7044, - 0x3a3745, - 0x2eee85, - 0x300244, - 0x27c6c7, - 0x20e547, - 0x2c25c8, - 0x319188, - 0x210789, - 
0x2994c8, - 0x29688b, - 0x2128c4, - 0x35d745, - 0x279045, - 0x27f849, - 0x2d97c9, - 0x2ec988, - 0x327ac8, - 0x203b84, - 0x298b85, - 0x203443, - 0x212885, - 0x2d3dc6, - 0x291d8c, - 0x2189c6, - 0x25a6c6, - 0x287b05, - 0x2eb348, - 0x3a6bc6, - 0x2eea46, - 0x29c1c6, - 0x21f40c, - 0x24b884, - 0x333eca, - 0x287a48, - 0x291bc7, - 0x316046, - 0x36ea47, - 0x2e12c5, - 0x306e46, - 0x352386, - 0x381bc7, - 0x21e0c4, - 0x202d85, - 0x273004, - 0x2af487, - 0x273248, - 0x2742ca, - 0x27a087, - 0x23ae47, - 0x30bb87, - 0x2cff49, - 0x291d8a, - 0x229043, - 0x21dfc5, - 0x215103, - 0x39dfc9, - 0x24b308, - 0x2d1bc7, - 0x3ae909, - 0x218a46, - 0x2c6b08, - 0x39a745, - 0x282c8a, - 0x216249, - 0x26b649, - 0x37af07, - 0x239849, - 0x214fc8, - 0x2edb06, - 0x2badc8, - 0x2119c7, - 0x229187, - 0x37b2c7, - 0x2d1a48, - 0x2ed5c6, - 0x23f845, - 0x2772c7, - 0x2925c8, - 0x3587c4, - 0x2a58c4, - 0x28c587, - 0x2acd07, - 0x2f1e4a, - 0x2eda86, - 0x2f984a, - 0x2b7a47, - 0x328a87, - 0x23b384, - 0x32d484, - 0x2272c6, - 0x30ed84, - 0x30ed8c, - 0x3a2005, - 0x215f09, - 0x2d6404, - 0x300305, - 0x273848, - 0x28b8c5, - 0x305546, - 0x207c84, - 0x29044a, - 0x2b14c6, - 0x29228a, - 0x20ab87, - 0x265b45, - 0x21eec5, - 0x2288ca, - 0x2a1a85, - 0x29c0c6, - 0x20bdc4, - 0x2b0146, - 0x326285, - 0x227e46, - 0x3a4acc, - 0x2cba4a, - 0x264fc4, - 0x2293c6, - 0x298387, - 0x2c8744, - 0x259348, - 0x38d3c6, - 0x29d689, - 0x2c4b89, - 0x312b09, - 0x376286, - 0x211ac6, - 0x2baf07, - 0x3082c8, - 0x2118c9, - 0x2df4c7, - 0x2b3ac6, - 0x2adb07, - 0x29cd85, - 0x328cc4, - 0x2baac7, - 0x24b105, - 0x2846c5, - 0x2fe0c7, - 0x248148, - 0x39e8c6, - 0x29344d, - 0x294f8f, - 0x29a58d, - 0x21b3c4, - 0x2357c6, - 0x2cbe08, - 0x3100c5, - 0x27d388, - 0x207d8a, - 0x2039c4, - 0x330206, - 0x27e487, - 0x33fe07, - 0x298d89, - 0x2bad85, - 0x300244, - 0x32888a, - 0x2b5689, - 0x239947, - 0x268206, - 0x350146, - 0x298ac6, - 0x35ec46, - 0x2cb70f, - 0x2cbcc9, - 0x217d86, - 0x239646, - 0x29ed49, - 0x309187, - 0x2101c3, - 0x21f586, - 0x2042c3, - 0x2e3848, - 0x2ad947, - 0x29c7c9, - 0x2a2a88, - 0x381e48, - 0x216e86, - 0x331209, - 0x33b905, - 0x2a33c4, - 0x2d90c7, - 0x269585, - 0x21b3c4, - 0x29db08, - 0x21a4c4, - 0x302b87, - 0x31c3c6, - 0x2ad005, - 0x2974c8, - 0x35000b, - 0x32bb87, - 0x228b46, - 0x2bd544, - 0x209c06, - 0x267105, - 0x24b105, - 0x278bc9, - 0x27c2c9, - 0x2291c4, - 0x229205, - 0x229405, - 0x282b06, - 0x32b788, - 0x2b7186, - 0x273e0b, - 0x2ffa0a, - 0x2fad05, - 0x288706, - 0x2f59c5, - 0x3b2585, - 0x297b47, - 0x204308, - 0x238504, - 0x2614c6, - 0x28b886, - 0x215187, - 0x2fee44, - 0x277e86, - 0x239d85, - 0x239d89, - 0x211cc4, - 0x2aa109, - 0x275e86, - 0x2b9408, - 0x229405, - 0x2c7785, - 0x227e46, - 0x26b889, - 0x218189, - 0x25a746, - 0x28ad88, - 0x257608, - 0x2f5984, - 0x32e244, - 0x32e248, - 0x317a08, - 0x238609, - 0x2d3d46, - 0x29c1c6, - 0x3151cd, - 0x2c7f86, - 0x2d8949, - 0x254785, - 0x30e446, - 0x251008, - 0x311d45, - 0x24af84, - 0x267105, - 0x27b948, - 0x296489, - 0x2730c4, - 0x306286, - 0x2e74ca, - 0x2c54c8, - 0x2f1fc9, - 0x2d114a, - 0x3ae886, - 0x295148, - 0x368545, - 0x30f908, - 0x2b4485, - 0x218909, - 0x36c449, - 0x228e82, - 0x29a005, - 0x26c7c6, - 0x275dc7, - 0x3aacc5, - 0x2f8a86, - 0x2f7e08, - 0x2a1f86, - 0x2c3bc9, - 0x274dc6, - 0x27bac8, - 0x2a90c5, - 0x244046, - 0x367808, - 0x27ab48, - 0x3b0608, - 0x2fc788, - 0x399a84, - 0x22d8c3, - 0x2c3e04, - 0x22e106, - 0x29cdc4, - 0x2d3b07, - 0x2ee949, - 0x2bcd45, - 0x332346, - 0x21f586, - 0x28e68b, - 0x2fac46, - 0x318546, - 0x3ac9c8, - 0x379bc6, - 0x265943, - 0x396f83, - 0x328cc4, - 0x231845, - 0x243147, - 0x273248, - 0x27324f, - 0x2771cb, - 
0x32b588, - 0x306306, - 0x32b88e, - 0x227e43, - 0x2430c4, - 0x2fabc5, - 0x33db46, - 0x28968b, - 0x28da06, - 0x213f09, - 0x2ad005, - 0x389d88, - 0x206408, - 0x21804c, - 0x29b306, - 0x2128c6, - 0x2d7985, - 0x285188, - 0x276705, - 0x33c7c8, - 0x29a9ca, - 0x226809, - 0x683c04, - 0x31216582, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x323ac3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x201604, - 0x238483, - 0x2264c3, - 0x224103, - 0x224104, - 0x22d183, - 0x2374c4, - 0x2343c3, - 0x22d684, - 0x21eb03, - 0x3aaf87, - 0x211003, - 0x2025c3, - 0x32d208, - 0x2264c3, - 0x2aeecb, - 0x2e1a03, - 0x241f86, - 0x203e42, - 0x38660b, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x2264c3, - 0x280ec3, - 0x200cc3, - 0x200882, - 0x880c8, - 0x281045, - 0x2db108, - 0x2e7e08, - 0x216582, - 0x2a0f05, - 0x340ec7, - 0x200202, - 0x2417c7, - 0x201f82, - 0x23a887, - 0x36b2c9, - 0x318908, - 0x349889, - 0x32ed82, - 0x266707, - 0x25a4c4, - 0x340f87, - 0x2ff907, - 0x233e42, - 0x211003, - 0x20e842, - 0x205902, - 0x201502, - 0x206d42, - 0x208782, - 0x217642, - 0x2a8c45, - 0x2e3cc5, - 0x16582, - 0x343c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x12003, - 0x481, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x201604, - 0x202243, - 0x238483, - 0x2264c3, - 0x21ca03, - 0x340f2d86, - 0x107003, - 0x79ac5, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x216582, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x9502, - 0x880c8, - 0x441c4, - 0xd0205, - 0x200882, - 0x2ba384, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x35bb03, - 0x2a9c05, - 0x202243, - 0x332683, - 0x238483, - 0x201f43, - 0x2264c3, - 0x217643, - 0x224183, - 0x223ec3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x216582, - 0x2264c3, - 0x880c8, - 0x21eb03, - 0x880c8, - 0x316403, - 0x22d183, - 0x232144, - 0x2343c3, - 0x21eb03, - 0x2082c2, - 0x211003, - 0x238483, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x2082c2, - 0x2348c3, - 0x238483, - 0x2264c3, - 0x2db083, - 0x217643, - 0x200882, - 0x216582, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x241f85, - 0x1835c6, - 0x224104, - 0x203e42, - 0x880c8, - 0x200882, - 0x20448, - 0x216582, - 0xee46, - 0x167404, - 0x10f2cb, - 0x173606, - 0x131ac7, - 0x2343c3, - 0x21eb03, - 0x157f45, - 0x155dc4, - 0x202c43, - 0x4c207, - 0xcd884, - 0x238483, - 0x133184, - 0x2264c3, - 0x2e26c4, - 0x149708, - 0x155646, - 0x216582, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x2025c3, - 0x2264c3, - 0x2e1a03, - 0x203e42, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x201603, - 0x212444, - 0x238483, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x22d684, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x241f86, - 0x2343c3, - 0x21eb03, - 0x179ac3, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x131ac7, - 0x880c8, - 0x21eb03, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x3aa2d183, - 0x2343c3, - 0x238483, - 0x2264c3, - 0x880c8, - 0x200882, - 0x216582, - 0x22d183, - 0x21eb03, - 0x238483, - 0x201502, - 0x2264c3, - 0x309dc7, - 0x20b28b, - 0x200b03, - 0x2a06c8, - 0x308047, - 0x2017c6, - 0x2bba05, - 0x2f7989, - 0x20bc48, - 0x20bc49, - 0x20bc50, - 0x359fcb, - 0x2ea589, - 0x20c783, - 0x221749, - 0x232c46, - 0x232c4c, - 0x20be48, - 0x3ac688, - 0x26e089, - 0x29bace, - 0x37cc4b, - 0x38db4c, - 0x204803, - 0x2582cc, - 0x207209, - 0x2de107, - 0x23430c, - 0x39b60a, - 0x245dc4, - 0x3b08cd, - 0x258188, - 0x2ded8d, - 0x266b86, - 0x28a70b, 
- 0x209dc9, - 0x318407, - 0x31d846, - 0x320f49, - 0x332a4a, - 0x302708, - 0x2e1604, - 0x272187, - 0x226a87, - 0x349d84, - 0x20f244, - 0x27e989, - 0x326ec9, - 0x20a588, - 0x2114c5, - 0x392785, - 0x20d3c6, - 0x3b0789, - 0x20800d, - 0x38d5c8, - 0x20d2c7, - 0x2bba88, - 0x22eec6, - 0x3a1504, - 0x37f645, - 0x2055c6, - 0x206104, - 0x207107, - 0x20914a, - 0x2139c4, - 0x21a0c6, - 0x21aa49, - 0x21aa4f, - 0x21b00d, - 0x21b786, - 0x220050, - 0x220446, - 0x220b87, - 0x221087, - 0x22108f, - 0x222309, - 0x227746, - 0x229747, - 0x229748, - 0x229b09, - 0x28d708, - 0x2d7d07, - 0x20cd03, - 0x3852c6, - 0x204008, - 0x29bd8a, - 0x215749, - 0x20bd83, - 0x340dc6, - 0x26130a, - 0x2ef8c7, - 0x2ddf4a, - 0x377e0e, - 0x222446, - 0x29a207, - 0x214d86, - 0x2072c6, - 0x37f18b, - 0x21d18a, - 0x21768d, - 0x211b87, - 0x310288, - 0x310289, - 0x31028f, - 0x3b218c, - 0x278289, - 0x33948e, - 0x3ab08a, - 0x368dc6, - 0x37bf86, - 0x30420c, - 0x31370c, - 0x327688, - 0x33f8c7, - 0x2131c5, - 0x29e584, - 0x24fb0e, - 0x332cc4, - 0x238a87, - 0x26274a, - 0x382554, - 0x3839cf, - 0x221248, - 0x385188, - 0x370e8d, - 0x370e8e, - 0x38fec9, - 0x22fe88, - 0x22fe8f, - 0x23400c, - 0x23400f, - 0x235507, - 0x237bca, - 0x21f18b, - 0x23a0c8, - 0x23bb47, - 0x25b08d, - 0x252506, - 0x3b0a86, - 0x23e5c9, - 0x215d48, - 0x242188, - 0x24218e, + 0x2b1506, + 0x2aaccc, + 0x2f55c8, + 0x2d0d88, + 0x2ba286, + 0x20de87, + 0x242f84, + 0x25b448, + 0x28f50c, + 0x353d0c, + 0x233cc5, + 0x2d2887, + 0x31d086, + 0x20de06, + 0x349ac8, + 0x2027c4, + 0x22d04b, + 0x27e8cb, + 0x249d06, + 0x2cf5c7, + 0x31a2c5, + 0x276545, + 0x22d186, + 0x25eb85, + 0x3512c5, + 0x2cd5c7, + 0x27d1c9, + 0x351904, + 0x34ee05, + 0x2e6fc5, + 0x2dea88, + 0x2287c5, + 0x2bca49, + 0x37aac7, + 0x37aacb, + 0x244806, + 0x2d4409, + 0x349208, + 0x27c385, + 0x238388, + 0x232ec8, + 0x23a6c7, + 0x2e2f87, + 0x2869c9, + 0x235c87, + 0x289149, + 0x2acf8c, + 0x2b0588, + 0x2b6189, + 0x321f87, + 0x286389, + 0x35bf07, + 0x3299c8, + 0x3a8285, + 0x22cac6, + 0x2c4a48, + 0x2f0fc8, + 0x285549, + 0x351307, + 0x276605, + 0x36b6c9, + 0x2b9ec6, + 0x2323c4, + 0x2323c6, + 0x24e8c8, + 0x252847, + 0x2d6208, + 0x2d1f89, + 0x3a1e07, + 0x2a29c6, + 0x202cc4, + 0x203f49, + 0x20f5c8, + 0x2ba147, + 0x343e06, + 0x20e1c6, + 0x3a8484, + 0x247f86, + 0x201b83, + 0x296789, + 0x202346, + 0x2d2205, + 0x2a36c6, + 0x24f305, + 0x285008, + 0x247187, + 0x244b46, + 0x39c6c6, + 0x309148, + 0x2a5447, + 0x29f9c5, + 0x2a09c8, + 0x3ada88, + 0x334148, + 0x2d4845, + 0x22cb46, + 0x24d609, + 0x2ac044, + 0x24f18b, + 0x21f40b, + 0x2463c9, + 0x20ad83, + 0x25bf05, + 0x213a86, + 0x313788, + 0x249844, + 0x351c06, + 0x2d6749, + 0x2bc545, + 0x2cd506, + 0x363206, + 0x2163c4, + 0x2aec0a, + 0x2d2148, + 0x2f0fc6, + 0x2c2585, + 0x3b1987, + 0x231147, + 0x304284, + 0x21f647, + 0x2165c4, + 0x2165c6, + 0x203c83, + 0x270f45, + 0x350e85, + 0x205788, + 0x254ac5, + 0x27f789, + 0x25b287, + 0x25b28b, + 0x2a758c, + 0x2a810a, + 0x3363c7, + 0x204083, + 0x212188, + 0x2d4a05, + 0x29d585, + 0x20ae44, + 0x3298c6, + 0x227b86, + 0x247fc7, + 0x2349cb, + 0x22d9c4, + 0x2e8f04, + 0x219e04, + 0x2cd786, + 0x222704, + 0x22c948, + 0x33c5c5, + 0x244d85, + 0x354d07, + 0x202d09, + 0x347845, + 0x37584a, + 0x277789, + 0x29810a, + 0x3a1249, + 0x335fc4, + 0x2c9605, + 0x2b9b48, + 0x2d18cb, + 0x278285, + 0x2f0086, + 0x2200c4, + 0x27fcc6, + 0x3a1c89, + 0x364b07, + 0x317988, + 0x387e46, + 0x387247, + 0x285b88, + 0x380946, + 0x37f0c4, + 0x363f87, + 0x366085, + 0x377547, + 0x25b4c4, + 0x2008c6, + 0x2f1e08, + 0x29cc88, + 0x2e88c7, + 0x27d548, + 0x29ae05, + 0x20abc4, + 0x361c88, + 0x27d644, + 0x2186c5, + 0x2fac44, + 
0x3675c7, + 0x28d207, + 0x2864c8, + 0x2cbfc6, + 0x254a45, + 0x27f588, + 0x2e15c8, + 0x2a4c09, + 0x21f706, + 0x22e708, + 0x39bd0a, + 0x34f148, + 0x2e8605, + 0x2e2206, + 0x277648, + 0x2e2b4a, 0x20b387, - 0x25f8c5, - 0x243a85, - 0x202084, - 0x201a86, - 0x20a488, - 0x24f103, - 0x3b154e, + 0x28a645, + 0x298888, + 0x2b3c44, + 0x272206, + 0x2c5888, + 0x20ac46, + 0x239a88, + 0x29bfc7, + 0x209806, + 0x2ba844, + 0x28ba07, + 0x2b6804, + 0x3a1c47, + 0x23bf0d, + 0x21b585, + 0x2d144b, + 0x2a1d06, + 0x255788, + 0x24b344, + 0x27bc86, + 0x2817c6, + 0x304047, + 0x29c30d, + 0x226dc7, + 0x2b6d48, + 0x271a05, + 0x27f048, + 0x2c7ec6, + 0x29ae88, + 0x223a06, + 0x26a9c7, + 0x336689, + 0x33d2c7, + 0x28c3c8, + 0x279685, + 0x21c848, + 0x20dd45, + 0x396005, + 0x3a14c5, + 0x221443, + 0x235984, + 0x26fd05, + 0x2346c9, + 0x285f86, + 0x2eab08, + 0x2e2d45, + 0x2b8847, + 0x2aee8a, + 0x2cd449, + 0x2c98ca, + 0x2d6ec8, + 0x2272cc, + 0x28724d, + 0x2ff683, + 0x239988, + 0x207a85, + 0x224cc6, + 0x319346, + 0x2e7f05, + 0x3b3a09, + 0x358f45, + 0x27f588, + 0x2841c6, + 0x348806, + 0x2a63c9, + 0x38f247, + 0x294146, + 0x2aee08, + 0x216548, + 0x2e0dc7, + 0x235ece, + 0x2c8105, + 0x358545, + 0x20ab48, + 0x27f3c7, + 0x20e202, + 0x2c3584, + 0x203bca, + 0x2ba208, + 0x367b46, + 0x29e608, + 0x278686, + 0x31a7c8, + 0x2b29c8, + 0x395fc4, + 0x2b8d85, + 0x68a8c4, + 0x68a8c4, + 0x68a8c4, + 0x202403, + 0x20e046, + 0x27fa46, + 0x2a220c, + 0x209843, + 0x285686, + 0x215344, + 0x264b08, + 0x2d6585, + 0x203cc6, + 0x2bedc8, + 0x2d8206, + 0x244ac6, + 0x381148, + 0x2a7907, + 0x235a49, + 0x2d4bca, + 0x208a84, + 0x216605, + 0x2a70c5, + 0x264886, + 0x208d46, + 0x2a2dc6, + 0x2f9ec6, + 0x235b84, + 0x235b8b, + 0x231144, + 0x2a23c5, + 0x2b19c5, + 0x203086, + 0x3b5548, + 0x287107, + 0x317744, + 0x2453c3, + 0x2b3745, + 0x30a847, + 0x28700b, + 0x205687, + 0x2becc8, + 0x2e8b47, + 0x231646, + 0x24d1c8, + 0x2e318b, + 0x2067c6, + 0x213bc9, + 0x2e3305, + 0x30d803, + 0x2cd506, + 0x29bec8, + 0x214cc3, + 0x200a03, + 0x285b86, + 0x278686, + 0x375dca, + 0x280e85, + 0x28160b, + 0x2a360b, + 0x245103, + 0x202043, + 0x2b4f84, + 0x278447, + 0x247444, + 0x202ec4, + 0x2e8c84, + 0x34f448, + 0x2c24c8, + 0x3b2049, + 0x38e748, + 0x200c07, + 0x235ec6, + 0x2ea74f, + 0x2c8246, + 0x2d6504, + 0x2c230a, + 0x30a747, + 0x208386, + 0x292e89, + 0x3b1fc5, + 0x2058c5, + 0x3b2106, + 0x21c983, + 0x2b3c89, + 0x219c86, + 0x212009, + 0x38b106, + 0x270f45, + 0x2340c5, + 0x205543, + 0x278588, + 0x211607, + 0x3028c4, + 0x264988, + 0x2313c4, + 0x338d86, + 0x29f386, + 0x2419c6, + 0x2cb849, + 0x29d505, + 0x38ec86, + 0x2a2fc9, + 0x2c7606, + 0x2ea846, + 0x386e86, + 0x200b45, + 0x2fac46, + 0x26a9c4, + 0x3a8285, + 0x2c4a44, + 0x2b7846, + 0x35aec4, + 0x20f843, + 0x28a145, + 0x232bc8, + 0x2e9687, + 0x2bd949, + 0x28a548, + 0x29dc51, + 0x36328a, + 0x249c47, + 0x2ea1c6, + 0x215344, + 0x2c4b48, + 0x282f48, + 0x29de0a, + 0x2bc80d, + 0x2a4dc6, + 0x381246, + 0x28bac6, + 0x2c3fc7, + 0x2b6e05, + 0x262c07, + 0x264a45, + 0x37ac04, + 0x2ad586, + 0x216287, + 0x2b398d, + 0x277587, + 0x21a0c8, + 0x27f889, + 0x2e2106, + 0x344085, + 0x226704, + 0x24e9c6, + 0x304186, + 0x2ba386, + 0x29ee88, + 0x2179c3, + 0x203043, + 0x3598c5, + 0x2300c6, + 0x2b2985, + 0x388048, + 0x2a180a, + 0x2cee04, + 0x264b08, + 0x29bd48, + 0x202ec7, + 0x2e2e09, + 0x2be9c8, + 0x28b387, + 0x2936c6, + 0x20ac4a, + 0x24ea48, + 0x396449, + 0x299188, + 0x21cec9, + 0x2ea087, + 0x2effc5, + 0x3ac4c6, + 0x2b1388, + 0x285d08, + 0x2a1048, + 0x249e08, + 0x2a23c5, + 0x20f444, + 0x211308, + 0x208484, + 0x3a1044, + 0x270f45, + 0x297a87, + 0x202ac9, + 0x303e47, + 0x215f45, + 
0x27cdc6, + 0x34ebc6, + 0x203d44, + 0x2a6706, + 0x254884, + 0x27ef46, + 0x202886, + 0x214b06, + 0x200045, + 0x387f07, + 0x204083, + 0x206b49, + 0x308f48, + 0x264984, + 0x28b20d, + 0x29cd88, + 0x3053c8, + 0x3963c6, + 0x336789, + 0x2cd449, + 0x3a1985, + 0x2a190a, + 0x2adb4a, + 0x2af7cc, + 0x2af946, + 0x27d9c6, + 0x2c83c6, + 0x273209, + 0x224f06, + 0x262c46, + 0x359006, 0x25b448, - 0x29ff0b, - 0x366947, - 0x3a31c5, - 0x239506, - 0x2aa947, - 0x39a248, - 0x27efc9, - 0x28f685, - 0x283688, - 0x213446, - 0x38b30a, - 0x24fa09, - 0x2343c9, - 0x2343cb, - 0x364b88, - 0x349c49, - 0x211586, - 0x2b074a, - 0x35904a, - 0x237dcc, - 0x367287, - 0x2a998a, - 0x27258b, - 0x272599, - 0x2da488, - 0x242005, - 0x25b246, - 0x2ed389, - 0x318e06, - 0x21250a, - 0x2f31c6, - 0x212104, - 0x2bf38d, - 0x3412c7, - 0x212109, - 0x244d45, - 0x244e88, - 0x245389, - 0x2455c4, - 0x245cc7, - 0x245cc8, - 0x246347, - 0x263e88, - 0x24c807, - 0x36f505, - 0x2567cc, - 0x256e89, - 0x2d9a8a, - 0x38ec09, - 0x221849, - 0x26b00c, - 0x259b8b, - 0x259e48, - 0x25bac8, - 0x25ee84, - 0x27d9c8, - 0x282309, - 0x39b6c7, - 0x21ac86, - 0x399907, - 0x325e89, - 0x366ecb, - 0x324907, - 0x3714c7, - 0x20acc7, - 0x2ded04, - 0x2ded05, - 0x2a81c5, - 0x337ecb, - 0x3981c4, - 0x319fc8, - 0x25f4ca, - 0x213507, - 0x346547, - 0x288f12, - 0x283e06, - 0x231ac6, - 0x322fce, - 0x361506, - 0x28edc8, - 0x28ff4f, - 0x2df148, - 0x284c08, - 0x35f54a, - 0x35f551, - 0x2a274e, - 0x23be4a, - 0x23be4c, - 0x230087, - 0x230090, - 0x204ac8, - 0x2a2945, - 0x2aad0a, - 0x20614c, - 0x2b3e0d, - 0x2ac3c6, - 0x2ac3c7, - 0x2ac3cc, - 0x2efc8c, - 0x2da98c, - 0x28c98b, - 0x283044, - 0x21b644, - 0x3741c9, - 0x2d72c7, - 0x2e94c9, - 0x358e89, - 0x36bb47, - 0x39b486, - 0x39b489, - 0x3a4a43, - 0x2a208a, - 0x29e7c7, - 0x30b6cb, - 0x21750a, - 0x23a9c4, - 0x353e86, - 0x27a309, - 0x30ec04, - 0x3a20ca, - 0x228e45, - 0x2b6485, - 0x2b648d, - 0x2b67ce, - 0x39f105, - 0x3167c6, - 0x241b87, - 0x26748a, - 0x39a446, - 0x35a2c4, - 0x35e2c7, - 0x210ecb, - 0x22ef87, - 0x202104, - 0x265d46, - 0x265d4d, - 0x325b4c, - 0x32fd86, - 0x38d7ca, - 0x225806, - 0x210288, - 0x263507, - 0x23660a, - 0x23c3c6, - 0x211a83, - 0x251186, - 0x203e88, - 0x296a8a, - 0x24aa47, - 0x24aa48, - 0x267b84, - 0x27b2c7, - 0x2c4108, - 0x2a3648, - 0x286808, - 0x27fa0a, - 0x2cf145, - 0x2cf3c7, - 0x23bc93, - 0x22d206, - 0x2b1648, - 0x224709, - 0x241688, - 0x216f0b, - 0x2b7848, - 0x211004, - 0x2a1e46, - 0x3b3106, - 0x2d9509, - 0x385c07, - 0x2568c8, - 0x287bc6, - 0x3a17c4, - 0x2c2e45, - 0x2bdc88, - 0x2be28a, - 0x2bf008, - 0x2c3906, - 0x29718a, - 0x233588, - 0x2c8548, - 0x2c9908, - 0x2c9f46, - 0x2cc006, - 0x31098c, - 0x2cc5d0, - 0x286fc5, - 0x2def48, - 0x2f8310, - 0x2def50, - 0x20bace, - 0x31060e, - 0x310614, - 0x31d9cf, - 0x31dd86, - 0x342211, - 0x349e53, - 0x34a2c8, - 0x27fd45, - 0x358288, - 0x20f985, - 0x22b24c, - 0x24bf89, - 0x2388c9, - 0x399687, - 0x240589, - 0x215a47, - 0x2b9d86, - 0x37f447, - 0x20c185, - 0x212043, - 0x24f2c9, - 0x217a49, - 0x379ac3, - 0x3aabc4, - 0x34ae4d, - 0x3558cf, - 0x2fe005, - 0x31aa06, - 0x20cfc7, - 0x21d5c7, - 0x285d86, - 0x285d8b, - 0x2a3bc5, - 0x258ac6, - 0x208487, - 0x26d109, - 0x2de6c6, - 0x364505, - 0x21c08b, - 0x22e946, - 0x246045, - 0x27e148, - 0x2d8748, - 0x2ca44c, - 0x2ca450, - 0x2ce7c9, - 0x2d5587, - 0x2f6c4b, - 0x2d5d46, - 0x2d7bca, - 0x2d928b, - 0x2d9d0a, - 0x2d9f86, - 0x2daf45, - 0x307f46, - 0x274f88, - 0x39974a, - 0x370b1c, - 0x2e1acc, - 0x2e1dc8, - 0x241f85, - 0x2e42c7, - 0x29b706, - 0x273c45, - 0x21dd86, - 0x285f48, - 0x2b5907, - 0x29b9c8, - 0x29a30a, - 0x3212cc, - 0x321549, - 0x224a87, - 0x282844, - 
0x244386, - 0x28478a, - 0x358f85, - 0x3637cc, - 0x364f08, - 0x360d08, - 0x3b188c, - 0x20c8cc, - 0x20da49, - 0x20dc87, - 0x22d34c, - 0x29d284, - 0x2e83ca, - 0x2ad2cc, - 0x26eb4b, - 0x39070b, - 0x3a5946, - 0x23c107, - 0x2302c7, - 0x2302cf, - 0x2f0c11, - 0x3b3a12, - 0x23c90d, - 0x23c90e, - 0x23cc4e, - 0x31db88, - 0x31db92, - 0x23ea88, - 0x201407, - 0x248bca, - 0x20d888, - 0x3614c5, - 0x371c0a, - 0x220987, - 0x2da0c4, - 0x202b43, - 0x311185, - 0x35f7c7, - 0x39fe87, - 0x2b400e, - 0x3366cd, - 0x338149, - 0x20dfc5, - 0x35d103, - 0x24ea46, - 0x36fb85, - 0x271a48, - 0x2b2449, - 0x25b285, - 0x25b28f, - 0x2dad87, - 0x2f78c5, - 0x306b0a, - 0x27fc06, - 0x23dd09, - 0x2ea18c, - 0x2ebe09, - 0x378a86, - 0x25f2cc, - 0x2ec206, - 0x2ef3c8, - 0x2ef5c6, - 0x2da606, - 0x280604, - 0x25a1c3, - 0x35760a, - 0x369111, - 0x38c04a, - 0x3627c5, - 0x2a64c7, - 0x253647, - 0x2c4204, - 0x2c420b, - 0x318788, - 0x2b3006, - 0x2c75c5, - 0x38b604, - 0x262b49, - 0x29f2c4, - 0x21da87, - 0x322045, - 0x322047, - 0x323205, - 0x2a8d03, - 0x2012c8, - 0x27f30a, - 0x23fd43, - 0x28108a, - 0x26db46, - 0x25b00f, - 0x356849, - 0x3b14d0, - 0x2e22c8, - 0x2c45c9, - 0x293287, - 0x265ccf, - 0x3aecc4, - 0x22d704, - 0x219f46, - 0x222b86, - 0x3a5dca, - 0x3903c6, - 0x33eb87, - 0x2f6fc8, - 0x2f71c7, - 0x2f7bc7, - 0x34b94a, - 0x2fa14b, - 0x38e7c5, - 0x3b3648, - 0x238b83, - 0x261d0c, - 0x212f4f, - 0x2594cd, - 0x2bb187, - 0x338289, - 0x22f4c7, - 0x25a288, - 0x38274c, - 0x2a6c48, - 0x252cc8, - 0x30c28e, - 0x31f814, - 0x31fd24, - 0x33f28a, - 0x35a54b, - 0x215b04, - 0x215b09, - 0x330288, - 0x244545, - 0x24ec0a, - 0x36b187, - 0x307e44, - 0x323ac3, - 0x22d183, - 0x2374c4, - 0x2343c3, - 0x21eb03, - 0x201604, - 0x202243, - 0x211003, - 0x2cc5c6, - 0x212444, - 0x238483, - 0x2264c3, - 0x21bd03, - 0x200882, - 0x323ac3, - 0x216582, - 0x22d183, - 0x2374c4, - 0x2343c3, - 0x21eb03, - 0x202243, - 0x2cc5c6, - 0x238483, - 0x2264c3, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x211cc3, - 0x238483, - 0x2264c3, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x212444, - 0x238483, - 0x2264c3, - 0x200882, - 0x2f5003, - 0x216582, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x202ec2, - 0x200482, - 0x216582, - 0x22d183, - 0x22b782, - 0x200a82, - 0x201604, - 0x307b04, - 0x219382, - 0x212444, - 0x201502, - 0x2264c3, + 0x27d546, + 0x2d36cb, + 0x297c05, + 0x244d85, + 0x27dc45, + 0x366f46, + 0x20ac03, + 0x241946, + 0x277507, + 0x2c4a05, + 0x24c705, + 0x3645c5, + 0x327346, + 0x31da84, + 0x31da86, + 0x3add49, + 0x366dcc, + 0x37a948, + 0x33f004, + 0x2fa886, + 0x2a1e06, + 0x29bec8, + 0x218808, + 0x366cc9, + 0x3b1987, + 0x245989, + 0x254106, + 0x22c2c4, + 0x20bf04, + 0x286cc4, + 0x285b88, + 0x20290a, + 0x3477c6, + 0x352107, + 0x36f007, + 0x2d4505, + 0x2a7084, + 0x2913c6, + 0x2b6e46, + 0x202803, + 0x308d87, + 0x368b48, + 0x3a1aca, + 0x2ce348, + 0x2cec88, + 0x35af05, + 0x253245, + 0x26df85, + 0x2d48c6, + 0x33d4c6, + 0x35bd05, + 0x2969c9, + 0x2a6e8c, + 0x26e047, + 0x29de88, + 0x381a45, + 0x68a8c4, + 0x24df84, + 0x25d784, + 0x214486, + 0x2a450e, + 0x205947, + 0x2c41c5, + 0x2abfcc, + 0x231287, + 0x216207, + 0x218089, + 0x218f89, + 0x28a645, + 0x308f48, + 0x24d609, + 0x334005, + 0x2c4948, + 0x322906, + 0x361f06, + 0x2e4444, + 0x2ae848, + 0x251f43, + 0x303084, + 0x2b37c5, + 0x3ac0c7, + 0x210445, + 0x39bbc9, + 0x28aa8d, + 0x299886, + 0x245404, + 0x2937c8, + 0x27d00a, + 0x224107, + 0x23be45, + 0x203143, + 0x2a37ce, + 0x27868c, + 0x2faa87, + 0x2a46c7, + 0x203c03, + 0x224f45, + 0x25d785, + 0x29e9c8, + 0x29bb89, + 0x33ef06, + 0x247444, + 0x249b86, + 0x21e3cb, + 0x2cd1cc, + 
0x221507, + 0x2d5c45, + 0x3ad988, + 0x2e0b85, + 0x2c2307, + 0x3630c7, + 0x251f45, + 0x20ac03, + 0x39a644, + 0x212905, + 0x351805, + 0x351806, + 0x34fd08, + 0x216287, + 0x319646, + 0x35c106, + 0x3a1406, + 0x2d5e89, + 0x20f847, + 0x26a5c6, + 0x2cd346, + 0x252006, + 0x2aeb45, + 0x219646, + 0x3768c5, + 0x228848, + 0x2973cb, + 0x291086, + 0x36f044, + 0x2e2909, + 0x25b284, + 0x322888, + 0x2324c7, + 0x2885c4, + 0x2be288, + 0x2c5304, + 0x2aeb84, + 0x28b145, + 0x35e286, + 0x34f387, + 0x239b43, + 0x2a2a85, + 0x322e84, + 0x358586, + 0x3a1a08, + 0x368885, + 0x297089, + 0x3363c5, + 0x2e1e88, + 0x215207, + 0x388dc8, + 0x2bd787, + 0x362709, + 0x255a86, + 0x32b3c6, + 0x359004, + 0x293605, + 0x30150c, + 0x27dc47, + 0x27e207, + 0x36eec8, + 0x299886, + 0x277444, + 0x32e344, + 0x286849, + 0x2c84c6, + 0x240987, + 0x3a8bc4, + 0x286086, + 0x343905, + 0x2cbb07, + 0x2d3646, + 0x252d49, + 0x2aab07, + 0x26f8c7, + 0x2a6246, + 0x3879c5, + 0x283988, + 0x219b08, + 0x2646c6, + 0x3688c5, + 0x261b06, + 0x209983, + 0x29e849, + 0x2a2b4e, + 0x2bd488, + 0x2314c8, + 0x2644cb, + 0x2972c6, + 0x2089c4, + 0x244ac4, + 0x2a2c4a, + 0x216907, + 0x26a685, + 0x213bc9, + 0x2c3285, + 0x3a1087, + 0x2b4c04, + 0x284487, + 0x2e65c8, + 0x2cb206, + 0x21e789, + 0x2beaca, + 0x216886, + 0x29c8c6, + 0x2b1945, + 0x37ccc5, + 0x31a107, + 0x24dd08, + 0x343848, + 0x395fc6, + 0x234145, + 0x208ace, + 0x2b8bc4, + 0x264645, + 0x27c749, + 0x30dfc8, + 0x28f0c6, + 0x2a04cc, + 0x2a1410, + 0x2a414f, + 0x2a51c8, + 0x3363c7, + 0x200045, + 0x26fd05, + 0x34f209, + 0x298a89, + 0x23cbc6, + 0x278307, + 0x2d2805, + 0x21b509, + 0x340146, + 0x224d4d, + 0x286b89, + 0x202ec4, + 0x2bd208, + 0x2113c9, + 0x347986, + 0x27cec5, + 0x32b3c6, + 0x317849, + 0x26ba08, + 0x212485, + 0x2ae844, + 0x2a068b, + 0x347845, + 0x2a07c6, + 0x287586, + 0x26ed86, + 0x287fcb, + 0x297189, + 0x35c045, + 0x388b87, + 0x363206, + 0x220f06, + 0x25d508, + 0x35e389, + 0x219e8c, + 0x30a648, + 0x360406, + 0x340bc3, + 0x303c06, + 0x287e05, + 0x281948, + 0x233b46, + 0x2cbd48, + 0x275845, + 0x29dfc5, + 0x215348, + 0x31a947, + 0x319287, + 0x247fc7, + 0x220d88, + 0x336508, + 0x31e4c6, + 0x2b7687, + 0x247987, + 0x287cca, + 0x254003, + 0x366f46, + 0x202a45, + 0x203bc4, + 0x27f889, + 0x362684, + 0x2a7e44, + 0x2a1c84, + 0x2a46cb, + 0x211547, + 0x208d05, + 0x29ab08, + 0x27cdc6, + 0x27cdc8, + 0x280dc6, + 0x2900c5, + 0x290385, + 0x291f46, + 0x292b08, + 0x292dc8, + 0x27fa46, + 0x29a94f, + 0x29e310, + 0x3b1785, + 0x204083, + 0x22c385, + 0x30b188, + 0x298989, + 0x334148, + 0x2d5d08, + 0x224788, + 0x211607, + 0x27ca89, + 0x2cbf48, + 0x25bd44, + 0x2a1b08, + 0x2deb49, + 0x2b81c7, + 0x29f904, + 0x303f08, + 0x387cca, + 0x2ebe06, + 0x2a4dc6, + 0x21f5c9, + 0x2a1647, + 0x2ce1c8, + 0x30cc88, + 0x3a8a48, + 0x356245, + 0x37dc45, + 0x244d85, + 0x25d745, + 0x37e287, + 0x20ac05, + 0x2c4a05, + 0x2b5546, + 0x334087, + 0x2d1807, + 0x387fc6, + 0x2d7405, + 0x2a07c6, + 0x212245, + 0x2b84c8, + 0x2f1d84, + 0x2c7686, + 0x343744, + 0x2b69c8, + 0x2c778a, + 0x28004c, + 0x234bc5, + 0x2c4086, + 0x21a046, + 0x368706, + 0x30b384, + 0x343bc5, + 0x280c07, + 0x2a16c9, + 0x2cdc87, + 0x68a8c4, + 0x68a8c4, + 0x3175c5, + 0x32dd04, + 0x29fe8a, + 0x27cc46, + 0x24d404, + 0x208305, + 0x37a545, + 0x2b6d44, + 0x2871c7, + 0x36b847, + 0x2cd788, + 0x368ec8, + 0x212489, + 0x340248, + 0x2a004b, + 0x250b44, + 0x221005, + 0x2840c5, + 0x247f49, + 0x35e389, + 0x2e2808, + 0x232248, + 0x203084, + 0x2a1e45, + 0x202943, + 0x264845, + 0x38ed06, + 0x29b9cc, + 0x20f4c6, + 0x247246, + 0x28f345, + 0x3273c8, + 0x2bd606, + 0x2ea346, + 0x2a4dc6, + 0x2297cc, + 0x2ba544, + 0x3a154a, + 
0x28f288, + 0x29b807, + 0x322d86, + 0x33efc7, + 0x2f2185, + 0x343e06, + 0x34af86, + 0x356707, + 0x2be7c4, + 0x3676c5, + 0x27c744, + 0x37ac87, + 0x27c988, + 0x27d84a, + 0x284a07, + 0x2d22c7, + 0x336347, + 0x2e0cc9, + 0x29b9ca, + 0x219e43, + 0x2e9645, + 0x200c83, + 0x2e8cc9, + 0x26ac48, + 0x38eb47, + 0x334249, + 0x219c06, + 0x2d4108, + 0x337f85, + 0x2e16ca, + 0x2d8c49, + 0x2abc49, + 0x3aea07, + 0x283049, + 0x214a08, + 0x3568c6, + 0x2c4248, + 0x217b07, + 0x235c87, + 0x277787, + 0x2d2688, + 0x2fa706, + 0x387a85, + 0x280c07, + 0x29c3c8, + 0x3436c4, + 0x30e3c4, + 0x294047, + 0x2b2d47, + 0x24d48a, + 0x356846, + 0x330f0a, + 0x2c34c7, + 0x2b8987, + 0x257e44, + 0x289204, + 0x2d3546, + 0x3b3d44, + 0x3b3d4c, + 0x203505, + 0x218649, + 0x2dfc44, + 0x2b6e05, + 0x27cf88, + 0x292e85, + 0x375846, + 0x217f84, + 0x3ae3ca, + 0x32b7c6, + 0x2a68ca, + 0x237f07, + 0x2d3385, + 0x21c985, + 0x2d454a, + 0x2a6805, + 0x2a4cc6, + 0x208484, + 0x2b5106, + 0x31a1c5, + 0x233c06, + 0x2e88cc, + 0x2cd90a, + 0x2936c4, + 0x235ec6, + 0x2a1647, + 0x2d5204, + 0x25b448, + 0x38e5c6, + 0x208949, + 0x2bb109, + 0x2b0689, + 0x24f346, + 0x217c06, + 0x2c4387, + 0x296908, + 0x217a09, + 0x211547, + 0x29ac86, + 0x3872c7, + 0x28b985, + 0x2b8bc4, + 0x2c3f47, + 0x247b45, + 0x28b085, + 0x235247, + 0x251e08, + 0x3ad906, + 0x29d24d, + 0x29ebcf, + 0x2a360d, + 0x215f84, + 0x232cc6, + 0x2d91c8, + 0x358fc5, + 0x287e88, + 0x23a58a, + 0x202ec4, + 0x21e946, + 0x239607, + 0x22d9c7, + 0x2a79c9, + 0x2c4205, + 0x2b6d44, + 0x2b8cca, + 0x2be589, + 0x283147, + 0x272086, + 0x347986, + 0x2a1d86, + 0x364046, + 0x2d890f, + 0x2d9089, + 0x27d546, + 0x282e46, + 0x32fd89, + 0x2b7787, + 0x226743, + 0x229946, + 0x218303, + 0x2e7dc8, + 0x387107, + 0x2a53c9, + 0x29f208, + 0x3193c8, + 0x351446, + 0x20f409, + 0x23c1c5, + 0x2b7844, + 0x2a73c7, + 0x273285, + 0x215f84, + 0x208dc8, + 0x216bc4, + 0x2b74c7, + 0x34bc06, + 0x2b3045, + 0x299188, + 0x34784b, + 0x319c07, + 0x2d47c6, + 0x2c82c4, + 0x32d146, + 0x270f45, + 0x247b45, + 0x283709, + 0x286dc9, + 0x235cc4, + 0x235d05, + 0x235f05, + 0x2e1546, + 0x309048, + 0x2c2c46, + 0x36898b, + 0x2b4c8a, + 0x2b6905, + 0x290406, + 0x3025c5, + 0x2e0a45, + 0x2ab6c7, + 0x3ac808, + 0x245984, + 0x26c586, + 0x292e46, + 0x214bc7, + 0x30d7c4, + 0x2817c6, + 0x2b9f85, + 0x2b9f89, + 0x2135c4, + 0x2a7209, + 0x27fa46, + 0x2c5108, + 0x235f05, + 0x36f105, + 0x233c06, + 0x219d89, + 0x218f89, + 0x2472c6, + 0x30e0c8, + 0x28abc8, + 0x302584, + 0x2b9004, + 0x2b9008, + 0x3269c8, + 0x245a89, + 0x38ec86, + 0x2a4dc6, + 0x320c0d, + 0x351c06, + 0x34c209, + 0x23d1c5, + 0x3b2106, + 0x262d48, + 0x31d9c5, + 0x2479c4, + 0x270f45, + 0x2866c8, + 0x29fc49, + 0x27c804, + 0x2008c6, + 0x39660a, + 0x2fa988, + 0x24d609, + 0x244c4a, + 0x3341c6, + 0x29ed88, + 0x2c20c5, + 0x2c0e48, + 0x2bd885, + 0x219ac9, + 0x36bd09, + 0x203602, + 0x2e3305, + 0x276286, + 0x27f987, + 0x295705, + 0x2f0ec6, + 0x306288, + 0x299886, + 0x2b9a09, + 0x27e306, + 0x25d388, + 0x2afb85, + 0x25c586, + 0x26aac8, + 0x285b88, + 0x2e9f88, + 0x347b08, + 0x219644, + 0x209fc3, + 0x2b9c44, + 0x249b06, + 0x28b9c4, + 0x231407, + 0x2ea249, + 0x2c7a05, + 0x30cc86, + 0x229946, + 0x34fb4b, + 0x2b6846, + 0x20edc6, + 0x2cb6c8, + 0x24c646, + 0x2bcb03, + 0x2080c3, + 0x2b8bc4, + 0x22e605, + 0x2b4447, + 0x27c988, + 0x27c98f, + 0x280b0b, + 0x308e48, + 0x200946, + 0x30914e, + 0x233c03, + 0x2b43c4, + 0x2b67c5, + 0x2b6bc6, + 0x2914cb, + 0x297b46, + 0x222709, + 0x2b3045, + 0x38a208, + 0x211d88, + 0x218e4c, + 0x2a4706, + 0x264886, + 0x2de405, + 0x28c188, + 0x26aac5, + 0x335848, + 0x2a084a, + 0x2a3a49, + 0x68a8c4, + 0x3760d1c2, + 0x15f048, + 
0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x368883, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x2964c4, + 0x204ac3, + 0x200383, + 0x210e03, + 0x24ae04, + 0x2d0783, + 0x23a184, + 0x231b83, + 0x2da904, + 0x332ec3, + 0x2959c7, + 0x20fbc3, + 0x20abc3, + 0x2842c8, + 0x200383, + 0x2b400b, + 0x2f2a03, + 0x2716c6, + 0x205bc2, + 0x26b44b, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x200383, + 0x200e03, + 0x203383, + 0x204cc2, + 0x15f048, + 0x325b45, + 0x247bc8, + 0x2ec408, + 0x20d1c2, + 0x329dc5, + 0x39c307, + 0x2001c2, + 0x24b587, + 0x208a42, + 0x246f87, + 0x239ec9, + 0x2c1c88, + 0x3a88c9, + 0x338b02, + 0x270647, + 0x2abac4, + 0x39c3c7, + 0x2b4b87, + 0x24ca02, + 0x20fbc3, + 0x20b602, + 0x202082, + 0x200382, + 0x217902, + 0x200e02, + 0x20c4c2, + 0x2af685, + 0x24dec5, + 0xd1c2, + 0x31b83, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x117c3, + 0x701, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x2964c4, + 0x204303, + 0x204ac3, + 0x200383, 0x21bd03, - 0x3a5946, - 0x221e42, - 0x206202, - 0x224dc2, - 0x3d224643, - 0x3d626703, - 0x53d46, - 0x53d46, - 0x224104, - 0x140a30a, - 0x16970c, - 0x165f0c, - 0x798cd, - 0xdb7c7, - 0x1b908, - 0x22f08, - 0x1a7eca, - 0x3e31f345, - 0x11f349, - 0x163048, - 0x1ac10a, - 0x16348e, - 0x144148b, - 0x167404, - 0x2988, - 0x16e847, - 0x178587, - 0x112089, - 0x10ec87, - 0x132d48, - 0x1a2f89, - 0x17a845, - 0x5074e, - 0xa910d, - 0x131948, - 0x3e6d7e86, - 0x60c47, - 0x62607, - 0x67347, - 0x6c4c7, - 0xd382, - 0x141807, - 0x1d34c, - 0xeaec7, - 0x8ddc6, - 0xa5449, - 0xa7188, - 0xf1c2, - 0xa82, - 0x13088b, - 0x15309, - 0x33c49, - 0x2b848, - 0xb09c2, - 0x1afb89, - 0xccf89, - 0xcdbc8, - 0xce147, - 0xcf0c9, - 0xd2905, - 0xd2d10, - 0x164d46, - 0x51f05, - 0x23b4d, - 0x10e846, - 0xdc047, - 0xe26d8, - 0x108548, - 0x19104a, - 0x4114d, - 0x1402, - 0x161186, - 0x89948, - 0x180248, - 0x87f89, - 0x45e88, - 0x4da0e, - 0xe8f85, - 0x539c8, - 0x3282, - 0x155646, - 0x6c2, - 0xb81, - 0x3eae2f44, - 0x3ee90c43, + 0x3a40d686, + 0x5e303, + 0x854c5, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x8082, + 0x15f048, + 0x4dcc4, + 0xe0f85, + 0x204cc2, + 0x2cfa44, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x236d03, + 0x2b0405, + 0x204303, + 0x205d83, + 0x204ac3, + 0x2104c3, + 0x200383, + 0x213e83, + 0x24ae83, + 0x24abc3, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x20d1c2, + 0x200383, + 0x15f048, + 0x332ec3, + 0x15f048, + 0x26ae03, + 0x2d0783, + 0x22ef04, + 0x231b83, + 0x332ec3, + 0x20a3c2, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20a3c2, + 0x22d603, + 0x204ac3, + 0x200383, + 0x2ec383, + 0x213e83, + 0x204cc2, + 0x20d1c2, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x2716c5, + 0x1540c6, + 0x24ae04, + 0x205bc2, + 0x15f048, + 0x204cc2, + 0x1d508, + 0x20d1c2, + 0x97606, + 0x1681c4, + 0x16e1cb, + 0x3dc06, + 0xfcc7, + 0x231b83, + 0x332ec3, + 0x15ae05, + 0x19c804, + 0x221543, + 0x53fc7, + 0xdc304, + 0x204ac3, + 0x94fc4, + 0x200383, + 0x2f39c4, + 0xfe588, + 0x125886, + 0x114f85, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x20abc3, + 0x200383, + 0x2f2a03, + 0x205bc2, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204143, + 0x213184, + 0x204ac3, + 0x200383, + 0x2d0783, + 0x231b83, + 0x2da904, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x2716c6, + 0x231b83, + 0x332ec3, + 0x178ac3, + 0x200383, + 0x2d0783, + 0x231b83, + 
0x332ec3, + 0x204ac3, + 0x200383, + 0xfcc7, + 0x15f048, + 0x332ec3, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x40ed0783, + 0x231b83, + 0x204ac3, + 0x200383, + 0x15f048, + 0x204cc2, + 0x20d1c2, + 0x2d0783, + 0x332ec3, + 0x204ac3, + 0x200382, + 0x200383, + 0x316e47, + 0x23860b, + 0x2396c3, + 0x24be08, + 0x296687, + 0x225246, + 0x2c6145, + 0x373549, + 0x20f948, + 0x260d09, + 0x260d10, + 0x35d28b, + 0x385989, + 0x209303, + 0x2b5649, + 0x230806, + 0x23080c, + 0x260f08, + 0x3ae848, + 0x35d7c9, + 0x2a5d0e, + 0x20780b, + 0x2eb20c, + 0x205283, + 0x26cc4c, + 0x205289, + 0x257a87, + 0x231acc, + 0x36aa8a, + 0x24fe44, + 0x393f4d, + 0x26cb08, + 0x210e0d, + 0x272746, + 0x29258b, + 0x31a3c9, + 0x23d087, + 0x339606, + 0x349d89, + 0x38ce8a, + 0x37a0c8, + 0x2f24c4, + 0x34ecc7, + 0x3ac5c7, + 0x3a8dc4, + 0x32d984, + 0x237209, + 0x2ceac9, + 0x237908, + 0x210b85, + 0x392545, + 0x20aa06, + 0x393e09, + 0x23a80d, + 0x2eac88, + 0x20a907, + 0x2c61c8, + 0x382986, + 0x37ed04, + 0x359b85, + 0x202246, + 0x203204, + 0x205187, + 0x206d8a, + 0x211cc4, + 0x2167c6, + 0x2182c9, + 0x2182cf, + 0x2197cd, + 0x21a486, + 0x21d110, + 0x21d506, + 0x21dc47, + 0x21ebc7, + 0x21ebcf, + 0x21f889, + 0x2242c6, + 0x226487, + 0x226488, + 0x227649, + 0x2b3108, + 0x2e7907, + 0x20a203, + 0x378c86, + 0x3abf08, + 0x2a5fca, + 0x21fe49, + 0x20fa83, + 0x39c206, + 0x26c3ca, + 0x2fca47, + 0x2578ca, + 0x26a24e, + 0x21f9c6, + 0x2e3507, + 0x227086, + 0x201806, + 0x37da4b, + 0x30c58a, + 0x317ecd, + 0x217cc7, + 0x359188, + 0x359189, + 0x35918f, + 0x20e28c, + 0x281bc9, + 0x2e928e, + 0x295aca, + 0x3035c6, + 0x2fbbc6, + 0x3b06cc, + 0x3106cc, + 0x311448, + 0x33d1c7, + 0x25b7c5, + 0x2251c4, + 0x2438ce, + 0x38d104, + 0x257bc7, + 0x26d08a, + 0x36e914, + 0x373a4f, + 0x21ed88, + 0x378b48, + 0x357e8d, + 0x357e8e, + 0x3823c9, + 0x3a5b08, + 0x3a5b0f, + 0x2317cc, + 0x2317cf, + 0x232a07, + 0x23acca, + 0x21cc4b, + 0x23bcc8, + 0x23e5c7, + 0x264f4d, + 0x3151c6, + 0x394106, + 0x2417c9, + 0x259888, + 0x24c108, + 0x24c10e, + 0x238707, + 0x226985, + 0x24da85, + 0x205e04, + 0x225506, + 0x237808, + 0x260183, + 0x2efb8e, + 0x265308, + 0x2f198b, + 0x26afc7, + 0x395e05, + 0x26cdc6, + 0x2b0e07, + 0x307048, + 0x319f09, + 0x298fc5, + 0x28a188, + 0x217306, + 0x3a02ca, + 0x2437c9, + 0x231b89, + 0x231b8b, + 0x201148, + 0x3a8c89, + 0x210c46, + 0x22c54a, + 0x2b7f4a, + 0x23aecc, + 0x3acb87, + 0x2c1a8a, + 0x328ecb, + 0x328ed9, + 0x30fa48, + 0x271745, + 0x265106, + 0x258fc9, + 0x261cc6, + 0x21324a, + 0x20fb46, + 0x201e44, + 0x2c9ecd, + 0x201e47, + 0x20b549, + 0x383305, + 0x24e548, + 0x24ee89, + 0x24f0c4, + 0x24fd47, + 0x24fd48, + 0x250287, + 0x26ea08, + 0x2545c7, + 0x35c2c5, + 0x25c70c, + 0x25cf49, + 0x2c4dca, + 0x38f0c9, + 0x2b5749, + 0x2739cc, + 0x263e0b, + 0x2640c8, + 0x265688, + 0x268a44, + 0x288288, + 0x289389, + 0x36ab47, + 0x218506, + 0x317287, + 0x21e1c9, + 0x328b0b, + 0x32cfc7, + 0x200407, + 0x238047, + 0x210d84, + 0x210d85, + 0x2ac905, + 0x33c00b, + 0x399404, + 0x369d08, + 0x26f08a, + 0x2173c7, + 0x341dc7, + 0x290c12, + 0x27ee46, + 0x22e886, + 0x35898e, + 0x281346, + 0x298708, + 0x29938f, + 0x2111c8, + 0x38bb08, + 0x3af64a, + 0x3af651, + 0x2a6b4e, + 0x254e4a, + 0x254e4c, + 0x2014c7, + 0x3a5d10, + 0x3b5388, + 0x2a6d45, + 0x2b114a, + 0x20324c, + 0x29afcd, + 0x2fce06, + 0x2fce07, + 0x2fce0c, + 0x305c8c, + 0x32814c, + 0x28f98b, + 0x289b84, + 0x21f744, + 0x374149, + 0x2fe3c7, + 0x23e389, + 0x2b7d89, + 0x35a587, + 0x36a906, + 0x36a909, + 0x39d403, + 0x2129ca, + 0x32f807, + 0x238acb, + 0x317d4a, + 0x2abb44, + 0x39c546, + 0x284c89, + 0x3b3bc4, + 0x2035ca, + 0x2d4ac5, + 
0x2c0005, + 0x2c000d, + 0x2c034e, + 0x378205, + 0x323506, + 0x2712c7, + 0x38684a, + 0x38d406, + 0x35ecc4, + 0x2f8987, + 0x2da18b, + 0x382a47, + 0x282ac4, + 0x24f706, + 0x24f70d, + 0x21de8c, + 0x204986, + 0x2eae8a, + 0x235806, + 0x2f3248, + 0x28bf47, + 0x33f88a, + 0x23d986, + 0x217bc3, + 0x262ec6, + 0x3abd88, + 0x2a024a, + 0x2766c7, + 0x2766c8, + 0x27dd84, + 0x2cc0c7, + 0x23ccc8, + 0x29e008, + 0x288b48, + 0x33110a, + 0x2e0405, + 0x2e0687, + 0x254c93, + 0x2d0806, + 0x26f288, + 0x222c09, + 0x24b448, + 0x3514cb, + 0x2cddc8, + 0x273704, + 0x215446, + 0x3b4f06, + 0x35e0c9, + 0x2c72c7, + 0x25c808, + 0x29e186, + 0x235144, + 0x2ce085, + 0x2c8a08, + 0x2c900a, + 0x2c9b48, + 0x2ce746, + 0x29ef8a, + 0x351988, + 0x2d5008, + 0x2d6a88, + 0x2d70c6, + 0x2d93c6, + 0x20168c, + 0x2d99d0, + 0x28de45, + 0x210fc8, + 0x306790, + 0x210fd0, + 0x260b8e, + 0x20130e, + 0x201314, + 0x31abcf, + 0x31af86, + 0x3319d1, + 0x339793, + 0x339c08, + 0x3aafc5, + 0x35b6c8, + 0x385785, + 0x22854c, + 0x229489, + 0x282449, + 0x245d47, + 0x377009, + 0x243d87, + 0x2fadc6, + 0x359987, + 0x261245, + 0x211803, + 0x260349, + 0x222ec9, + 0x378ac3, + 0x39a544, + 0x35c40d, + 0x3b1b0f, + 0x235185, + 0x35b5c6, + 0x211b07, + 0x325987, + 0x28cd86, + 0x28cd8b, + 0x2a82c5, + 0x25f106, + 0x2fba47, + 0x276ec9, + 0x2290c6, + 0x22e405, + 0x31190b, + 0x23bb46, + 0x3724c5, + 0x28b548, + 0x321d88, + 0x2d75cc, + 0x2d75d0, + 0x2e0149, + 0x2e7107, + 0x30860b, + 0x2e6186, + 0x2e77ca, + 0x2ea4cb, + 0x2eb74a, + 0x2eb9c6, + 0x2ec245, + 0x32f546, + 0x27e4c8, + 0x245e0a, + 0x357b1c, + 0x2f2acc, + 0x2f2dc8, + 0x2716c5, + 0x2f4f07, + 0x26a106, + 0x27d385, + 0x21c2c6, + 0x28cf48, + 0x2be807, + 0x2a5c08, + 0x2e360a, + 0x34a10c, + 0x34a389, + 0x37ee87, + 0x20d244, + 0x24db46, + 0x38b68a, + 0x2b7e85, + 0x20734c, + 0x20b088, + 0x377648, + 0x20d98c, + 0x21be8c, + 0x2206c9, + 0x220907, + 0x342c0c, + 0x3aa644, + 0x23c54a, + 0x2580cc, + 0x278acb, + 0x24140b, + 0x241f46, + 0x383847, + 0x2ddb07, + 0x3a5f4f, + 0x2fda11, + 0x2ddb12, + 0x30d0cd, + 0x30d0ce, + 0x30d40e, + 0x31ad88, + 0x31ad92, + 0x252288, + 0x2962c7, + 0x25260a, + 0x204748, + 0x281305, + 0x37e0ca, + 0x21da47, + 0x305304, + 0x21b083, + 0x2b0fc5, + 0x3af8c7, + 0x2fea07, + 0x29b1ce, + 0x30ff4d, + 0x313c49, + 0x220c45, + 0x33aa03, + 0x25fac6, + 0x36ffc5, + 0x2f1bc8, + 0x30c009, + 0x265145, + 0x26514f, + 0x2ec087, + 0x373485, + 0x21b2ca, + 0x299b86, + 0x2f33c9, + 0x384d0c, + 0x2f99c9, + 0x207b06, + 0x26ee8c, + 0x340cc6, + 0x2fc548, + 0x2fc746, + 0x30fbc6, + 0x349344, + 0x264443, + 0x2b270a, + 0x35b211, + 0x281d8a, + 0x255d05, + 0x277947, + 0x259307, + 0x23cdc4, + 0x23cdcb, + 0x3a8748, + 0x2bd306, + 0x36ef45, + 0x3a05c4, + 0x291949, + 0x330304, + 0x25cd87, + 0x332705, + 0x332707, + 0x358bc5, + 0x2af743, + 0x296188, + 0x34398a, + 0x239b43, + 0x325b8a, + 0x3b4086, + 0x264ecf, + 0x353689, + 0x2efb10, + 0x2dee88, + 0x2d0e89, + 0x29d087, + 0x24f68f, + 0x334604, + 0x2da984, + 0x21d386, + 0x2b3546, + 0x256dca, + 0x383586, + 0x32a787, + 0x3055c8, + 0x3057c7, + 0x306047, + 0x307a4a, + 0x309b4b, + 0x3a2445, + 0x2dd748, + 0x2166c3, + 0x3b120c, + 0x37140f, + 0x25b5cd, + 0x2c4607, + 0x313d89, + 0x217687, + 0x23e148, + 0x36eb0c, + 0x273608, + 0x258908, + 0x3188ce, + 0x32bad4, + 0x32bfe4, + 0x3424ca, + 0x35ea8b, + 0x243e44, + 0x243e49, + 0x21e9c8, + 0x24e105, + 0x25fc8a, + 0x239d87, + 0x2957c4, + 0x368883, + 0x2d0783, + 0x23a184, + 0x231b83, + 0x332ec3, + 0x2964c4, + 0x204303, + 0x20fbc3, + 0x201686, + 0x213184, + 0x204ac3, + 0x200383, + 0x21aa03, + 0x204cc2, + 0x368883, + 0x20d1c2, + 0x2d0783, + 0x23a184, + 0x231b83, + 0x332ec3, + 
0x204303, + 0x201686, + 0x204ac3, + 0x200383, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x2135c3, + 0x204ac3, + 0x200383, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x213184, + 0x204ac3, + 0x200383, + 0x204cc2, + 0x21fd43, + 0x20d1c2, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x20e542, + 0x20d882, + 0x20d1c2, + 0x2d0783, + 0x209c02, + 0x201d42, + 0x2964c4, + 0x222044, + 0x223342, + 0x213184, + 0x200382, + 0x200383, + 0x21aa03, + 0x241f46, + 0x217082, + 0x2016c2, + 0x201a82, + 0x436111c3, + 0x43a014c3, + 0x59a86, + 0x59a86, + 0x24ae04, + 0x143768a, + 0x2608c, + 0x21ecc, + 0x852cd, + 0x2ac47, + 0x1a608, + 0x218c8, + 0x19834a, + 0x446db445, + 0x12b089, + 0x103008, + 0x8ed4a, + 0x14a60e, + 0x144b24b, + 0x1681c4, + 0x1672c8, + 0x13edc7, + 0x16f07, + 0x11dd09, + 0x1b3c47, + 0x94b88, + 0x61f49, + 0x4bfc5, + 0x12494e, + 0xafbcd, + 0xfb48, + 0x44a37046, + 0x45437048, + 0x79c88, + 0x117050, + 0x69c87, + 0x6cf47, + 0x71187, + 0x75f87, + 0xa9c2, + 0x62507, + 0x10c74c, + 0x3b9c7, + 0xa9f46, + 0xaa689, + 0xad708, + 0x18d82, + 0x1d42, + 0x24a0b, + 0x2ccc9, + 0x4c809, + 0x17de88, + 0xb5e02, + 0x104389, + 0xd2fca, + 0xdb9c9, + 0xdd048, + 0xddfc7, + 0xe0389, + 0xe4685, + 0xe4a90, + 0x1a8e86, + 0x63c85, + 0x4a84d, + 0x1b3806, + 0xee547, + 0xf39d8, + 0x96b88, + 0xba9ca, + 0x53b4d, + 0x1702, + 0x177ac6, + 0x91788, + 0x1ae208, + 0x15ef09, + 0x56608, + 0x5dece, + 0xd68d, + 0xf8805, + 0x62288, + 0x59688, + 0x6902, + 0x125886, + 0x6c82, + 0x3c1, + 0x8b4c3, + 0x44ef4244, + 0x4529a283, 0x141, - 0x1650c6, + 0x15c06, 0x141, 0x1, - 0x1650c6, - 0x14f60c5, - 0x245dc4, - 0x22d183, - 0x247344, - 0x201604, - 0x238483, - 0x2245c5, - 0x21ca03, - 0x215cc3, - 0x2e9cc5, - 0x223ec3, - 0x3fe2d183, - 0x2343c3, - 0x21eb03, + 0x15c06, + 0x8b4c3, + 0x14e4285, + 0x24fe44, + 0x2d0783, + 0x251304, + 0x2964c4, + 0x204ac3, + 0x222ac5, + 0x21bd03, + 0x2202c3, + 0x370145, + 0x24abc3, + 0x466d0783, + 0x231b83, + 0x332ec3, 0x200041, - 0x211003, - 0x307b04, - 0x212444, - 0x238483, - 0x2264c3, - 0x217643, - 0x880c8, - 0x200882, - 0x323ac3, - 0x216582, - 0x22d183, - 0x2343c3, - 0x211cc3, - 0x200a82, - 0x201604, - 0x202243, - 0x211003, - 0x238483, - 0x2025c3, - 0x2264c3, - 0x223ec3, - 0x880c8, - 0x38bcc2, - 0x16582, - 0x1462d48, - 0xf738e, - 0x40e00142, - 0x29e988, - 0x227fc6, - 0x2bb546, - 0x227947, - 0x41201102, - 0x417566c8, - 0x3af8ca, - 0x2606c8, - 0x201002, - 0x29e609, - 0x38e807, - 0x21ac06, - 0x201009, - 0x254704, - 0x2f5fc6, - 0x2d5fc4, - 0x273a04, - 0x2563c9, - 0x281786, - 0x2e3d85, - 0x220e45, - 0x3a5287, - 0x2b7cc7, - 0x243884, - 0x227b86, - 0x39fac5, - 0x202b05, - 0x2f5905, - 0x392547, - 0x366785, - 0x308bc9, - 0x2808c5, - 0x2d07c4, - 0x39a387, - 0x30584e, - 0x30fc49, - 0x322e89, - 0x348986, - 0x31e708, - 0x2b024b, - 0x2d210c, - 0x25b946, - 0x37cb07, - 0x209805, - 0x20f24a, - 0x20a689, - 0x252249, - 0x293d86, - 0x2ee6c5, - 0x28b145, - 0x361f09, - 0x2f5a8b, - 0x277606, - 0x32e5c6, - 0x20d2c4, - 0x288bc6, - 0x25f948, - 0x203d06, - 0x3a82c6, - 0x208bc8, - 0x2093c7, - 0x209589, - 0x20c445, - 0x880c8, - 0x378504, - 0x229e04, - 0x212d45, - 0x395589, - 0x223707, - 0x22370b, - 0x2255ca, - 0x22b185, - 0x41a0b602, - 0x2173c7, - 0x41e2c488, - 0x2833c7, - 0x281ac5, - 0x32594a, - 0x16582, - 0x24b90b, - 0x2adc4a, - 0x2248c6, - 0x3a31c3, - 0x230dcd, - 0x3320cc, - 0x36210d, - 0x3845c5, - 0x237205, - 0x24f147, - 0x3a8e89, - 0x3af7c6, - 0x390245, - 0x2ee3c8, - 0x288ac3, - 0x2e8108, - 0x288ac8, - 0x2bc507, - 0x2e62c8, - 0x3af3c9, - 0x236107, - 0x20ae07, - 0x335048, - 0x253384, - 0x253387, - 0x266a88, - 0x205846, - 
0x3661cf, - 0x215507, - 0x2e3506, - 0x25a405, - 0x224f43, - 0x372207, - 0x36e143, - 0x246506, - 0x247f86, - 0x249686, - 0x28d1c5, - 0x263e83, - 0x3885c8, - 0x370489, - 0x38124b, - 0x249808, - 0x24c4c5, - 0x24d4c5, - 0x4223aa82, - 0x37f509, - 0x201687, - 0x258b45, - 0x2562c7, - 0x257e06, - 0x35eb05, - 0x36f9cb, - 0x259e44, - 0x260285, - 0x2603c7, - 0x271986, - 0x271fc5, - 0x27dbc7, - 0x27e647, - 0x26db04, - 0x2871ca, - 0x287688, - 0x3685c9, - 0x3a65c5, - 0x333386, - 0x25fb0a, - 0x220d46, - 0x24bb47, - 0x318a8d, - 0x2273c9, - 0x30ff45, - 0x24ff87, - 0x335608, - 0x3675c8, - 0x341b87, - 0x34a486, - 0x2116c7, - 0x247883, - 0x337ec4, - 0x35c385, - 0x38cac7, - 0x391f49, - 0x21a6c8, - 0x22fd05, - 0x382a04, - 0x240e85, - 0x2448cd, - 0x201142, - 0x3006c6, - 0x3610c6, - 0x2bde4a, - 0x3791c6, - 0x37fc45, - 0x319285, - 0x319287, - 0x38b14c, - 0x26fb8a, - 0x288886, - 0x29e445, - 0x288a06, + 0x20fbc3, + 0x222044, + 0x213184, + 0x204ac3, + 0x200383, + 0x213e83, + 0x15f048, + 0x204cc2, + 0x368883, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x2135c3, + 0x201d42, + 0x2964c4, + 0x204303, + 0x20fbc3, + 0x204ac3, + 0x20abc3, + 0x200383, + 0x24abc3, + 0x15f048, + 0x371182, + 0xd1c2, + 0x1491b48, + 0x10598e, + 0x47608c42, + 0x32f9c8, + 0x233d86, + 0x210186, + 0x233707, + 0x47a00902, + 0x47f53508, + 0x20ebca, + 0x269708, + 0x201442, + 0x32f649, + 0x3a2487, + 0x218486, + 0x295ec9, + 0x247ec4, + 0x2e4186, + 0x2e1bc4, + 0x26bdc4, + 0x25bf49, + 0x326286, + 0x24df85, + 0x291285, + 0x390387, + 0x2c3747, + 0x2911c4, + 0x233946, + 0x2ffb45, + 0x367445, + 0x302505, + 0x392307, + 0x26ae05, + 0x315e49, + 0x32d305, + 0x307184, + 0x38d347, + 0x32ecce, + 0x330a09, + 0x358849, + 0x3ac9c6, + 0x2fe248, + 0x2b520b, + 0x2e3b0c, + 0x2898c6, + 0x2076c7, + 0x37b305, + 0x32d98a, + 0x237a09, + 0x3aa989, + 0x257646, + 0x2fb805, + 0x2aabc5, + 0x348f89, + 0x30268b, + 0x280f46, + 0x338346, + 0x20a904, + 0x2908c6, + 0x226a08, + 0x3abc06, + 0x20c5c6, + 0x206188, + 0x207f47, + 0x208649, + 0x209705, + 0x15f048, + 0x216e84, + 0x33d5c4, + 0x369f05, + 0x204f49, + 0x222347, + 0x22234b, + 0x223e4a, + 0x228485, + 0x4820a002, + 0x238987, + 0x48629248, + 0x27be07, + 0x2bf945, + 0x3aac0a, + 0xd1c2, + 0x38740b, + 0x25470a, + 0x222dc6, + 0x395e03, + 0x29538d, + 0x3582cc, + 0x37f24d, + 0x381085, + 0x227dc5, + 0x2601c7, + 0x209c09, + 0x20eac6, + 0x383405, + 0x2d8008, + 0x2907c3, + 0x2ec708, + 0x2907c8, + 0x2c6c47, + 0x3b2448, + 0x39b7c9, + 0x2c9747, + 0x238187, + 0x302b88, + 0x38ca44, + 0x38ca47, + 0x272648, + 0x2024c6, + 0x206fcf, + 0x2118c7, + 0x2e7a86, + 0x23e2c5, + 0x223783, + 0x365a47, + 0x36da03, + 0x250446, + 0x251c46, + 0x252a06, + 0x296e85, + 0x26ea03, + 0x388a48, + 0x370c89, + 0x37ffcb, + 0x252b88, + 0x254285, + 0x256405, + 0x48aabc02, + 0x359a49, + 0x296547, + 0x25f185, + 0x25be47, + 0x25dd86, + 0x363f05, + 0x36fe0b, + 0x2640c4, + 0x2692c5, + 0x269407, + 0x27b786, + 0x27bbc5, + 0x288487, 0x288d47, - 0x28a9c6, - 0x28d0cc, - 0x201149, - 0x42765547, - 0x290305, - 0x290306, - 0x2906c8, - 0x2b1f05, - 0x2a4805, - 0x2a4a48, - 0x2a4c4a, - 0x42a6a242, - 0x42e0ff82, - 0x382245, - 0x29cdc3, - 0x37a688, - 0x21d083, - 0x2a4ec4, - 0x23de4b, - 0x272408, - 0x2d77c8, - 0x433255c9, - 0x2a8949, - 0x2a9006, - 0x2aa5c8, - 0x2aa7c9, - 0x2ab386, - 0x2ab505, - 0x383086, - 0x2abc09, - 0x2802c7, - 0x243f06, - 0x235c47, - 0x3af647, - 0x33b504, - 0x43743909, - 0x2c2288, - 0x3565c8, - 0x2368c7, - 0x2bd906, - 0x2fe209, - 0x331f47, - 0x2f1b0a, - 0x376848, - 0x3237c7, - 0x326086, - 0x33aa0a, - 0x249fc8, - 0x28ab05, - 0x21bf85, - 0x2bcb87, - 0x2d26c9, - 0x2d6e0b, - 0x2dd8c8, - 
0x280949, - 0x249b07, - 0x3ad20c, - 0x2b1acc, - 0x2b1dca, - 0x2b204c, - 0x2bb4c8, - 0x2bb6c8, - 0x2bb8c4, - 0x2bbc89, - 0x2bbec9, - 0x2bc10a, - 0x2bc389, - 0x2bc6c7, - 0x20010c, - 0x36ef86, - 0x26de48, - 0x220e06, - 0x387346, - 0x30fe47, - 0x341d08, - 0x25180b, - 0x283287, - 0x2aeb49, - 0x2474c9, - 0x255f87, - 0x2d6204, - 0x35efc7, - 0x29f606, - 0x219006, - 0x38d985, - 0x2ccd88, - 0x20ef04, - 0x20ef06, - 0x26fa4b, - 0x2a2389, - 0x364086, - 0x3a8409, - 0x3926c6, - 0x2fec08, - 0x214803, - 0x2083c5, - 0x219149, - 0x21fe05, - 0x3a6084, - 0x270fc6, - 0x3a5a85, - 0x2e6846, - 0x2fbc07, - 0x367186, - 0x2952cb, - 0x2b0647, - 0x2d2586, - 0x374346, - 0x3a5346, - 0x243849, - 0x26238a, - 0x2b6045, - 0x21f68d, - 0x2a4d46, - 0x391246, - 0x2e21c6, - 0x210205, - 0x2d3007, - 0x2962c7, - 0x23b68e, - 0x211003, - 0x2bd8c9, - 0x318fc9, - 0x20f647, - 0x276b87, - 0x299d85, - 0x306f45, - 0x43a7eacf, - 0x2c4807, - 0x2c49c8, - 0x2c5a44, - 0x2c5d06, - 0x43e43b02, - 0x2ca1c6, - 0x2cc5c6, - 0x251b4e, - 0x2e7f4a, - 0x21cd06, - 0x33fcca, - 0x3b4089, - 0x316fc5, - 0x393b48, - 0x3ad0c6, - 0x34ab88, - 0x30f788, - 0x25ab8b, - 0x227a45, - 0x366808, - 0x208d0c, - 0x281987, - 0x248b06, - 0x22f108, - 0x201948, - 0x44208382, - 0x362b0b, - 0x280bc9, - 0x363e89, - 0x209987, - 0x30e688, - 0x4460c648, - 0x3a8c0b, - 0x22b6c9, - 0x20870d, - 0x217e88, - 0x22c288, - 0x44a02282, - 0x31d784, - 0x44e23b42, - 0x2ebc06, - 0x452016c2, - 0x3a180a, - 0x201fc6, - 0x225f08, - 0x31ea08, - 0x2b7546, - 0x386986, - 0x2e6606, - 0x2a00c5, - 0x23b184, - 0x456feb84, - 0x338986, - 0x269047, - 0x45a2ab47, - 0x32be0b, - 0x305c09, - 0x23724a, - 0x251404, - 0x3193c8, - 0x243ccd, - 0x2e07c9, - 0x2e0a08, - 0x2e1149, - 0x2e26c4, - 0x200f04, - 0x269885, - 0x30b48b, - 0x272386, - 0x3387c5, - 0x281c49, - 0x227c48, - 0x29ca84, - 0x20f3c9, - 0x2b0585, - 0x2b7d08, - 0x20b4c7, - 0x323288, - 0x27a506, - 0x217287, - 0x28eb89, - 0x21c209, - 0x2460c5, - 0x231445, - 0x45e25242, - 0x39a144, - 0x2fd585, - 0x2a9746, - 0x2f89c5, - 0x268307, - 0x243405, - 0x243484, - 0x348a46, - 0x3902c7, - 0x243b46, - 0x325dc5, - 0x31d488, - 0x2281c5, - 0x332607, - 0x397409, - 0x2a24ca, - 0x22dac7, - 0x22dacc, - 0x2e3d46, - 0x226349, - 0x2ad585, - 0x2c6e08, - 0x211543, - 0x211545, - 0x2e9405, - 0x256cc7, - 0x46214f02, - 0x236e47, - 0x2d6786, - 0x343846, - 0x2e8cc6, - 0x201886, - 0x347e88, - 0x3583c5, - 0x2e35c7, - 0x2e35cd, - 0x202b43, - 0x3a35c5, - 0x3068c7, - 0x3864c8, - 0x386085, - 0x366c88, - 0x22a946, - 0x31f507, - 0x2bcf05, - 0x227ac6, - 0x3711c5, - 0x2ba40a, - 0x2eb1c6, - 0x236487, - 0x2c5bc5, - 0x35a387, - 0x35e244, - 0x3a6006, - 0x2f61c5, - 0x28158b, - 0x29f489, - 0x37a20a, - 0x246148, - 0x2ff148, - 0x300a4c, - 0x3047c7, - 0x32b388, - 0x32edc8, - 0x336085, - 0x2bc94a, - 0x35d109, - 0x46601082, - 0x205446, - 0x214684, - 0x3b1249, - 0x2220c9, - 0x24d307, - 0x26c307, - 0x358d09, - 0x210408, - 0x21040f, - 0x3477c6, - 0x20a0cb, - 0x2e9b05, - 0x2e9b07, - 0x2e9f49, - 0x20f346, - 0x20f347, - 0x3b3d85, - 0x232784, - 0x2633c6, - 0x201284, - 0x30ac07, - 0x345e08, - 0x46aee5c8, - 0x2eebc5, - 0x2eed07, - 0x238289, - 0x2740c4, - 0x3a3888, - 0x46f20b88, - 0x2c4204, - 0x2330c8, - 0x31d904, - 0x21f989, - 0x2230c5, - 0x47203e42, - 0x347805, - 0x220c85, - 0x29fc88, - 0x235347, - 0x47600cc2, - 0x2c81c5, - 0x246b46, - 0x256646, - 0x39a108, - 0x2ec008, - 0x2f8986, - 0x31b086, - 0x22a489, - 0x343786, - 0x37870b, - 0x30c145, - 0x20d7c6, - 0x390088, - 0x252606, - 0x28f506, - 0x21c64a, - 0x2ae4ca, - 0x24ce85, - 0x358487, - 0x2d8e06, - 0x47a03dc2, - 0x306a07, - 0x2c7a45, - 0x25fa84, - 0x25fa85, - 0x251306, - 
0x270447, - 0x2144c5, - 0x222184, - 0x2712c8, - 0x28f5c5, - 0x2cebc7, - 0x39c105, - 0x216805, - 0x247b04, - 0x28cbc9, - 0x39f908, - 0x2f5ec6, - 0x36fcc6, - 0x2c3f06, - 0x47ef3648, - 0x2f3847, - 0x2f3fcd, - 0x2f460c, - 0x2f4c09, - 0x2f4e49, - 0x48351e02, - 0x3a4803, - 0x24cf03, - 0x29f6c5, - 0x38cbca, - 0x31af46, - 0x2f8e05, - 0x2fc144, - 0x2fc14b, - 0x30d10c, - 0x30d94c, - 0x30dc55, - 0x311acd, - 0x313a0f, - 0x313dd2, - 0x31424f, - 0x314612, - 0x314a93, - 0x314f4d, - 0x31550d, - 0x31588e, - 0x315d4e, - 0x31658c, - 0x31694c, - 0x316d8b, - 0x31710e, - 0x31a1d2, - 0x31ad0c, - 0x31b8d0, - 0x327e52, - 0x328dcc, - 0x32948d, - 0x3297cc, - 0x32d8d1, - 0x32e74d, - 0x336b0d, - 0x33710a, - 0x33738c, - 0x337c8c, - 0x3384cc, - 0x338d4c, - 0x33c9d3, - 0x33d050, - 0x33d450, - 0x33dccd, - 0x33e2cc, - 0x33efc9, - 0x3402cd, - 0x340613, - 0x342e51, - 0x343293, - 0x343b4f, - 0x343f0c, - 0x34420f, - 0x3445cd, - 0x344bcf, - 0x344f90, - 0x345a0e, - 0x34b48e, - 0x34bbd0, - 0x34c7cd, - 0x34d14e, - 0x34d4cc, - 0x34e493, - 0x34fc8e, - 0x3503d0, - 0x3507d1, - 0x350c0f, - 0x350fd3, - 0x35198d, - 0x351ccf, - 0x35208e, - 0x352990, - 0x352d89, - 0x3539d0, - 0x35400f, - 0x35468f, - 0x354a52, - 0x355ece, - 0x35788d, - 0x35998d, - 0x359ccd, - 0x35ac4d, - 0x35af8d, - 0x35b2d0, - 0x35b6cb, - 0x35c14c, - 0x35c4cc, - 0x35c7cc, - 0x35cace, - 0x372990, - 0x3744d2, - 0x37494b, - 0x3750ce, - 0x37544e, - 0x375cce, - 0x37728b, - 0x48777856, - 0x378ecd, - 0x379354, - 0x37a98d, - 0x37c655, - 0x37d78d, - 0x37e10f, - 0x37e94f, - 0x38150f, - 0x3818ce, - 0x382b0d, - 0x384151, - 0x386b0c, - 0x386e0c, - 0x38710b, - 0x387a0c, - 0x387dcf, - 0x388192, - 0x388b4d, - 0x389b0c, + 0x2d1784, + 0x28e04a, + 0x28e508, + 0x2c2149, + 0x3648c5, + 0x2951c6, + 0x226bca, + 0x387646, + 0x26f5c7, + 0x2c1e0d, + 0x2a1f09, + 0x3597c5, + 0x339dc7, + 0x368388, + 0x26a888, + 0x314d07, + 0x20b246, + 0x217807, + 0x221143, + 0x33c004, + 0x3607c5, + 0x38dcc7, + 0x391d09, + 0x22a8c8, + 0x33fac5, + 0x242844, + 0x2f5bc5, + 0x38174d, + 0x203742, + 0x386ac6, + 0x377a06, + 0x2c8bca, + 0x37e686, + 0x38b5c5, + 0x368fc5, + 0x368fc7, + 0x3a010c, + 0x279b0a, + 0x290586, + 0x225085, + 0x290706, + 0x290a47, + 0x292846, + 0x296d8c, + 0x296009, + 0x48e16087, + 0x299745, + 0x299746, + 0x299d08, + 0x236785, + 0x2a8b45, + 0x2a9548, + 0x2a974a, + 0x49258142, + 0x4960c2c2, + 0x2e8f85, + 0x28b9c3, + 0x22b108, + 0x241d03, + 0x2a99c4, + 0x2f350b, + 0x34ef48, + 0x305148, + 0x49b67ec9, + 0x2af389, + 0x2afac6, + 0x2b0a88, + 0x2b0c89, + 0x2b1786, + 0x2b1905, + 0x372a86, + 0x2b1e49, + 0x319d87, + 0x25c446, + 0x233147, + 0x20e947, + 0x362e04, + 0x49f453c9, + 0x2cd008, + 0x353408, + 0x383d07, + 0x2c8686, + 0x235389, + 0x210147, + 0x34970a, + 0x330d48, + 0x349407, + 0x3b1546, + 0x2e834a, + 0x2733c8, + 0x30de45, + 0x36dac5, + 0x2f9807, + 0x371d49, + 0x3097cb, + 0x31e0c8, + 0x32d389, + 0x253487, + 0x2bad4c, + 0x2bb74c, + 0x2bba4a, + 0x2bbccc, + 0x2c5c08, + 0x2c5e08, + 0x2c6004, + 0x2c63c9, + 0x2c6609, + 0x2c684a, + 0x2c6ac9, + 0x2c6e07, + 0x3a448c, + 0x24b946, + 0x35d588, + 0x387706, + 0x330c06, + 0x3596c7, + 0x238ec8, + 0x2618cb, + 0x303207, + 0x359c49, + 0x251489, + 0x25bbc7, + 0x2e1e04, + 0x3643c7, + 0x2e1246, + 0x214046, + 0x2eb045, + 0x2c7408, + 0x2976c4, + 0x2976c6, + 0x2799cb, + 0x212cc9, + 0x209e06, + 0x20c709, + 0x392486, + 0x3aae08, + 0x214183, + 0x2fb985, + 0x215a09, + 0x224085, + 0x2f9644, + 0x27acc6, + 0x2ed005, + 0x2f7346, + 0x309ec7, + 0x328dc6, + 0x3a174b, + 0x22c447, + 0x234886, + 0x3742c6, + 0x390446, + 0x291189, + 0x240aca, + 0x2b8ec5, + 0x21898d, + 0x2a9846, + 0x2babc6, + 0x2ded86, 
+ 0x2f31c5, + 0x2e4d87, + 0x29fa87, + 0x22bd4e, + 0x20fbc3, + 0x2c8649, + 0x263709, + 0x32dd87, + 0x2804c7, + 0x2a2ec5, + 0x343f05, + 0x4a23734f, + 0x2d10c7, + 0x2d1288, + 0x2d25c4, + 0x2d2e86, + 0x4a64db02, + 0x2d7346, + 0x201686, + 0x2638ce, + 0x2ec54a, + 0x28b6c6, + 0x22d88a, + 0x209a09, + 0x323d05, + 0x393948, + 0x3aef46, + 0x359508, + 0x2392c8, + 0x34434b, + 0x233805, + 0x26ae88, + 0x2062cc, + 0x2bf807, + 0x252546, + 0x281fc8, + 0x2253c8, + 0x4aa09282, + 0x25604b, + 0x37b489, + 0x2cf2c9, + 0x2f02c7, + 0x3b3648, + 0x4ae289c8, + 0x20e64b, + 0x37dd09, + 0x33f58d, + 0x27d648, + 0x290188, + 0x4b201882, + 0x3b2f44, + 0x4b60dc42, + 0x2f9486, + 0x4ba038c2, + 0x2448ca, + 0x210546, + 0x22d208, + 0x289e88, + 0x2e4086, + 0x2eb5c6, + 0x2f7106, + 0x2f1b45, + 0x23c804, + 0x4be1de04, + 0x20ae86, + 0x29a787, + 0x4c2f7d07, + 0x2dc60b, + 0x32f089, + 0x227e0a, + 0x263144, + 0x369108, + 0x25c20d, + 0x2f1509, + 0x2f1748, + 0x2f2009, + 0x2f39c4, + 0x20ce44, + 0x283cc5, + 0x317b0b, + 0x34eec6, + 0x33c645, + 0x21fb89, + 0x233a08, + 0x263844, + 0x32db09, + 0x208f45, + 0x2c3788, + 0x238847, + 0x358c48, + 0x284e86, + 0x22b987, + 0x2984c9, + 0x311a89, + 0x372545, + 0x295645, + 0x4c626a82, + 0x306f44, + 0x30ce05, + 0x2c1846, + 0x327285, + 0x2b4707, + 0x20af85, + 0x27b7c4, + 0x3aca86, + 0x383487, + 0x232106, + 0x21e105, + 0x202608, + 0x233f85, + 0x205d07, + 0x20bc49, + 0x212e0a, + 0x2494c7, + 0x2494cc, + 0x24df46, + 0x2e9ac9, + 0x230505, + 0x2366c8, + 0x210c03, + 0x210c05, + 0x2f7b45, + 0x26e707, + 0x4ca27202, + 0x227a07, + 0x2e5206, + 0x345306, + 0x2e62c6, + 0x225306, + 0x2091c8, + 0x35b805, + 0x2e7b47, + 0x2e7b4d, + 0x21b083, + 0x21f305, + 0x21b087, + 0x26b308, + 0x21ac45, + 0x2281c8, + 0x2ab9c6, + 0x32b247, + 0x2c7bc5, + 0x233886, + 0x2cfac5, + 0x22dfca, + 0x2efec6, + 0x25dbc7, + 0x2bc605, + 0x2f44c7, + 0x2f8904, + 0x2f95c6, + 0x3624c5, + 0x32608b, + 0x2e10c9, + 0x23d74a, + 0x3725c8, + 0x3007c8, + 0x300f0c, + 0x306b47, + 0x308c48, + 0x30aa88, + 0x30dac5, + 0x338b4a, + 0x33aa09, + 0x4ce00202, + 0x200206, + 0x20d684, + 0x2ef889, + 0x275d49, + 0x27b1c7, + 0x2fa547, + 0x2b7c09, + 0x331308, + 0x33130f, + 0x2dfa86, + 0x2db10b, + 0x361545, + 0x361547, + 0x374c89, + 0x2171c6, + 0x32da87, + 0x2dde85, + 0x22f544, + 0x26e186, + 0x211984, + 0x2e6c47, + 0x34c5c8, + 0x4d2fb708, + 0x2fbe85, + 0x2fbfc7, + 0x245709, + 0x208444, + 0x208448, + 0x4d7190c8, + 0x23cdc4, + 0x230c88, + 0x3396c4, + 0x220389, + 0x333105, + 0x4da05bc2, + 0x2dfac5, + 0x2e6845, + 0x271b88, + 0x232847, + 0x4de03382, + 0x30cbc5, + 0x2d4e86, + 0x27a786, + 0x306f08, + 0x318308, + 0x327246, + 0x32e246, + 0x249009, + 0x345246, + 0x21708b, + 0x2a12c5, + 0x204686, + 0x382588, + 0x3152c6, + 0x298e46, + 0x21b94a, + 0x22f9ca, + 0x2e8245, + 0x35b8c7, + 0x2f0cc6, + 0x4e206602, + 0x21b1c7, + 0x2a9085, + 0x226b44, + 0x226b45, + 0x263046, + 0x27a447, + 0x20d405, + 0x22fb44, + 0x365008, + 0x298f05, + 0x33c8c7, + 0x39fa85, + 0x22df05, + 0x256b44, + 0x28fbc9, + 0x2ff988, + 0x2ecec6, + 0x2de9c6, + 0x2b9d46, + 0x4e700448, + 0x300647, + 0x3009cd, + 0x30120c, + 0x301809, + 0x301a49, + 0x4eb546c2, + 0x3a5503, + 0x20b303, + 0x2e1305, + 0x38ddca, + 0x327106, + 0x307905, + 0x30a084, + 0x30a08b, + 0x31bdcc, + 0x31c5cc, + 0x31c8d5, + 0x31d74d, + 0x31f44f, + 0x31f812, + 0x31fc8f, + 0x320052, + 0x3204d3, + 0x32098d, + 0x320f4d, + 0x3212ce, + 0x322a8e, + 0x3232cc, + 0x32368c, + 0x323acb, + 0x323e4e, + 0x324f92, + 0x326ecc, + 0x327610, + 0x3335d2, + 0x3347cc, + 0x334e8d, + 0x3351cc, + 0x337611, + 0x3384cd, + 0x33ac4d, + 0x33b24a, + 0x33b4cc, + 0x33bdcc, + 0x33c34c, + 0x33cbcc, + 0x33fc53, 
+ 0x340450, + 0x340850, + 0x340e4d, + 0x34144c, + 0x342209, + 0x342f0d, + 0x343253, + 0x344911, + 0x344d53, + 0x34560f, + 0x3459cc, + 0x345ccf, + 0x34608d, + 0x34668f, + 0x346a50, + 0x3474ce, + 0x34ac8e, + 0x34b590, + 0x34ca8d, + 0x34d40e, + 0x34d78c, + 0x34e753, + 0x351e0e, + 0x352390, + 0x352791, + 0x352bcf, + 0x352f93, + 0x35424d, + 0x35458f, + 0x35494e, + 0x354fd0, + 0x3553c9, + 0x356390, + 0x356acf, + 0x35714f, + 0x357512, + 0x359e8e, + 0x35a74d, + 0x35cc4d, + 0x35cf8d, + 0x35f68d, + 0x35f9cd, + 0x35fd10, + 0x36010b, + 0x36058c, + 0x36090c, + 0x360c0c, + 0x360f0e, + 0x372c10, + 0x374452, + 0x3748cb, + 0x374ece, + 0x37524e, + 0x375ace, + 0x37604b, + 0x4ef76396, + 0x37724d, + 0x378354, + 0x378e0d, + 0x37ae55, + 0x37c04d, + 0x37c9cf, + 0x37d20f, + 0x38028f, + 0x38064e, + 0x380acd, + 0x382f11, + 0x385ecc, + 0x3861cc, + 0x3864cb, + 0x386c4c, + 0x38824f, + 0x388612, + 0x388fcd, 0x389f8c, - 0x38a28d, - 0x38a5cf, - 0x38a98e, - 0x38c88c, - 0x38ce4d, - 0x38d18b, - 0x38e9cc, - 0x38ef4d, - 0x38f28e, - 0x38f709, - 0x3909d3, - 0x3913cd, - 0x39170d, - 0x391d0c, - 0x39218e, - 0x392b0f, - 0x392ecc, - 0x3931cd, - 0x39350f, - 0x3938cc, - 0x393fcc, - 0x39444c, - 0x39474c, - 0x394e0d, - 0x395152, - 0x3957cc, - 0x395acc, - 0x395dd1, - 0x39620f, - 0x3965cf, - 0x396993, - 0x39764e, - 0x397bcf, - 0x397f8c, - 0x48b982ce, - 0x39864f, - 0x398a16, - 0x399c12, - 0x39b88c, - 0x39c24f, - 0x39c8cd, - 0x39cc0f, - 0x39cfcc, - 0x39d2cd, - 0x39d60d, - 0x39f4ce, - 0x3a058c, - 0x3a088c, - 0x3a0b90, - 0x3a3a91, - 0x3a3ecb, - 0x3a440c, - 0x3a470e, + 0x38a40c, + 0x38a70d, + 0x38aa4f, + 0x38ae0e, + 0x38da8c, + 0x38e04d, + 0x38e38b, + 0x38ee8c, + 0x38f40d, + 0x38f74e, + 0x38fac9, + 0x390c53, + 0x39118d, + 0x3914cd, + 0x391acc, + 0x391f4e, + 0x39290f, + 0x392ccc, + 0x392fcd, + 0x39330f, + 0x3936cc, + 0x3943cc, + 0x39484c, + 0x394b4c, + 0x39520d, + 0x395552, + 0x396c0c, + 0x396f0c, + 0x397211, + 0x39764f, + 0x397a0f, + 0x397dd3, + 0x398a8e, + 0x398e0f, + 0x3991cc, + 0x4f39950e, + 0x39988f, + 0x399c56, + 0x39b312, + 0x39d64c, + 0x39e14f, + 0x39e7cd, + 0x39eb0f, + 0x39eecc, + 0x39f1cd, + 0x39f50d, + 0x3a0c4e, + 0x3a2b8c, + 0x3a2e8c, + 0x3a3190, + 0x3a4991, + 0x3a4dcb, + 0x3a510c, + 0x3a540e, 0x3a7051, 0x3a748e, 0x3a780d, - 0x3ace8b, - 0x3adbcf, - 0x3aee94, - 0x21c2c2, - 0x21c2c2, - 0x205903, - 0x21c2c2, - 0x205903, - 0x21c2c2, - 0x205e02, - 0x3830c5, + 0x3aed0b, + 0x3afdcf, + 0x3b09d4, + 0x2630c2, + 0x2630c2, + 0x202583, + 0x2630c2, + 0x202583, + 0x2630c2, + 0x20ae82, + 0x372ac5, 0x3a6d4c, - 0x21c2c2, - 0x21c2c2, - 0x205e02, - 0x21c2c2, - 0x290d45, - 0x2a24c5, - 0x21c2c2, - 0x21c2c2, - 0x211d42, - 0x290d45, - 0x312d49, - 0x342b4c, - 0x21c2c2, - 0x21c2c2, - 0x21c2c2, - 0x21c2c2, - 0x3830c5, - 0x21c2c2, - 0x21c2c2, - 0x21c2c2, - 0x21c2c2, - 0x211d42, - 0x312d49, - 0x21c2c2, - 0x21c2c2, - 0x21c2c2, - 0x2a24c5, - 0x21c2c2, - 0x2a24c5, - 0x342b4c, + 0x2630c2, + 0x2630c2, + 0x20ae82, + 0x2630c2, + 0x29a385, + 0x212e05, + 0x2630c2, + 0x2630c2, + 0x211cc2, + 0x29a385, + 0x31e789, + 0x34460c, + 0x2630c2, + 0x2630c2, + 0x2630c2, + 0x2630c2, + 0x372ac5, + 0x2630c2, + 0x2630c2, + 0x2630c2, + 0x2630c2, + 0x211cc2, + 0x31e789, + 0x2630c2, + 0x2630c2, + 0x2630c2, + 0x212e05, + 0x2630c2, + 0x212e05, + 0x34460c, 0x3a6d4c, - 0x323ac3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x201604, - 0x238483, - 0x2264c3, - 0x141388, - 0x4db44, - 0xed208, - 0x200882, - 0x49a16582, - 0x240003, - 0x22b944, - 0x208f43, - 0x21eb04, - 0x231ac6, - 0x31d243, - 0x34aa44, - 0x26cc05, - 0x211003, - 0x238483, - 0x2264c3, - 0x24690a, - 0x3a5946, - 0x3757cc, - 0x880c8, - 
0x216582, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x2348c3, - 0x2cc5c6, - 0x238483, - 0x2264c3, - 0x21bd03, - 0xd42, - 0xdb7c7, - 0xca908, - 0xfd8e, - 0x85792, - 0x2ecb, - 0x4a71f345, - 0x4ab76d0c, - 0x131007, - 0x16e747, - 0x119b8a, - 0x3c550, - 0x2988, - 0x16e847, - 0xae14b, - 0x112089, - 0x173507, - 0x10ec87, - 0x77847, - 0x169c6, - 0x132d48, - 0x4b01e1c6, - 0xa910d, - 0x119550, - 0x4b400d82, - 0x131948, - 0x680c7, - 0x84109, - 0x53e06, - 0x908c8, - 0x5e82, - 0x9c34a, - 0x8e507, - 0xeaec7, - 0xa5449, - 0xa7188, - 0x157f45, - 0xe168e, - 0xe9ce, - 0x14c4f, - 0x15309, - 0x33c49, - 0x6528b, - 0x7cdcf, - 0x8cdcc, - 0xdcbcb, - 0xd99c8, - 0x12bd07, - 0xede48, - 0x11e50b, - 0x13e94c, - 0x14624c, - 0x14f98c, - 0x1524cd, - 0x2b848, - 0x30cc2, - 0x1afb89, - 0x14c24b, - 0xbdb06, - 0xce6c5, - 0xd2d10, - 0x1229c6, - 0x51f05, - 0xd6908, - 0xdc047, - 0xdc307, - 0x163287, - 0xeba4a, - 0xca78a, - 0x161186, - 0x8db8d, - 0x180248, - 0x45e88, - 0x47a49, - 0xeb58c, - 0x1526cb, - 0x171ac4, - 0xf3109, - 0x44bc6, - 0x6202, - 0x155646, - 0xfefc7, - 0x6c2, - 0xc0e85, - 0x481, - 0x3b583, - 0x4af9eb86, - 0x90c43, - 0x1f82, - 0x3a4c4, + 0x368883, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x2964c4, + 0x204ac3, + 0x200383, + 0x1f08, + 0x15444, + 0xc1348, + 0x204cc2, + 0x5020d1c2, + 0x243403, + 0x24c944, + 0x202743, + 0x38e8c4, + 0x22e886, + 0x213843, + 0x31aa84, + 0x288845, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x25084a, + 0x241f46, + 0x3755cc, + 0x15f048, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x22d603, + 0x201686, + 0x204ac3, + 0x200383, + 0x21aa03, + 0xaa288, + 0x3942, + 0x513856c5, + 0x2ac47, + 0xd7a88, + 0xc0ce, + 0x8c792, + 0x16780b, + 0x516db445, + 0x51adb44c, + 0xf207, + 0x13ecc7, + 0x1698ca, + 0x3efd0, + 0x1acd05, + 0x16e1cb, + 0x1672c8, + 0x13edc7, + 0x2f64b, + 0x11dd09, + 0x150f47, + 0x1b3c47, + 0x81187, + 0x20586, + 0x94b88, + 0x52028b86, + 0xafbcd, + 0x169290, + 0x52401742, + 0xfb48, + 0x71f47, + 0x7f149, + 0x59b46, + 0x99f08, + 0x74842, + 0xa4f4a, + 0x2d587, + 0x3b9c7, + 0xaa689, + 0xad708, + 0x15ae05, + 0x194e8e, + 0x14d4e, + 0x26f4f, + 0x2ccc9, + 0x4c809, + 0x77e8b, + 0x878cf, + 0x8fdcc, + 0xadd8b, + 0xc4d08, + 0xdc507, + 0x162908, + 0xfe04b, + 0x12a54c, + 0x141acc, + 0x147f4c, + 0x14b0cd, + 0x17de88, + 0x42602, + 0x104389, + 0x18528b, + 0xc8886, + 0x116f8b, + 0xdd5ca, + 0xde185, + 0xe4a90, + 0x1294c6, + 0x63c85, + 0xe6448, + 0xee547, + 0xee807, + 0x5e987, + 0xf92ca, + 0xd790a, + 0x177ac6, + 0x97ccd, + 0x1ae208, + 0x56608, + 0x56a89, + 0xb9905, + 0x19de4c, + 0x14b2cb, + 0x171c84, + 0xff749, + 0x8146, + 0x16c2, + 0x125886, + 0x10d947, + 0x6c82, + 0xcb605, + 0x29b04, + 0x701, + 0x2bc43, + 0x51fadbc6, + 0x9a283, + 0x8a42, + 0x2d584, + 0x1442, + 0x4ae04, + 0x1342, + 0x2f82, + 0x3682, + 0x1124c2, + 0xe542, + 0xdb442, + 0x2ac2, + 0x1c402, + 0x26982, + 0x4d02, + 0x3b02, + 0x34682, + 0x31b83, + 0x7d02, + 0x1c2, + 0x41c2, + 0xda42, + 0x642, + 0xdc2, + 0x18d82, + 0x1a02, + 0x2282, + 0x1d42, + 0x4303, + 0xb02, + 0x2f02, + 0xb5e02, + 0x1b02, + 0x5d82, + 0x32c2, + 0x73c2, + 0x17c2, + 0x1f02, + 0x173102, + 0x73fc2, + 0x5e402, + 0x4ac3, + 0x2c2, + 0x9282, 0x1002, - 0x24104, - 0x9c2, - 0x1182, - 0x3182, - 0x4f882, - 0x2ec2, - 0x104e82, - 0x8c2, - 0x1dec2, - 0x37e42, + 0x14602, + 0x1724c5, + 0x6ec2, + 0x1202, + 0x41703, 0x682, - 0xf82, - 0xb1d82, - 0x343c3, - 0x8042, - 0x202, - 0x6ac2, - 0x21842, - 0xb2c2, - 0x32a02, - 0xf1c2, - 0x42, - 0x5602, - 0xa82, - 0x2243, - 0x74c2, - 0x1982, - 0xb09c2, - 0x9682, - 0xb402, - 0x61c2, - 0xa242, - 0x9a1c2, - 0x6742, - 0x172e82, - 0xe02, - 0x9f82, - 0x38483, - 0x1dc2, - 0x8382, 
- 0x25c2, - 0x2182, - 0x46045, - 0x6a42, - 0x41542, - 0x3e503, - 0x4b42, - 0x7982, - 0x1402, - 0x15c2, - 0x1882, - 0xcc2, - 0x3282, - 0x6202, - 0x6b247, - 0x212d03, - 0x200882, - 0x22d183, - 0x2343c3, - 0x211cc3, - 0x201d83, - 0x2348c3, - 0x238483, - 0x2025c3, - 0x2264c3, - 0x290c83, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x211cc3, - 0x211003, - 0x238483, - 0x2025c3, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, + 0xd42, + 0x1702, + 0xe5c2, + 0x1ac2, + 0x3382, + 0x6902, + 0x16c2, + 0x73c07, + 0x213dc3, + 0x204cc2, + 0x2d0783, + 0x231b83, + 0x2135c3, + 0x201d43, + 0x22d603, + 0x204ac3, + 0x20abc3, + 0x200383, + 0x29a2c3, + 0x1a5c3, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x2135c3, + 0x20fbc3, + 0x204ac3, + 0x20abc3, + 0x200383, + 0x2d0783, + 0x231b83, + 0x200383, + 0x2d0783, + 0x231b83, + 0x332ec3, 0x200041, - 0x211003, - 0x238483, - 0x201f43, - 0x2264c3, - 0x323ac3, - 0x22d183, - 0x2343c3, - 0x25f643, - 0x211cc3, - 0x3112c3, - 0x27cc03, - 0x201f83, - 0x25a603, - 0x21eb03, - 0x201604, - 0x238483, - 0x2264c3, - 0x223ec3, - 0x305fc4, - 0x21e143, - 0x4803, - 0x203e03, - 0x2a0cc8, - 0x332a44, - 0x317f8a, - 0x330786, - 0xda404, - 0x3a2e47, - 0x22138a, - 0x347689, - 0x3b3507, - 0x20054a, - 0x323ac3, - 0x3822cb, - 0x368b49, - 0x2c4005, - 0x2ca007, - 0x16582, - 0x22d183, - 0x326647, - 0x22a1c5, - 0x2d60c9, - 0x2343c3, - 0x227846, - 0x2ba0c3, - 0x9f543, - 0xfaa86, - 0x4f4c6, - 0x11d1c7, - 0x3a8786, - 0x213e45, - 0x20c507, - 0x338b87, - 0x4d61eb03, - 0x329007, - 0x35eec3, - 0x38e705, - 0x201604, - 0x221c08, - 0x2af6cc, - 0x2ad6c5, - 0x363c06, - 0x326507, - 0x224b47, - 0x205087, - 0x206e48, - 0x2597cf, - 0x280b05, - 0x240107, - 0x27e347, - 0x2a500a, - 0x2ee209, - 0x2d7185, - 0x2d830a, - 0xdea46, - 0x2ba145, - 0x374b84, - 0x2b7486, - 0x2fe5c7, - 0x230bc7, - 0x2a0a08, - 0x214805, - 0x22a0c6, - 0x3a8245, - 0x37a445, - 0x21fd44, - 0x31e907, - 0x347cca, - 0x365a48, - 0x2edb86, - 0x348c3, - 0x2cf145, - 0x238d06, - 0x200346, - 0x251e06, - 0x211003, - 0x388dc7, - 0x27e2c5, - 0x238483, - 0x3b378d, - 0x2025c3, - 0x2a0b08, - 0x3aac44, - 0x205fc5, - 0x2a4f06, - 0x236b06, - 0x20d6c7, - 0x355747, - 0x2641c5, - 0x2264c3, - 0x322c07, - 0x33b009, - 0x258f49, - 0x2434ca, - 0x242a42, - 0x38e6c4, - 0x2d7ac4, - 0x210d87, - 0x236d08, - 0x2dce89, - 0x3a3489, - 0x2df807, - 0x334206, - 0xe1406, - 0x2e26c4, - 0x2e2cca, - 0x2e5c88, - 0x2e64c9, - 0x2b6306, - 0x3003c5, - 0x365908, - 0x2bf10a, - 0x25b743, - 0x306146, - 0x2df907, - 0x207c85, - 0x3aab05, - 0x242083, - 0x252dc4, - 0x21bf45, - 0x27e747, - 0x39fa45, - 0x2f3bc6, - 0xfa705, - 0x212a43, - 0x21cdc9, - 0x238dcc, - 0x2ab90c, - 0x2c65c8, - 0x28f8c7, - 0x2ef748, - 0x2efa8a, - 0x2f0a4b, - 0x368c88, - 0x363d08, - 0x36ee86, - 0x341985, - 0x36498a, - 0x21ebc5, - 0x203e42, - 0x2bcdc7, - 0x26a8c6, - 0x353505, - 0x2f1949, - 0x38dec5, - 0x376785, - 0x38e2c9, - 0x238b86, - 0x261b88, - 0x2d1343, - 0x3a88c6, - 0x270f06, - 0x2fdcc5, - 0x2fdcc9, - 0x2dd5c9, - 0x242d87, - 0xfdb44, - 0x2fdb47, - 0x3a3389, - 0x221585, - 0x16f208, - 0x355545, - 0x355245, - 0x399309, - 0x201482, - 0x21df44, - 0x202e82, - 0x2074c2, - 0x293c45, - 0x2da188, - 0x374e45, - 0x2bc883, - 0x2bc885, - 0x2ca3c3, - 0x2111c2, - 0x264a04, - 0x233503, - 0x207a82, - 0x358704, - 0x2d8003, - 0x2014c2, - 0x293cc3, - 0x2898c4, - 0x2d7703, - 0x23a804, - 0x201bc2, - 0x21bc03, - 0x219283, - 0x208d82, - 0x35b202, - 0x2dd409, - 0x2011c2, - 0x286304, - 0x200dc2, - 0x365784, - 0x3341c4, - 0x3a1cc4, - 0x206202, - 0x23e802, - 0x20dc03, - 0x2f0083, - 0x23f704, - 0x27e8c4, - 0x2d13c4, - 0x2dd7c4, - 0x2fd083, 
- 0x3491c3, - 0x2de9c4, - 0x2fee04, - 0x2ff346, - 0x260dc2, - 0x216582, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x200882, - 0x323ac3, - 0x22d183, - 0x2343c3, - 0x205403, - 0x21eb03, - 0x201604, - 0x2dd6c4, - 0x212444, - 0x238483, - 0x2264c3, - 0x21bd03, - 0x2e3444, - 0x29e943, - 0x2b3783, - 0x3436c4, - 0x355346, - 0x20ca03, - 0x16e747, - 0x219b83, - 0x208143, - 0x2b1a03, - 0x206003, - 0x2348c3, - 0x376f85, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x2db443, - 0x230743, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x202243, - 0x238483, - 0x234fc4, - 0x2264c3, - 0x29b704, - 0x2b7285, - 0x16e747, - 0x216582, - 0x201a42, - 0x201f82, - 0x205902, - 0x201502, - 0x22d183, - 0x2374c4, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x212444, - 0x238483, - 0x2264c3, - 0x217643, - 0x224104, - 0x880c8, - 0x22d183, - 0x2025c3, - 0x245dc4, - 0x880c8, - 0x22d183, - 0x247344, - 0x201604, - 0x2025c3, - 0x202282, - 0x2264c3, - 0x215cc3, - 0x52dc4, - 0x2e9cc5, - 0x203e42, - 0x2fef43, - 0x200882, - 0x880c8, - 0x216582, - 0x2343c3, - 0x21eb03, - 0x200a82, - 0x2264c3, - 0x200882, - 0x200707, - 0x254705, - 0x29f844, - 0x385f86, - 0x366a4b, - 0x263a49, - 0x363b46, - 0x340a89, - 0x2b2c88, - 0x207103, - 0x880c8, - 0x22a807, - 0x364288, - 0x24f843, - 0x21d184, - 0x2226cb, - 0x259145, - 0x24b188, - 0x2f2ec9, - 0x25a203, - 0x22d183, - 0x205348, - 0x2ee787, - 0x24fe46, - 0x2343c3, - 0x24f947, - 0x21eb03, - 0x339b06, - 0x202243, - 0x22f9c7, - 0x33a6c7, - 0x390e87, - 0x31e885, - 0x209403, - 0x205dcb, - 0x36b4c8, - 0x227548, - 0x33b1c6, - 0x367989, - 0x335b07, - 0x2f9145, - 0x339444, - 0x3478c8, - 0x23d54a, - 0x23d789, - 0x346f03, - 0x2696c5, - 0x21bb83, - 0x3ad706, - 0x387704, - 0x2fdec8, - 0x38748b, - 0x346dc5, - 0x2b7006, - 0x2b8e85, - 0x2b9608, - 0x2ba287, - 0x206cc7, - 0x317b87, - 0x294544, - 0x30a5c7, - 0x294546, - 0x211003, - 0x2c2088, - 0x268383, - 0x2cab08, - 0x2d3f45, - 0x3251c8, - 0x2345c7, - 0x238483, - 0x2447c3, - 0x287dc4, - 0x323647, - 0x208fc3, - 0x33a78b, - 0x205003, - 0x268344, - 0x2e9d48, - 0x2264c3, - 0x2f3d45, - 0x311145, - 0x3250c6, - 0x2117c5, - 0x2d4304, - 0x202002, - 0x2e69c3, - 0x374c0a, - 0x3a1583, - 0x306709, - 0x30a2c6, - 0x204e48, - 0x289406, - 0x214347, - 0x2db948, - 0x39a588, - 0x2ebd43, - 0x293d03, - 0x272c09, - 0x2f4c83, - 0x2d8d06, - 0x254386, - 0x39f7c6, - 0x3a1e09, - 0x2fd784, - 0x20e3c3, - 0x2d6d05, - 0x349589, - 0x206dc3, - 0x35a244, - 0x2f2ac4, - 0x36fc84, - 0x35f906, - 0x3b4303, - 0x3b4308, - 0x256a08, - 0x39db86, - 0x2f8f4b, - 0x2f9288, - 0x2f948b, - 0x2fb949, - 0x2fa987, - 0x2fbdc8, - 0x2fc983, - 0x22ad86, - 0x3a9247, - 0x295245, - 0x34b789, - 0x33530d, - 0x204c91, - 0x22eb85, - 0x200882, - 0x216582, - 0x22d183, - 0x2343c3, - 0x22d684, - 0x21eb03, - 0x202243, - 0x211003, - 0x238483, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x2348c3, - 0x238483, - 0x2264c3, - 0x265903, - 0x217643, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x201604, - 0x2348c3, - 0x238483, - 0x2264c3, - 0x221e42, + 0x20fbc3, + 0x204ac3, + 0x2104c3, + 0x200383, + 0x368883, + 0x2d0783, + 0x231b83, + 0x20fb43, + 0x2135c3, + 0x2300c3, + 0x287703, + 0x210503, + 0x234743, + 0x332ec3, + 0x2964c4, + 0x204ac3, + 0x200383, + 0x24abc3, + 0x200604, + 0x250c83, + 0x5283, + 0x3abd03, + 0x329b88, + 0x2e8384, + 0x2c264a, + 0x224906, + 0x10f9c4, + 0x38c2c7, + 0x21eeca, + 0x2df949, + 0x3a3b07, + 0x3a7dca, + 0x368883, 
+ 0x2e900b, + 0x303349, + 0x2b9e45, + 0x2d7187, + 0xd1c2, + 0x2d0783, + 0x204d07, + 0x248d45, + 0x2e1cc9, + 0x231b83, + 0x233606, + 0x2c56c3, + 0xe1183, + 0x109686, + 0x60546, + 0x137c7, + 0x217546, + 0x222645, + 0x2cf187, + 0x2da587, + 0x54b32ec3, + 0x334a07, + 0x3642c3, + 0x3a2385, + 0x2964c4, + 0x32e3c8, + 0x2751cc, + 0x3a5745, + 0x2a2086, + 0x204bc7, + 0x37ef47, + 0x25b8c7, + 0x31f248, + 0x307ecf, + 0x2dfb85, + 0x243507, + 0x2394c7, + 0x2a9b0a, + 0x2d7e49, + 0x30bc85, + 0x32194a, + 0x1b06, + 0x2c5745, + 0x376284, + 0x289dc6, + 0x2f8cc7, + 0x242507, + 0x38cbc8, + 0x214185, + 0x248c46, + 0x20c545, + 0x387845, + 0x212c04, + 0x2e3f87, + 0x20900a, + 0x234d48, + 0x356946, + 0x2d603, + 0x2e0405, + 0x26c686, + 0x3a46c6, + 0x263b86, + 0x20fbc3, + 0x389247, + 0x239445, + 0x204ac3, + 0x2dd88d, + 0x20abc3, + 0x38ccc8, + 0x39a5c4, + 0x27ba85, + 0x2a9a06, + 0x2362c6, + 0x204587, + 0x2ae707, + 0x270b05, + 0x200383, + 0x27f2c7, + 0x329709, + 0x22b689, + 0x2f590a, + 0x24cd82, + 0x3a2344, + 0x2e76c4, + 0x261787, + 0x2278c8, + 0x2ef309, + 0x21f1c9, + 0x2f0487, + 0x303806, + 0xf22c6, + 0x2f39c4, + 0x2f3fca, + 0x2f6a08, + 0x2f6fc9, + 0x2bfe86, + 0x2b6ec5, + 0x234c08, + 0x2c9c4a, + 0x22c6c3, + 0x200786, + 0x2f0587, + 0x217f85, + 0x39a485, + 0x2717c3, + 0x258a04, + 0x36da85, + 0x288e47, + 0x2ffac5, + 0x2ed686, + 0xfff05, + 0x264a03, + 0x28b789, + 0x27b84c, + 0x2a7e0c, + 0x2d3bc8, + 0x3ade87, + 0x2fc8c8, + 0x2fcc0a, + 0x2fd84b, + 0x303488, + 0x33f408, + 0x2363c6, + 0x262685, + 0x200f4a, + 0x219545, + 0x205bc2, + 0x2c7a87, + 0x2a32c6, + 0x355ec5, + 0x38e989, + 0x26b785, + 0x285ec5, + 0x3a1f49, + 0x257cc6, + 0x3b1088, + 0x23e0c3, + 0x3b3306, + 0x27ac06, + 0x30ba85, + 0x30ba89, + 0x2bc289, + 0x24d0c7, + 0x10b904, + 0x30b907, + 0x21f0c9, + 0x23c905, + 0x4bbc8, + 0x3b3205, + 0x339505, + 0x376c89, + 0x205ac2, + 0x2e95c4, + 0x20d782, + 0x200b02, + 0x2ce985, + 0x30f748, + 0x2b9845, + 0x2c6fc3, + 0x2c6fc5, + 0x2d7543, + 0x210882, + 0x2e30c4, + 0x351903, + 0x204c82, + 0x35bb44, + 0x2e85c3, + 0x200e82, + 0x25e903, + 0x291704, + 0x2e7083, + 0x246f04, + 0x202602, + 0x21a903, + 0x215b43, + 0x206342, + 0x33c282, + 0x2bc0c9, + 0x202d82, + 0x28d304, + 0x201782, + 0x234a84, + 0x3037c4, + 0x2bcc44, + 0x2016c2, + 0x241a02, + 0x220883, + 0x225f83, + 0x387944, + 0x269e44, + 0x2bc484, + 0x2ce884, + 0x30b143, + 0x34f743, + 0x201a84, + 0x30d784, + 0x30e786, + 0x2e7782, + 0x20d1c2, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x204cc2, + 0x368883, + 0x2d0783, + 0x231b83, + 0x2001c3, + 0x332ec3, + 0x2964c4, + 0x2bc384, + 0x213184, + 0x204ac3, + 0x200383, + 0x21aa03, + 0x2f4684, + 0x32f983, + 0x2bf3c3, + 0x345184, + 0x3b3006, + 0x211503, + 0x13ecc7, + 0x234fc3, + 0x23a943, + 0x2b6703, + 0x265383, + 0x22d603, + 0x2db6c5, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x2ed143, + 0x2ab343, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204303, + 0x204ac3, + 0x23ee04, + 0x200383, + 0x26a104, + 0x2c2d45, + 0x13ecc7, + 0x20d1c2, + 0x2000c2, + 0x208a42, + 0x202082, + 0x200382, + 0x2d0783, + 0x23a184, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x213184, + 0x204ac3, + 0x200383, + 0x213e83, + 0x24ae04, + 0x15f048, + 0x2d0783, + 0x20abc3, + 0x1a5c3, + 0x24fe44, + 0x15f048, + 0x2d0783, + 0x251304, + 0x2964c4, + 0x20abc3, + 0x201882, + 0x200383, + 0x2202c3, + 0x58a04, + 0x370145, + 0x205bc2, + 0x30d8c3, + 0x204cc2, + 0x15f048, + 0x20d1c2, + 0x231b83, + 0x332ec3, + 0x201d42, + 0x200383, + 0x204cc2, + 0x15f048, + 0x231b83, + 0x332ec3, + 0x204303, + 
0x20fbc3, + 0x30b544, + 0x204cc2, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x2da904, + 0x332ec3, + 0x204303, + 0x20fbc3, + 0x204ac3, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x22d603, + 0x204ac3, + 0x200383, + 0x26a103, + 0x213e83, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x1a5c3, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x2964c4, + 0x22d603, + 0x204ac3, + 0x200383, + 0x217082, 0x200141, - 0x200882, + 0x204cc2, 0x200001, - 0x313b02, - 0x880c8, - 0x220045, - 0x200481, - 0x2d183, - 0x200741, - 0x200081, - 0x200c81, - 0x2333c2, - 0x36e144, - 0x383043, - 0x2007c1, - 0x200901, - 0x200041, - 0x2001c1, - 0x2dda87, - 0x2b8f8f, - 0x2cacc6, + 0x31f542, + 0x15f048, + 0x21d105, + 0x200701, + 0xd0783, + 0x200101, 0x2000c1, - 0x25b806, - 0x200341, - 0x200ac1, - 0x341ece, - 0x201501, - 0x2264c3, - 0x2014c1, - 0x260e05, - 0x202002, - 0x241f85, - 0x200b81, + 0x201e41, + 0x29da82, + 0x36da04, + 0x372a43, + 0x200181, + 0x200941, + 0x200041, + 0x200081, + 0x2ed7c7, + 0x2eeccf, + 0x2fc146, + 0x201481, + 0x289786, + 0x200c01, + 0x2002c1, + 0x33168e, + 0x200381, + 0x200383, + 0x200e81, + 0x279e45, + 0x210582, + 0x2716c5, + 0x2003c1, + 0x200201, 0x200241, + 0x205bc2, 0x200a01, - 0x203e42, - 0x2002c1, - 0x204701, - 0x20dec1, - 0x200781, - 0x200641, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x21ca03, - 0x22d183, - 0x21eb03, - 0x89ec8, - 0x211003, - 0x238483, - 0x2264c3, - 0x14da788, - 0x880c8, - 0x441c4, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x238483, - 0x2264c3, - 0x204803, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x22d684, - 0x2264c3, - 0x28fb85, - 0x27f304, - 0x22d183, - 0x238483, - 0x2264c3, - 0xa014a, - 0x216582, - 0x22d183, - 0x2326c9, - 0x2343c3, - 0x23af09, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x2e24c8, - 0x2100c7, - 0x2e9cc5, - 0x200707, - 0x366a4b, - 0x365188, - 0x340a89, - 0x22a807, - 0x205348, - 0x339b06, - 0x33a6c7, - 0x227548, - 0x33b1c6, - 0x335b07, - 0x23d789, - 0x37c409, - 0x2b7006, - 0x2b7e45, - 0x2c2088, - 0x268383, - 0x2cab08, - 0x2345c7, - 0x208fc3, - 0x326387, - 0x2117c5, - 0x2dc608, - 0x310205, - 0x293d03, - 0x33b9c9, - 0x2aa9c7, - 0x35a244, - 0x2f2ac4, - 0x2f8f4b, - 0x2f9288, - 0x2fa987, - 0x22d183, - 0x2343c3, - 0x211cc3, - 0x2264c3, - 0x21e503, - 0x21eb03, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x653cb, - 0x200882, - 0x216582, - 0x2264c3, - 0x880c8, - 0x200882, - 0x216582, - 0x201f82, - 0x200a82, - 0x200342, - 0x238483, - 0x201502, - 0x200882, - 0x323ac3, - 0x216582, - 0x22d183, - 0x2343c3, - 0x201f82, - 0x21eb03, - 0x202243, - 0x211003, - 0x212444, - 0x238483, - 0x21ab43, - 0x2264c3, - 0x2fd784, - 0x223ec3, - 0x21eb03, - 0x216582, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2025c3, - 0x2264c3, - 0x39bd47, - 0x22d183, - 0x256b87, - 0x2edfc6, - 0x219203, - 0x206ac3, - 0x21eb03, - 0x220883, - 0x201604, - 0x284804, - 0x2d43c6, - 0x20bac3, - 0x238483, - 0x2264c3, - 0x28fb85, - 0x20d4c4, - 0x31a083, - 0x217a03, - 0x2bcdc7, - 0x20b445, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x219f02, - 0x380383, - 0x2b2c83, - 0x323ac3, - 0x5822d183, - 0x22b782, - 0x2343c3, - 0x208f43, - 0x21eb03, - 0x201604, - 0x36b683, - 0x280b03, - 0x211003, - 0x212444, - 0x58606bc2, - 0x238483, - 0x2264c3, - 0x232dc3, - 0x245483, - 0x221e42, - 0x223ec3, - 0x880c8, - 0x21eb03, - 0x307e44, - 0x323ac3, - 0x216582, - 0x22d183, - 0x2374c4, - 0x2343c3, - 0x21eb03, - 0x201604, - 0x202243, - 0x2f5d44, - 0x307b04, - 0x2cc5c6, 
- 0x212444, - 0x238483, - 0x2264c3, + 0x201a81, + 0x2005c1, + 0x2007c1, + 0x200cc1, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, 0x21bd03, - 0x26a8c6, - 0x1737cb, - 0x1e1c6, - 0x23d0a, - 0xfcb8a, - 0x880c8, - 0x3a8204, - 0x22d183, - 0x323a84, - 0x2343c3, - 0x247b84, - 0x21eb03, - 0x251283, - 0x211003, - 0x238483, - 0x2264c3, - 0x32248b, - 0x39d94a, - 0x3b298c, - 0x200882, - 0x216582, - 0x201f82, - 0x2a9c05, - 0x201604, - 0x206742, - 0x211003, - 0x307b04, - 0x205902, - 0x201502, - 0x217642, - 0x221e42, - 0x123ac3, - 0x357309, - 0x254208, - 0x301189, - 0x33a509, - 0x35bd8a, - 0x23808a, - 0x20cc82, - 0x21dec2, - 0x16582, - 0x22d183, - 0x200bc2, - 0x2402c6, - 0x354502, - 0x202982, - 0x3861ce, - 0x21bc4e, - 0x278107, - 0x32fe47, - 0x26b302, - 0x2343c3, - 0x21eb03, - 0x202842, - 0x200a82, - 0x23d1cf, - 0x204ec2, - 0x33b3c7, - 0x24cf87, - 0x256107, - 0x26204c, - 0x268b4c, - 0x2057c4, - 0x2696ca, - 0x21bb82, - 0x209682, - 0x2b2684, - 0x215bc2, - 0x2bb4c2, - 0x268d84, - 0x21ac42, - 0x20b402, - 0x33b247, - 0x233285, - 0x20a242, - 0x23d144, - 0x372e82, - 0x2cea08, - 0x238483, - 0x3a2308, - 0x203082, - 0x235885, - 0x3a25c6, - 0x2264c3, - 0x206a42, - 0x2dd0c7, - 0x2002, - 0x26ccc5, - 0x393e85, - 0x2166c2, - 0x226442, - 0x31864a, - 0x26404a, - 0x210fc2, - 0x376c04, - 0x201a02, - 0x38e588, + 0x2d0783, + 0x332ec3, + 0x91d48, + 0x20fbc3, + 0x204ac3, + 0x48803, + 0x200383, + 0x14ebc48, + 0x15f048, + 0x4dcc4, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x204ac3, + 0x200383, + 0x205283, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x2da904, + 0x200383, + 0x293ac5, + 0x343984, + 0x2d0783, + 0x204ac3, + 0x200383, + 0x16b18a, + 0x20d1c2, + 0x2d0783, + 0x22f489, + 0x231b83, + 0x2d2389, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x2f37c8, + 0x226647, + 0x370145, + 0x3a7f87, + 0x26b0cb, + 0x215cc8, + 0x32eac9, + 0x228087, + 0x200108, + 0x36f906, + 0x2344c7, + 0x29c108, + 0x2ab806, + 0x31d407, + 0x2aa449, + 0x2ba749, + 0x2c2ac6, + 0x2c38c5, + 0x2cce08, + 0x2b4783, + 0x2d7c88, + 0x231d87, + 0x206583, + 0x31d287, + 0x217905, + 0x2eeb08, + 0x359105, + 0x2cea43, + 0x23c289, + 0x2b0e87, + 0x35d504, + 0x2ff244, + 0x307ccb, + 0x308288, + 0x309587, + 0x2d0783, + 0x231b83, + 0x2135c3, + 0x200383, + 0x236ec3, + 0x332ec3, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x77fcb, 0x204cc2, - 0x2fd448, - 0x2f64c7, - 0x2f67c9, - 0x26cd42, - 0x2fbb85, - 0x2546c5, - 0x2148cb, - 0x2bfdcc, - 0x22f848, - 0x2fbf48, - 0x260dc2, - 0x20d782, - 0x200882, - 0x880c8, - 0x216582, - 0x22d183, - 0x201f82, - 0x205902, - 0x201502, - 0x2264c3, - 0x217642, - 0x200882, - 0x5a616582, - 0x5aa1eb03, - 0x332683, - 0x206742, - 0x238483, - 0x364e83, - 0x2264c3, - 0x2db083, - 0x26b346, - 0x1617643, - 0x880c8, - 0x51f05, - 0xa7dcd, - 0x5f007, - 0x5b200182, - 0x5b601002, - 0x5ba04802, - 0x5be01842, - 0x5c2108c2, - 0x5c602ec2, - 0x16e747, - 0x5ca16582, - 0x5ce30542, - 0x5d21e582, - 0x5d600f82, - 0x21bc43, - 0x1b4284, - 0x20ddc3, - 0x5da18fc2, - 0x5de038c2, - 0x47887, - 0x5e214b82, - 0x5e600902, - 0x5ea02ac2, - 0x5ee082c2, - 0x5f205602, - 0x5f600a82, - 0xb97c5, - 0x226743, - 0x30ec04, - 0x5fa15bc2, - 0x5fe16c82, - 0x60200102, - 0x7508b, - 0x60600982, - 0x60e09782, - 0x61206742, - 0x61600342, - 0x61a50042, - 0x61e03042, - 0x6220e842, - 0x62600e02, - 0x62a06bc2, - 0x62e01302, - 0x63205902, - 0x6361d302, - 0x63a04242, - 0x63e425c2, - 0x133184, - 0x371183, - 0x64206602, - 0x64613942, - 0x64a06942, - 0x64e03742, - 0x65201502, - 0x65607a82, - 0x65547, - 0x65a07442, - 0x65e07482, - 0x66217642, - 0x6660a442, - 
0xeb58c, - 0x66a24982, - 0x66e6f2c2, - 0x6721dcc2, - 0x67603dc2, - 0x67a2d742, - 0x67e1eb82, - 0x68204702, - 0x68606f42, - 0x68a71282, - 0x68e15ac2, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x75803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x60b6b683, - 0x275803, - 0x377004, - 0x254106, - 0x2e6a83, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x36b683, - 0x275803, - 0x200482, - 0x200482, - 0x36b683, - 0x275803, - 0x6962d183, - 0x2343c3, - 0x2a0fc3, - 0x211003, - 0x238483, - 0x2264c3, - 0x880c8, - 0x216582, - 0x22d183, - 0x238483, - 0x2264c3, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x211003, - 0x238483, - 0x2264c3, - 0x245dc4, - 0x216582, - 0x22d183, - 0x308703, - 0x2343c3, - 0x247344, - 0x211cc3, - 0x21eb03, - 0x201604, - 0x202243, - 0x211003, - 0x238483, - 0x2264c3, - 0x215cc3, - 0x2e9cc5, - 0x241403, - 0x223ec3, - 0x216582, - 0x22d183, - 0x36b683, - 0x238483, - 0x2264c3, - 0x200882, - 0x323ac3, - 0x880c8, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x231ac6, - 0x201604, - 0x202243, - 0x212444, - 0x238483, - 0x2264c3, - 0x21bd03, - 0x22d183, - 0x2343c3, - 0x238483, - 0x2264c3, - 0x22d183, - 0x1e1c6, - 0x2343c3, - 0x21eb03, - 0xd1906, - 0x238483, - 0x2264c3, - 0x308a48, - 0x30b989, - 0x31bcc9, - 0x326c48, - 0x37efc8, - 0x37efc9, - 0x333c5, - 0x200882, - 0x20b285, - 0x231b43, - 0x6c216582, - 0x2343c3, - 0x21eb03, - 0x22f647, - 0x206003, - 0x211003, - 0x238483, - 0x201f43, - 0x210783, - 0x2025c3, - 0x2264c3, - 0x3a5946, - 0x203e42, - 0x223ec3, - 0x880c8, - 0x200882, - 0x323ac3, - 0x216582, - 0x22d183, - 0x2343c3, - 0x21eb03, - 0x201604, - 0x211003, - 0x238483, - 0x2264c3, - 0x217643, - 0x14fa806, + 0x20d1c2, + 0x200383, + 0x15f048, + 0x204cc2, + 0x20d1c2, + 0x208a42, + 0x201d42, + 0x203cc2, + 0x204ac3, + 0x200382, + 0x204cc2, + 0x368883, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x208a42, + 0x332ec3, + 0x204303, + 0x20fbc3, + 0x213184, + 
0x204ac3, + 0x2183c3, + 0x200383, + 0x30b544, + 0x24abc3, + 0x332ec3, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x20abc3, + 0x200383, + 0x39db07, + 0x2d0783, + 0x26e5c7, + 0x362a86, + 0x215ac3, + 0x2041c3, + 0x332ec3, + 0x209e43, + 0x2964c4, + 0x38b704, + 0x30dbc6, + 0x201303, + 0x204ac3, + 0x200383, + 0x293ac5, + 0x318244, + 0x369dc3, + 0x37ed83, + 0x2c7a87, + 0x2387c5, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x203782, + 0x3ae343, + 0x2c2d43, + 0x368883, + 0x5fed0783, + 0x209c02, + 0x231b83, + 0x202743, + 0x332ec3, + 0x2964c4, + 0x23a0c3, + 0x2dfb83, + 0x20fbc3, + 0x213184, + 0x6020c002, + 0x204ac3, + 0x200383, + 0x209103, + 0x229b03, + 0x217082, + 0x24abc3, + 0x15f048, + 0x332ec3, + 0x1a5c3, + 0x2957c4, + 0x368883, + 0x20d1c2, + 0x2d0783, + 0x23a184, + 0x231b83, + 0x332ec3, + 0x2964c4, + 0x204303, + 0x2cee84, + 0x222044, + 0x201686, + 0x213184, + 0x204ac3, + 0x200383, + 0x21aa03, + 0x2a32c6, + 0x3ddcb, + 0x28b86, + 0x4aa0a, + 0x10adca, + 0x15f048, + 0x20c504, + 0x2d0783, + 0x368844, + 0x231b83, + 0x256bc4, + 0x332ec3, + 0x262fc3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x32e84b, + 0x39f84a, + 0x3b478c, + 0x204cc2, + 0x20d1c2, + 0x208a42, + 0x2b0405, + 0x2964c4, + 0x201f02, + 0x20fbc3, + 0x222044, + 0x202082, + 0x200382, + 0x20c4c2, + 0x217082, + 0x168883, + 0xd882, + 0x2b2409, + 0x259f88, + 0x332d49, + 0x234309, + 0x23b18a, + 0x24550a, + 0x20a182, + 0x21c402, + 0xd1c2, + 0x2d0783, + 0x220802, + 0x2436c6, + 0x356fc2, + 0x20a542, + 0x21ad8e, + 0x21a94e, + 0x281a47, + 0x204a47, + 0x221202, + 0x231b83, + 0x332ec3, + 0x20b502, + 0x201d42, + 0x4143, + 0x24058f, + 0x26b142, + 0x362cc7, + 0x2fa1c7, + 0x39d487, + 0x31e28c, + 0x364d0c, + 0x202444, + 0x283b0a, + 0x21a882, + 0x201b02, + 0x2bc744, + 0x22b1c2, + 0x2c5c02, + 0x364f44, + 0x2184c2, + 0x205d82, + 0x5d83, + 0x2ab887, + 0x33d885, + 0x2073c2, + 0x240504, + 0x373102, + 0x2df088, + 0x204ac3, + 0x203808, + 0x203ac2, + 0x232d85, + 0x203ac6, + 0x200383, + 0x206ec2, + 0x2ef547, + 0x10582, + 0x350845, + 0x31d185, + 0x207c82, + 0x236b82, + 0x3a860a, + 0x27098a, + 0x212bc2, + 0x353f84, + 0x2018c2, + 0x3a2208, + 0x219682, + 0x2a2588, + 0x304987, + 0x304c89, + 0x2037c2, + 0x309e45, + 0x247e85, + 0x21424b, + 0x2ca84c, + 0x22c208, + 0x3186c8, + 0x2e7782, + 0x204642, + 0x204cc2, + 0x15f048, + 0x20d1c2, + 0x2d0783, + 0x208a42, + 0x202082, + 0x200382, + 0x200383, + 0x20c4c2, + 0x204cc2, + 0x6260d1c2, + 0x62b32ec3, + 0x205d83, + 0x201f02, + 0x204ac3, + 0x3a8fc3, + 0x200383, + 0x2ec383, + 0x273d06, + 0x1613e83, + 0x15f048, + 0x63c85, + 0xae2cd, + 0xaafca, + 0x6ebc7, + 0x63201b82, + 0x63601442, + 0x63a00f82, + 0x63e02e02, + 0x642125c2, + 0x6460e542, + 0x13ecc7, + 0x64a0d1c2, + 0x64e0e482, + 0x6520fe42, + 0x65603b02, + 0x21a943, + 0x102c4, + 0x220a43, + 0x65a14002, + 0x65e023c2, + 0x51847, + 0x66214502, + 0x66600b82, + 0x66a00542, + 0x66e0a3c2, + 0x67202282, + 0x67601d42, + 0xbe445, + 0x221443, + 0x3b3bc4, + 0x67a2b1c2, + 0x67e42682, + 0x68202682, + 0x7e5cb, + 0x68600c02, + 0x68e513c2, + 0x69201f02, + 0x69603cc2, + 0x69a0bcc2, + 0x69e05f02, + 0x6a20b602, + 0x6a673fc2, + 0x6aa0c002, + 0x6ae04a02, + 0x6b202082, + 0x6b603702, + 0x6ba12982, + 0x6be31302, + 0x94fc4, + 0x358183, + 0x6c2126c2, + 0x6c61a582, + 0x6ca098c2, + 0x6ce00982, + 0x6d200382, + 0x6d604c82, + 0x78147, + 0x6da054c2, + 0x6de05502, + 0x6e20c4c2, + 0x6e609f42, + 0x19de4c, + 0x6ea22e82, + 0x6ee79242, + 0x6f200a02, + 0x6f606602, + 0x6fa019c2, + 0x6fe3b302, + 0x70206d02, + 0x70613882, + 0x70a7af82, + 0x70e43e02, + 0x20d882, + 0x23a0c3, + 
0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x75c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x68a3a0c3, + 0x2075c3, + 0x2db744, + 0x259e86, + 0x2f74c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x20d882, + 0x20d882, + 0x23a0c3, + 0x2075c3, + 0x716d0783, + 0x231b83, + 0x329e83, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x15f048, + 0x20d1c2, + 0x2d0783, + 0x204ac3, + 0x200383, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x24fe44, + 0x20d1c2, + 0x2d0783, + 0x3303c3, + 0x231b83, + 0x251304, + 0x2135c3, + 0x332ec3, + 0x2964c4, + 0x204303, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x2202c3, + 0x370145, + 0x2b2703, + 0x24abc3, + 0x20d1c2, + 0x2d0783, + 0x23a0c3, + 0x204ac3, + 0x200383, + 0x204cc2, + 0x368883, + 0x15f048, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x22e886, + 0x2964c4, + 0x204303, + 0x213184, + 0x204ac3, + 0x200383, + 0x21aa03, + 0x2d0783, + 0x231b83, + 0x204ac3, + 0x200383, + 0x2d0783, + 0x28b86, + 0x231b83, + 0x332ec3, + 0xe1946, + 0x204ac3, + 0x200383, + 0x315cc8, + 0x318509, + 0x327a09, + 0x332548, + 0x37d888, + 0x37d889, + 0x9da85, + 0x204cc2, + 0x238605, + 0x205d43, + 0x7420d1c2, + 0x231b83, + 0x332ec3, + 0x33e387, + 0x265383, + 0x20fbc3, + 0x204ac3, + 0x2104c3, + 0x212483, + 0x20abc3, + 0x200383, + 0x241f46, + 0x205bc2, + 0x24abc3, + 0x15f048, + 0x204cc2, + 0x368883, + 0x20d1c2, + 0x2d0783, + 0x231b83, + 0x332ec3, + 0x2964c4, + 0x20fbc3, + 0x204ac3, + 0x200383, + 0x213e83, + 0x153ca46, } // children is the list of nodes' children, the parent's wildcard bit and the @@ -8636,439 +8623,471 @@ var children = [...]uint32{ 0x40000000, 0x50000000, 0x60000000, - 0x185c611, - 0x1860617, - 0x1880618, - 0x19dc620, - 0x19f0677, - 0x1a0467c, - 0x1a14681, - 0x1a30685, - 0x1a3468c, - 0x1a4c68d, - 0x1a70693, - 0x1a7469c, - 0x1a8c69d, - 0x1a906a3, + 0x1860612, + 0x1864618, + 0x1884619, + 0x19e0621, + 0x19f4678, + 
0x1a0867d, + 0x1a18682, + 0x1a34686, + 0x1a3868d, + 0x1a5068e, + 0x1a74694, + 0x1a7869d, + 0x1a9069e, 0x1a946a4, - 0x1ab86a5, - 0x1abc6ae, - 0x21ac46af, - 0x1b0c6b1, - 0x1b106c3, - 0x1b306c4, - 0x1b446cc, - 0x1b486d1, - 0x1b786d2, - 0x1b946de, - 0x1bbc6e5, - 0x1bc86ef, - 0x1bcc6f2, - 0x1c606f3, - 0x1c74718, - 0x1c8871d, - 0x1cb8722, - 0x1cc872e, - 0x1cdc732, - 0x1d00737, - 0x1e18740, - 0x1e1c786, - 0x1e88787, - 0x1e9c7a2, - 0x1eb07a7, - 0x1eb87ac, - 0x1ec87ae, - 0x1ecc7b2, - 0x1ee47b3, - 0x1f2c7b9, - 0x1f447cb, - 0x1f487d1, - 0x1f4c7d2, - 0x1f547d3, - 0x1f907d5, - 0x61f947e4, - 0x1fa87e5, - 0x1fac7ea, - 0x1fb07eb, - 0x1fc07ec, - 0x20707f0, - 0x207481c, - 0x2207c81d, - 0x2208081f, + 0x1a986a5, + 0x1ac06a6, + 0x1ac46b0, + 0x21acc6b1, + 0x1b146b3, + 0x1b186c5, + 0x1b386c6, + 0x1b4c6ce, + 0x1b506d3, + 0x1b806d4, + 0x1b9c6e0, + 0x1bc46e7, + 0x1bd06f1, + 0x1bd46f4, + 0x1c686f5, + 0x1c7c71a, + 0x1c9071f, + 0x1cc0724, + 0x1cd0730, + 0x1ce4734, + 0x1d08739, + 0x1e20742, + 0x1e24788, + 0x1e90789, + 0x1ea47a4, + 0x1eb87a9, + 0x1ec07ae, + 0x1ed07b0, + 0x1ed47b4, + 0x1eec7b5, + 0x1f347bb, + 0x1f4c7cd, + 0x1f507d3, + 0x1f547d4, + 0x1f5c7d5, + 0x1f987d7, + 0x61f9c7e6, + 0x1fb07e7, + 0x1fbc7ec, + 0x1fc07ef, + 0x1fd07f0, + 0x20807f4, 0x2084820, - 0x20b8821, - 0x20bc82e, - 0x24f482f, - 0x2254493d, - 0x22548951, - 0x2570952, - 0x257895c, - 0x2257c95e, - 0x258495f, - 0x22594961, - 0x22598965, - 0x25a4966, - 0x225a8969, - 0x25ac96a, + 0x22090821, + 0x22098824, + 0x20cc826, + 0x20d0833, + 0x2514834, + 0x225ac945, 0x225b096b, - 0x25cc96c, - 0x25e4973, - 0x25e8979, - 0x25f897a, - 0x260097e, - 0x22634980, - 0x263898d, - 0x264898e, - 0x267c992, + 0x225b496c, + 0x225c096d, + 0x225c4970, + 0x225d0971, + 0x225d4974, + 0x225d8975, + 0x225dc976, + 0x225e0977, + 0x225e4978, + 0x225f0979, + 0x225f497c, + 0x2260097d, + 0x22604980, + 0x22608981, + 0x2260c982, + 0x22610983, + 0x22614984, + 0x2618985, + 0x2261c986, + 0x22628987, + 0x2262c98a, + 0x263498b, + 0x2264498d, + 0x22648991, + 0x2654992, + 0x22658995, + 0x265c996, + 0x22660997, + 0x267c998, 0x269499f, - 0x26a89a5, - 0x26d09aa, - 0x26f09b4, - 0x27209bc, - 0x27489c8, - 0x274c9d2, - 0x27709d3, - 0x27749dc, - 0x27889dd, - 0x278c9e2, - 0x27909e3, - 0x27b09e4, - 0x27c09ec, - 0x27d09f0, - 0x27d49f4, - 0x28489f5, - 0x2864a12, - 0x2870a19, - 0x2884a1c, - 0x289ca21, - 0x28b0a27, - 0x28c8a2c, - 0x28e0a32, - 0x28f8a38, - 0x2914a3e, - 0x292ca45, - 0x298ca4b, - 0x29a4a63, - 0x29a8a69, - 0x29bca6a, - 0x2a00a6f, - 0x2a80a80, - 0x2aacaa0, - 0x2ab0aab, - 0x2ab8aac, - 0x2ad8aae, - 0x2adcab6, - 0x2afcab7, - 0x2b04abf, - 0x2b3cac1, - 0x2b78acf, - 0x2b7cade, - 0x2bbcadf, - 0x2bd4aef, - 0x2bf8af5, - 0x2c18afe, - 0x31dcb06, - 0x31e8c77, - 0x3208c7a, - 0x33c4c82, - 0x3494cf1, - 0x3504d25, - 0x355cd41, - 0x3644d57, - 0x369cd91, - 0x36d8da7, - 0x37d4db6, - 0x38a0df5, - 0x3938e28, - 0x39c8e4e, - 0x3a2ce72, - 0x3c64e8b, - 0x3d1cf19, - 0x3de8f47, - 0x3e34f7a, - 0x3ebcf8d, - 0x3ef8faf, - 0x3f48fbe, - 0x3fc0fd2, - 0x63fc4ff0, - 0x63fc8ff1, - 0x63fccff2, - 0x4048ff3, - 0x40ad012, - 0x412902b, - 0x41a104a, - 0x4221068, - 0x428d088, - 0x43b90a3, - 0x44110ee, - 0x64415104, - 0x44ad105, - 0x453512b, - 0x458114d, - 0x45e9160, - 0x469117a, - 0x47591a4, - 0x47c11d6, - 0x48d51f0, - 0x648d9235, - 0x648dd236, - 0x4939237, - 0x499524e, - 0x4a25265, - 0x4aa1289, - 0x4ae52a8, - 0x4bc92b9, - 0x4bfd2f2, - 0x4c5d2ff, - 0x4cd1317, - 0x4d59334, - 0x4d99356, - 0x4e09366, - 0x64e0d382, - 0x64e11383, - 0x24e15384, - 0x4e2d385, - 0x4e4938b, - 0x4e8d392, - 0x4e9d3a3, - 0x4eb53a7, - 0x4f2d3ad, - 0x4f353cb, - 0x4f493cd, - 
0x4f613d2, - 0x4f893d8, - 0x4f8d3e2, - 0x4f953e3, - 0x4fa93e5, - 0x4fc53ea, - 0x4fc93f1, - 0x4fd13f2, - 0x500d3f4, - 0x5021403, - 0x5029408, - 0x503140a, - 0x503540c, - 0x505940d, - 0x507d416, - 0x509541f, - 0x5099425, - 0x50a1426, - 0x50a5428, - 0x50f9429, - 0x511d43e, - 0x513d447, - 0x515944f, - 0x5169456, - 0x517d45a, - 0x518145f, - 0x5189460, - 0x519d462, - 0x51ad467, - 0x51b146b, - 0x51cd46c, - 0x5a5d473, - 0x5a95697, - 0x5ac16a5, - 0x5ad96b0, - 0x5af96b6, - 0x5b196be, - 0x5b5d6c6, - 0x5b656d7, - 0x25b696d9, - 0x25b6d6da, - 0x5b716db, - 0x5c956dc, - 0x25c99725, - 0x25ca1726, - 0x25ca9728, - 0x25cb572a, - 0x5cb972d, - 0x5ce172e, - 0x5d09738, - 0x5d0d742, - 0x25d45743, - 0x5d59751, - 0x68b1756, - 0x68b5a2c, - 0x68b9a2d, - 0x268bda2e, - 0x68c1a2f, - 0x268c5a30, - 0x68c9a31, - 0x268d5a32, - 0x68d9a35, - 0x68dda36, - 0x268e1a37, - 0x68e5a38, - 0x268eda39, - 0x68f1a3b, - 0x68f5a3c, - 0x26905a3d, - 0x6909a41, - 0x690da42, - 0x6911a43, - 0x6915a44, - 0x26919a45, - 0x691da46, - 0x6921a47, - 0x6925a48, - 0x6929a49, - 0x26931a4a, - 0x6935a4c, - 0x6939a4d, - 0x693da4e, - 0x26941a4f, - 0x6945a50, - 0x2694da51, - 0x26951a53, - 0x696da54, - 0x6979a5b, - 0x69b9a5e, - 0x69bda6e, - 0x69e1a6f, - 0x6b31a78, - 0x26b39acc, - 0x26b3dace, - 0x26b41acf, - 0x6b49ad0, - 0x6c25ad2, - 0x6c29b09, - 0x6c55b0a, - 0x6c75b15, - 0x6c81b1d, - 0x6ca1b20, - 0x6cd9b28, - 0x6f71b36, - 0x702dbdc, - 0x7041c0b, - 0x7075c10, - 0x70a5c1d, - 0x70c1c29, - 0x70e5c30, - 0x7101c39, - 0x711dc40, - 0x7141c47, - 0x7151c50, - 0x7185c54, - 0x71a1c61, - 0x73adc68, - 0x73d1ceb, - 0x73f1cf4, - 0x7405cfc, - 0x7419d01, - 0x7439d06, - 0x74ddd0e, - 0x74f9d37, - 0x7515d3e, + 0x26989a5, + 0x26a89a6, + 0x26b09aa, + 0x26e49ac, + 0x26e89b9, + 0x26f89ba, + 0x27909be, + 0x227949e4, + 0x279c9e5, + 0x27a09e7, + 0x27b89e8, + 0x27cc9ee, + 0x27f49f3, + 0x28149fd, + 0x2844a05, + 0x286ca11, + 0x2870a1b, + 0x2894a1c, + 0x2898a25, + 0x28aca26, + 0x28b0a2b, + 0x28b4a2c, + 0x28d4a2d, + 0x28eca35, + 0x28f0a3b, + 0x228f4a3c, + 0x28f8a3d, + 0x2908a3e, + 0x290ca42, + 0x2984a43, + 0x29a0a61, + 0x29aca68, + 0x29c0a6b, + 0x29d8a70, + 0x29eca76, + 0x2a04a7b, + 0x2a1ca81, + 0x2a34a87, + 0x2a50a8d, + 0x2a68a94, + 0x2ac8a9a, + 0x2ae0ab2, + 0x2ae4ab8, + 0x2af8ab9, + 0x2b3cabe, + 0x2bbcacf, + 0x2be8aef, + 0x2becafa, + 0x2bf4afb, + 0x2c14afd, + 0x2c18b05, + 0x2c38b06, + 0x2c40b0e, + 0x2c78b10, + 0x2cb8b1e, + 0x2cbcb2e, + 0x2d0cb2f, + 0x2d10b43, + 0x22d14b44, + 0x2d2cb45, + 0x2d50b4b, + 0x2d70b54, + 0x3334b5c, + 0x3340ccd, + 0x3360cd0, + 0x351ccd8, + 0x35ecd47, + 0x365cd7b, + 0x36b4d97, + 0x379cdad, + 0x37f4de7, + 0x3830dfd, + 0x392ce0c, + 0x39f8e4b, + 0x3a90e7e, + 0x3b20ea4, + 0x3b84ec8, + 0x3dbcee1, + 0x3e74f6f, + 0x3f40f9d, + 0x3f8cfd0, + 0x4014fe3, + 0x4051005, + 0x40a1014, + 0x4119028, + 0x6411d046, + 0x64121047, + 0x64125048, + 0x41a1049, + 0x41fd068, + 0x427907f, + 0x42f109e, + 0x43710bc, + 0x43dd0dc, + 0x45090f7, + 0x4561142, + 0x64565158, + 0x45fd159, + 0x468517f, + 0x46d11a1, + 0x47391b4, + 0x47e11ce, + 0x48a91f8, + 0x491122a, + 0x4a25244, + 0x64a29289, + 0x64a2d28a, + 0x4a8928b, + 0x4ae52a2, + 0x4b752b9, + 0x4bf12dd, + 0x4c352fc, + 0x4d1930d, + 0x4d4d346, + 0x4dad353, + 0x4e2136b, + 0x4ea9388, + 0x4ee93aa, + 0x4f593ba, + 0x64f5d3d6, + 0x64f613d7, + 0x24f653d8, + 0x4f7d3d9, + 0x4f993df, + 0x4fdd3e6, + 0x4fed3f7, + 0x50053fb, + 0x507d401, + 0x508541f, + 0x5099421, + 0x50b1426, + 0x50d942c, + 0x50dd436, + 0x50e5437, + 0x50f9439, + 0x511543e, + 0x5119445, + 0x5121446, + 0x515d448, + 0x5171457, + 0x517945c, + 0x518145e, + 0x5185460, + 0x51a9461, + 0x51cd46a, + 0x51e5473, 
+ 0x51e9479, + 0x51f147a, + 0x51f547c, + 0x524d47d, + 0x5271493, + 0x529149c, + 0x52ad4a4, + 0x52bd4ab, + 0x52d14af, + 0x52d54b4, + 0x52dd4b5, + 0x52f14b7, + 0x53014bc, + 0x53054c0, + 0x53214c1, + 0x5bb14c8, + 0x5be96ec, + 0x5c156fa, + 0x5c2d705, + 0x5c4d70b, + 0x5c6d713, + 0x5cb171b, + 0x5cb972c, + 0x25cbd72e, + 0x25cc172f, + 0x5cc5730, + 0x5e01731, + 0x25e05780, + 0x25e11781, + 0x25e19784, + 0x25e25786, + 0x5e29789, + 0x5e2d78a, + 0x5e5578b, + 0x5e7d795, + 0x5e8179f, + 0x5eb97a0, + 0x5ecd7ae, + 0x6a257b3, + 0x6a29a89, + 0x6a2da8a, + 0x26a31a8b, + 0x6a35a8c, + 0x26a39a8d, + 0x6a3da8e, + 0x26a49a8f, + 0x6a4da92, + 0x6a51a93, + 0x26a55a94, + 0x6a59a95, + 0x26a61a96, + 0x6a65a98, + 0x6a69a99, + 0x26a79a9a, + 0x6a7da9e, + 0x6a81a9f, + 0x6a85aa0, + 0x6a89aa1, + 0x26a8daa2, + 0x6a91aa3, + 0x6a95aa4, + 0x6a99aa5, + 0x6a9daa6, + 0x26aa5aa7, + 0x6aa9aa9, + 0x6aadaaa, + 0x6ab1aab, + 0x26ab5aac, + 0x6ab9aad, + 0x26ac1aae, + 0x26ac5ab0, + 0x6ae1ab1, + 0x6aedab8, + 0x6b2dabb, + 0x6b31acb, + 0x6b55acc, + 0x6b59ad5, + 0x6cc1ad6, + 0x26cc5b30, + 0x26ccdb31, + 0x26cd1b33, + 0x26cd5b34, + 0x6cddb35, + 0x6db9b37, + 0x6dbdb6e, + 0x6de9b6f, + 0x6dedb7a, + 0x6e0db7b, + 0x6e19b83, + 0x6e39b86, + 0x6e71b8e, + 0x7109b9c, + 0x71c5c42, + 0x71d9c71, + 0x720dc76, + 0x723dc83, + 0x7259c8f, + 0x727dc96, + 0x7299c9f, + 0x72b5ca6, + 0x72d9cad, + 0x72e9cb6, + 0x72edcba, + 0x7321cbb, + 0x733dcc8, + 0x7359ccf, + 0x737dcd6, + 0x739dcdf, + 0x73b1ce7, + 0x73c5cec, + 0x73c9cf1, + 0x73e9cf2, + 0x748dcfa, + 0x74a9d23, + 0x74c9d2a, + 0x74cdd32, + 0x74d1d33, + 0x74d5d34, + 0x74e9d35, + 0x7509d3a, + 0x7515d42, 0x7519d45, - 0x751dd46, - 0x7521d47, - 0x7535d48, - 0x7555d4d, - 0x7561d55, - 0x7565d58, - 0x7595d59, - 0x7615d65, - 0x7629d85, - 0x762dd8a, - 0x7645d8b, - 0x7649d91, - 0x7655d92, - 0x7659d95, - 0x7675d96, - 0x76b1d9d, - 0x76b5dac, - 0x76d5dad, - 0x7725db5, - 0x773ddc9, - 0x7791dcf, - 0x7795de4, - 0x7799de5, - 0x77ddde6, - 0x77eddf7, - 0x7825dfb, - 0x7855e09, - 0x7991e15, - 0x79b5e64, - 0x79e1e6d, - 0x79ede78, - 0x79f1e7b, - 0x7b01e7c, - 0x7b0dec0, - 0x7b19ec3, - 0x7b25ec6, - 0x7b31ec9, - 0x7b3decc, - 0x7b49ecf, - 0x7b55ed2, - 0x7b61ed5, - 0x7b6ded8, + 0x7549d46, + 0x75c9d52, + 0x75ddd72, + 0x75e1d77, + 0x75f9d78, + 0x75fdd7e, + 0x7609d7f, + 0x760dd82, + 0x7629d83, + 0x7665d8a, + 0x7669d99, + 0x7689d9a, + 0x76d9da2, + 0x76f1db6, + 0x7745dbc, + 0x7749dd1, + 0x774ddd2, + 0x7751dd3, + 0x7795dd4, + 0x77a5de5, + 0x77ddde9, + 0x780ddf7, + 0x7955e03, + 0x7979e55, + 0x79a5e5e, + 0x79b1e69, + 0x79b9e6c, + 0x7ac9e6e, + 0x7ad5eb2, + 0x7ae1eb5, + 0x7aedeb8, + 0x7af9ebb, + 0x7b05ebe, + 0x7b11ec1, + 0x7b1dec4, + 0x7b29ec7, + 0x7b35eca, + 0x7b41ecd, + 0x7b4ded0, + 0x7b59ed3, + 0x7b65ed6, + 0x7b6ded9, 0x7b79edb, 0x7b85ede, 0x7b91ee1, 0x7b9dee4, - 0x7ba5ee7, - 0x7bb1ee9, - 0x7bbdeec, - 0x7bc9eef, - 0x7bd5ef2, - 0x7be1ef5, - 0x7bedef8, - 0x7bf9efb, - 0x7c05efe, - 0x7c11f01, - 0x7c1df04, - 0x7c29f07, - 0x7c35f0a, - 0x7c41f0d, + 0x7ba9ee7, + 0x7bb5eea, + 0x7bc1eed, + 0x7bcdef0, + 0x7bd9ef3, + 0x7be5ef6, + 0x7bf1ef9, + 0x7bfdefc, + 0x7c09eff, + 0x7c15f02, + 0x7c21f05, + 0x7c2df08, + 0x7c39f0b, + 0x7c41f0e, 0x7c4df10, 0x7c59f13, 0x7c65f16, 0x7c71f19, - 0x7c79f1c, - 0x7c85f1e, - 0x7c91f21, - 0x7c9df24, - 0x7ca9f27, - 0x7cb5f2a, - 0x7cc1f2d, - 0x7ccdf30, - 0x7cd9f33, - 0x7ce5f36, + 0x7c7df1c, + 0x7c89f1f, + 0x7c95f22, + 0x7ca1f25, + 0x7cadf28, + 0x7cb9f2b, + 0x7cc5f2e, + 0x7cd1f31, + 0x7cddf34, + 0x7ce5f37, 0x7cf1f39, 0x7cfdf3c, 0x7d09f3f, 0x7d15f42, - 0x7d1df45, - 0x7d29f47, - 0x7d35f4a, - 0x7d41f4d, - 0x7d4df50, - 0x7d59f53, - 0x7d65f56, - 
0x7d71f59, - 0x7d7df5c, - 0x7d81f5f, - 0x7d8df60, - 0x7da5f63, - 0x7da9f69, - 0x7db9f6a, - 0x7dd1f6e, - 0x7e15f74, - 0x7e29f85, - 0x7e5df8a, - 0x7e6df97, - 0x7e89f9b, - 0x7ea1fa2, - 0x7ea5fa8, - 0x27ee9fa9, - 0x7eedfba, - 0x7f19fbb, - 0x7f1dfc6, + 0x7d21f45, + 0x7d2df48, + 0x7d39f4b, + 0x7d45f4e, + 0x7d49f51, + 0x7d55f52, + 0x7d6df55, + 0x7d71f5b, + 0x7d81f5c, + 0x7d99f60, + 0x7dddf66, + 0x7df1f77, + 0x7e25f7c, + 0x7e35f89, + 0x7e51f8d, + 0x7e69f94, + 0x7e6df9a, + 0x27eb1f9b, + 0x7eb5fac, + 0x7ee1fad, + 0x7ee5fb8, } -// max children 434 (capacity 511) -// max text offset 27930 (capacity 32767) +// max children 466 (capacity 511) +// max text offset 28023 (capacity 32767) // max text length 36 (capacity 63) -// max hi 8135 (capacity 16383) -// max lo 8134 (capacity 16383) +// max hi 8121 (capacity 16383) +// max lo 8120 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go index 5433f3b17..f60c80e79 100644 --- a/vendor/golang.org/x/net/publicsuffix/table_test.go +++ b/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -541,6 +541,7 @@ var rules = [...]string{ "org.cw", "cx", "gov.cx", + "cy", "ac.cy", "biz.cy", "com.cy", @@ -2207,9 +2208,7 @@ var rules = [...]string{ "aso.kumamoto.jp", "choyo.kumamoto.jp", "gyokuto.kumamoto.jp", - "hitoyoshi.kumamoto.jp", "kamiamakusa.kumamoto.jp", - "kashima.kumamoto.jp", "kikuchi.kumamoto.jp", "kumamoto.kumamoto.jp", "mashiki.kumamoto.jp", @@ -3968,20 +3967,21 @@ var rules = [...]string{ "net.ng", "org.ng", "sch.ng", + "ni", + "ac.ni", + "biz.ni", + "co.ni", "com.ni", - "gob.ni", "edu.ni", - "org.ni", - "nom.ni", - "net.ni", - "mil.ni", - "co.ni", - "biz.ni", - "web.ni", - "int.ni", - "ac.ni", + "gob.ni", "in.ni", "info.ni", + "int.ni", + "mil.ni", + "net.ni", + "nom.ni", + "org.ni", + "web.ni", "nl", "bv.nl", "no", @@ -4775,6 +4775,7 @@ var rules = [...]string{ "net.om", "org.om", "pro.om", + "onion", "org", "pa", "ac.pa", @@ -5126,133 +5127,9 @@ var rules = [...]string{ "org.rs", "ru", "ac.ru", - "com.ru", "edu.ru", - "int.ru", - "net.ru", - "org.ru", - "pp.ru", - "adygeya.ru", - "altai.ru", - "amur.ru", - "arkhangelsk.ru", - "astrakhan.ru", - "bashkiria.ru", - "belgorod.ru", - "bir.ru", - "bryansk.ru", - "buryatia.ru", - "cbg.ru", - "chel.ru", - "chelyabinsk.ru", - "chita.ru", - "chukotka.ru", - "chuvashia.ru", - "dagestan.ru", - "dudinka.ru", - "e-burg.ru", - "grozny.ru", - "irkutsk.ru", - "ivanovo.ru", - "izhevsk.ru", - "jar.ru", - "joshkar-ola.ru", - "kalmykia.ru", - "kaluga.ru", - "kamchatka.ru", - "karelia.ru", - "kazan.ru", - "kchr.ru", - "kemerovo.ru", - "khabarovsk.ru", - "khakassia.ru", - "khv.ru", - "kirov.ru", - "koenig.ru", - "komi.ru", - "kostroma.ru", - "krasnoyarsk.ru", - "kuban.ru", - "kurgan.ru", - "kursk.ru", - "lipetsk.ru", - "magadan.ru", - "mari.ru", - "mari-el.ru", - "marine.ru", - "mordovia.ru", - "msk.ru", - "murmansk.ru", - "nalchik.ru", - "nnov.ru", - "nov.ru", - "novosibirsk.ru", - "nsk.ru", - "omsk.ru", - "orenburg.ru", - "oryol.ru", - "palana.ru", - "penza.ru", - "perm.ru", - "ptz.ru", - "rnd.ru", - "ryazan.ru", - "sakhalin.ru", - "samara.ru", - "saratov.ru", - "simbirsk.ru", - "smolensk.ru", - "spb.ru", - "stavropol.ru", - "stv.ru", - "surgut.ru", - "tambov.ru", - "tatarstan.ru", - "tom.ru", - "tomsk.ru", - "tsaritsyn.ru", - "tsk.ru", - "tula.ru", - "tuva.ru", - "tver.ru", - "tyumen.ru", - "udm.ru", - "udmurtia.ru", - "ulan-ude.ru", - "vladikavkaz.ru", - "vladimir.ru", - "vladivostok.ru", - "volgograd.ru", - "vologda.ru", - "voronezh.ru", - 
"vrn.ru", - "vyatka.ru", - "yakutia.ru", - "yamal.ru", - "yaroslavl.ru", - "yekaterinburg.ru", - "yuzhno-sakhalinsk.ru", - "amursk.ru", - "baikal.ru", - "cmw.ru", - "fareast.ru", - "jamal.ru", - "kms.ru", - "k-uralsk.ru", - "kustanai.ru", - "kuzbass.ru", - "mytis.ru", - "nakhodka.ru", - "nkz.ru", - "norilsk.ru", - "oskol.ru", - "pyatigorsk.ru", - "rubtsovsk.ru", - "snz.ru", - "syzran.ru", - "vdonsk.ru", - "zgrad.ru", "gov.ru", + "int.ru", "mil.ru", "test.ru", "rw", @@ -6379,7 +6256,6 @@ var rules = [...]string{ "education", "email", "emerck", - "emerson", "energy", "engineer", "engineering", @@ -6542,6 +6418,7 @@ var rules = [...]string{ "honda", "honeywell", "horse", + "hospital", "host", "hosting", "hot", @@ -7293,43 +7170,64 @@ var rules = [...]string{ "*.alces.network", "*.alwaysdata.net", "cloudfront.net", - "compute.amazonaws.com", - "ap-northeast-1.compute.amazonaws.com", - "ap-northeast-2.compute.amazonaws.com", - "ap-southeast-1.compute.amazonaws.com", - "ap-southeast-2.compute.amazonaws.com", - "eu-central-1.compute.amazonaws.com", - "eu-west-1.compute.amazonaws.com", - "sa-east-1.compute.amazonaws.com", - "us-gov-west-1.compute.amazonaws.com", - "us-west-1.compute.amazonaws.com", - "us-west-2.compute.amazonaws.com", - "compute-1.amazonaws.com", - "z-1.compute-1.amazonaws.com", - "z-2.compute-1.amazonaws.com", + "*.compute.amazonaws.com", + "*.compute-1.amazonaws.com", + "*.compute.amazonaws.com.cn", "us-east-1.amazonaws.com", - "compute.amazonaws.com.cn", - "cn-north-1.compute.amazonaws.com.cn", - "elasticbeanstalk.com", - "elb.amazonaws.com", - "s3.amazonaws.com", + "elasticbeanstalk.cn-north-1.amazonaws.com.cn", + "*.elasticbeanstalk.com", + "*.elb.amazonaws.com", + "*.elb.amazonaws.com.cn", + "*.s3.amazonaws.com", "s3-ap-northeast-1.amazonaws.com", "s3-ap-northeast-2.amazonaws.com", + "s3-ap-south-1.amazonaws.com", "s3-ap-southeast-1.amazonaws.com", "s3-ap-southeast-2.amazonaws.com", + "s3-ca-central-1.amazonaws.com", "s3-eu-central-1.amazonaws.com", "s3-eu-west-1.amazonaws.com", "s3-external-1.amazonaws.com", - "s3-external-2.amazonaws.com", "s3-fips-us-gov-west-1.amazonaws.com", "s3-sa-east-1.amazonaws.com", "s3-us-gov-west-1.amazonaws.com", + "s3-us-east-2.amazonaws.com", "s3-us-west-1.amazonaws.com", "s3-us-west-2.amazonaws.com", "s3.ap-northeast-2.amazonaws.com", + "s3.ap-south-1.amazonaws.com", "s3.cn-north-1.amazonaws.com.cn", + "s3.ca-central-1.amazonaws.com", "s3.eu-central-1.amazonaws.com", + "s3.us-east-2.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + "s3-website-us-east-1.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ca-central-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "t3l3p0rt.net", + 
"tele.amune.org", "on-aptible.com", + "user.party.eus", "pimienta.org", "poivron.org", "potager.org", @@ -7402,6 +7300,15 @@ var rules = [...]string{ "co.nl", "co.no", "*.platform.sh", + "dyn.cosidns.de", + "dynamisches-dns.de", + "dnsupdater.de", + "internet-dns.de", + "l-o-g-i-n.de", + "dynamic-dns.info", + "feste-ip.net", + "knx-server.net", + "static-access.net", "realm.cz", "*.cryptonomic.net", "cupcake.is", @@ -7701,8 +7608,19 @@ var rules = [...]string{ "webhop.org", "worse-than.tv", "writesthisblog.com", + "ddnss.de", + "dyn.ddnss.de", + "dyndns.ddnss.de", + "dyndns1.de", + "dyn-ip24.de", + "home-webserver.de", + "dyn.home-webserver.de", + "myhome-server.de", + "ddnss.org", "dynv6.net", "e4.cz", + "enonic.io", + "customer.enonic.io", "eu.org", "al.eu.org", "asso.eu.org", @@ -7764,11 +7682,14 @@ var rules = [...]string{ "us-1.evennode.com", "us-2.evennode.com", "apps.fbsbx.com", + "map.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", "a.ssl.fastly.net", "b.ssl.fastly.net", "global.ssl.fastly.net", - "a.prod.fastly.net", - "global.prod.fastly.net", + "fastlylb.net", + "map.fastlylb.net", "fhapp.xyz", "firebaseapp.com", "flynnhub.com", @@ -7778,9 +7699,12 @@ var rules = [...]string{ "fbxos.fr", "freebox-os.fr", "freeboxos.fr", + "myfusion.cloud", + "futurehosting.at", "futuremailing.at", "*.ex.ortsinfo.at", "*.kunden.ortsinfo.at", + "*.statics.cloud", "service.gov.uk", "github.io", "githubusercontent.com", @@ -7790,7 +7714,7 @@ var rules = [...]string{ "gist.githubcloud.com", "*.githubcloudusercontent.com", "gitlab.io", - "ro.com", + "homeoffice.gov.uk", "ro.im", "shop.ro", "goip.de", @@ -8036,6 +7960,7 @@ var rules = [...]string{ "pantheonsite.io", "gotpantheon.com", "mypep.link", + "on-web.fr", "xen.prgmr.com", "priv.at", "protonet.io", @@ -8054,19 +7979,34 @@ var rules = [...]string{ "sandcats.io", "logoip.de", "logoip.com", + "firewall-gateway.com", + "firewall-gateway.de", + "my-gateway.de", + "my-router.de", + "spdns.de", + "spdns.eu", + "firewall-gateway.net", + "my-firewall.org", + "myfirewall.org", + "spdns.org", "biz.ua", "co.ua", "pp.ua", + "shiftedit.io", "myshopblocks.com", + "1kapp.com", + "appchizi.com", + "applinzi.com", "sinaapp.com", "vipsinaapp.com", - "1kapp.com", "bounty-full.com", "alpha.bounty-full.com", "beta.bounty-full.com", "static.land", "dev.static.land", "sites.static.land", + "apps.lair.io", + "*.stolos.io", "spacekit.io", "stackspace.space", "diskstation.me", @@ -8094,16 +8034,41 @@ var rules = [...]string{ "*.transurl.eu", "*.transurl.nl", "tuxfamily.org", + "dd-dns.de", + "diskstation.eu", + "diskstation.org", + "dray-dns.de", + "draydns.de", + "dyn-vpn.de", + "dynvpn.de", + "mein-vigor.de", + "my-vigor.de", + "my-wan.de", + "syno-ds.de", + "synology-diskstation.de", + "synology-ds.de", "hk.com", "hk.org", "ltd.hk", "inc.hk", "lib.de.us", "router.management", + "remotewd.com", "wmflabs.org", "yolasite.com", + "ybo.faith", + "yombo.me", + "homelink.one", + "ybo.party", + "ybo.review", + "ybo.science", + "ybo.trade", "za.net", "za.org", + "now.sh", + "cc.ua", + "inf.ua", + "ltd.ua", } var nodeLabels = [...]string{ @@ -8497,7 +8462,6 @@ var nodeLabels = [...]string{ "eg", "email", "emerck", - "emerson", "energy", "engineer", "engineering", @@ -8693,6 +8657,7 @@ var nodeLabels = [...]string{ "honda", "honeywell", "horse", + "hospital", "host", "hosting", "hot", @@ -9034,6 +8999,7 @@ var nodeLabels = [...]string{ "omega", "one", "ong", + "onion", "onl", "online", "onyourside", @@ -9811,6 +9777,7 @@ var nodeLabels = [...]string{ "ac", "biz", 
"co", + "futurehosting", "futuremailing", "gv", "info", @@ -10134,6 +10101,8 @@ var nodeLabels = [...]string{ "gov", "mil", "magentosite", + "myfusion", + "statics", "cloudns", "co", "com", @@ -10186,8 +10155,9 @@ var nodeLabels = [...]string{ "amazonaws", "cn-north-1", "compute", + "elb", + "elasticbeanstalk", "s3", - "cn-north-1", "arts", "com", "edu", @@ -10209,6 +10179,8 @@ var nodeLabels = [...]string{ "africa", "alpha-myqnapcloud", "amazonaws", + "appchizi", + "applinzi", "appspot", "ar", "betainabox", @@ -10265,6 +10237,7 @@ var nodeLabels = [...]string{ "familyds", "fbsbx", "firebaseapp", + "firewall-gateway", "flynnhub", "freebox-os", "freeboxos", @@ -10429,8 +10402,8 @@ var nodeLabels = [...]string{ "qc", "quicksytes", "rackmaze", + "remotewd", "rhcloud", - "ro", "ru", "sa", "saves-the-whales", @@ -10472,39 +10445,75 @@ var nodeLabels = [...]string{ "xenapponazure", "yolasite", "za", + "ap-northeast-1", "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", "compute", "compute-1", "elb", "eu-central-1", + "eu-west-1", "s3", "s3-ap-northeast-1", "s3-ap-northeast-2", + "s3-ap-south-1", "s3-ap-southeast-1", "s3-ap-southeast-2", + "s3-ca-central-1", "s3-eu-central-1", "s3-eu-west-1", "s3-external-1", - "s3-external-2", "s3-fips-us-gov-west-1", "s3-sa-east-1", + "s3-us-east-2", "s3-us-gov-west-1", "s3-us-west-1", "s3-us-west-2", + "s3-website-ap-northeast-1", + "s3-website-ap-southeast-1", + "s3-website-ap-southeast-2", + "s3-website-eu-west-1", + "s3-website-sa-east-1", + "s3-website-us-east-1", + "s3-website-us-west-1", + "s3-website-us-west-2", + "sa-east-1", "us-east-1", + "us-east-2", + "dualstack", "s3", - "ap-northeast-1", - "ap-northeast-2", - "ap-southeast-1", - "ap-southeast-2", - "eu-central-1", - "eu-west-1", - "sa-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", - "z-1", - "z-2", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", "s3", "alpha", "beta", @@ -10559,17 +10568,46 @@ var nodeLabels = [...]string{ "realm", "blogspot", "com", + "cosidns", + "dd-dns", + "ddnss", "dnshome", + "dnsupdater", + "dray-dns", + "draydns", + "dyn-ip24", + "dyn-vpn", + "dynamisches-dns", + "dyndns1", + "dynvpn", + "firewall-gateway", "fuettertdasnetz", "goip", + "home-webserver", + "internet-dns", "isteingeek", "istmein", "keymachine", + "l-o-g-i-n", "lebtimnetz", "leitungsen", "logoip", + "mein-vigor", + "my-gateway", + "my-router", + "my-vigor", + "my-wan", + "myhome-server", + "spdns", + "syno-ds", + "synology-diskstation", + "synology-ds", "taifun-dns", "traeumtgerade", + "dyn", + "dyn", + "dyndns", + "dyn", "biz", "blogspot", "co", @@ -10648,9 +10686,14 @@ var nodeLabels = [...]string{ "net", "org", "cloudns", + "diskstation", "mycd", + "spdns", "transurl", "wellbeingzone", + "party", + "user", + "ybo", "aland", "blogspot", "dy", @@ -10679,6 +10722,7 @@ var nodeLabels = [...]string{ "medecin", "nom", "notaires", + "on-web", "pharmacien", "port", "prd", @@ -10878,6 +10922,7 @@ var nodeLabels = [...]string{ "barrell-of-knowledge", "cloudns", "dvrcam", + "dynamic-dns", "dyndns", "for-our", "groks-the", @@ -10896,16 +10941,22 @@ var nodeLabels = [...]string{ "com", "dedyn", "drud", + "enonic", "github", "gitlab", "hasura-app", "hzc", + "lair", "ngrok", 
"nid", "pantheonsite", "protonet", "sandcats", + "shiftedit", "spacekit", + "stolos", + "customer", + "apps", "com", "edu", "gov", @@ -12226,9 +12277,7 @@ var nodeLabels = [...]string{ "aso", "choyo", "gyokuto", - "hitoyoshi", "kamiamakusa", - "kashima", "kikuchi", "kumamoto", "mashiki", @@ -13289,6 +13338,7 @@ var nodeLabels = [...]string{ "priv", "synology", "webhop", + "yombo", "co", "com", "edu", @@ -13986,6 +14036,9 @@ var nodeLabels = [...]string{ "endofinternet", "familyds", "fastly", + "fastlylb", + "feste-ip", + "firewall-gateway", "from-az", "from-co", "from-la", @@ -14005,6 +14058,7 @@ var nodeLabels = [...]string{ "isa-geek", "jp", "kicks-ass", + "knx-server", "mydissent", "myeffect", "myfritz", @@ -14027,12 +14081,15 @@ var nodeLabels = [...]string{ "serveblog", "serveftp", "serveminecraft", + "static-access", "sytes", + "t3l3p0rt", "thruhere", "uk", "webhop", "za", "r", + "map", "prod", "ssl", "a", @@ -14040,6 +14097,7 @@ var nodeLabels = [...]string{ "a", "b", "global", + "map", "alces", "arts", "com", @@ -14883,7 +14941,9 @@ var nodeLabels = [...]string{ "net", "org", "pro", + "homelink", "ae", + "amune", "blogdns", "blogsite", "bmoattachments", @@ -14895,6 +14955,8 @@ var nodeLabels = [...]string{ "cloudns", "collegefan", "couchpotatofries", + "ddnss", + "diskstation", "dnsalias", "dnsdojo", "doesntexist", @@ -14942,6 +15004,8 @@ var nodeLabels = [...]string{ "kicks-ass", "misconfused", "mlbfan", + "my-firewall", + "myfirewall", "myftp", "mysecuritycamera", "nflfan", @@ -14957,6 +15021,7 @@ var nodeLabels = [...]string{ "servebbs", "serveftp", "servegame", + "spdns", "stuff-4-sale", "sweetpepper", "tunk", @@ -14967,6 +15032,7 @@ var nodeLabels = [...]string{ "wmflabs", "za", "zapto", + "tele", "c", "rsc", "origin", @@ -15040,6 +15106,7 @@ var nodeLabels = [...]string{ "nom", "org", "sld", + "ybo", "blogspot", "com", "edu", @@ -15359,6 +15426,7 @@ var nodeLabels = [...]string{ "blogspot", "com", "nom", + "ybo", "arts", "blogspot", "com", @@ -15380,136 +15448,12 @@ var nodeLabels = [...]string{ "in", "org", "ac", - "adygeya", - "altai", - "amur", - "amursk", - "arkhangelsk", - "astrakhan", - "baikal", - "bashkiria", - "belgorod", - "bir", "blogspot", - "bryansk", - "buryatia", - "cbg", - "chel", - "chelyabinsk", - "chita", - "chukotka", - "chuvashia", - "cmw", - "com", - "dagestan", - "dudinka", - "e-burg", "edu", - "fareast", "gov", - "grozny", "int", - "irkutsk", - "ivanovo", - "izhevsk", - "jamal", - "jar", - "joshkar-ola", - "k-uralsk", - "kalmykia", - "kaluga", - "kamchatka", - "karelia", - "kazan", - "kchr", - "kemerovo", - "khabarovsk", - "khakassia", - "khv", - "kirov", - "kms", - "koenig", - "komi", - "kostroma", - "krasnoyarsk", - "kuban", - "kurgan", - "kursk", - "kustanai", - "kuzbass", - "lipetsk", - "magadan", - "mari", - "mari-el", - "marine", "mil", - "mordovia", - "msk", - "murmansk", - "mytis", - "nakhodka", - "nalchik", - "net", - "nkz", - "nnov", - "norilsk", - "nov", - "novosibirsk", - "nsk", - "omsk", - "orenburg", - "org", - "oryol", - "oskol", - "palana", - "penza", - "perm", - "pp", - "ptz", - "pyatigorsk", - "rnd", - "rubtsovsk", - "ryazan", - "sakhalin", - "samara", - "saratov", - "simbirsk", - "smolensk", - "snz", - "spb", - "stavropol", - "stv", - "surgut", - "syzran", - "tambov", - "tatarstan", "test", - "tom", - "tomsk", - "tsaritsyn", - "tsk", - "tula", - "tuva", - "tver", - "tyumen", - "udm", - "udmurtia", - "ulan-ude", - "vdonsk", - "vladikavkaz", - "vladimir", - "vladivostok", - "volgograd", - "vologda", - "voronezh", - "vrn", - "vyatka", - 
"yakutia", - "yamal", - "yaroslavl", - "yekaterinburg", - "yuzhno-sakhalinsk", - "zgrad", "ac", "co", "com", @@ -15537,6 +15481,7 @@ var nodeLabels = [...]string{ "gov", "net", "org", + "ybo", "com", "edu", "gov", @@ -15598,6 +15543,7 @@ var nodeLabels = [...]string{ "hashbang", "mil", "net", + "now", "org", "platform", "blogspot", @@ -15761,6 +15707,7 @@ var nodeLabels = [...]string{ "web", "blogspot", "gov", + "ybo", "aero", "biz", "co", @@ -15809,6 +15756,7 @@ var nodeLabels = [...]string{ "sc", "tv", "biz", + "cc", "cherkassy", "cherkasy", "chernigov", @@ -15832,6 +15780,7 @@ var nodeLabels = [...]string{ "gov", "if", "in", + "inf", "ivano-frankivsk", "kh", "kharkiv", @@ -15849,6 +15798,7 @@ var nodeLabels = [...]string{ "kyiv", "lg", "lt", + "ltd", "lugansk", "lutsk", "lv", @@ -15910,6 +15860,7 @@ var nodeLabels = [...]string{ "blogspot", "no-ip", "wellbeingzone", + "homeoffice", "service", "ak", "al", diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go index a56909c10..e6bfa39e9 100644 --- a/vendor/golang.org/x/net/route/address.go +++ b/vendor/golang.org/x/net/route/address.go @@ -24,6 +24,39 @@ type LinkAddr struct { // Family implements the Family method of Addr interface. func (a *LinkAddr) Family() int { return sysAF_LINK } +func (a *LinkAddr) lenAndSpace() (int, int) { + l := 8 + len(a.Name) + len(a.Addr) + return l, roundup(l) +} + +func (a *LinkAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + nlen, alen := len(a.Name), len(a.Addr) + if nlen > 255 || alen > 255 { + return 0, errInvalidAddr + } + b[0] = byte(l) + b[1] = sysAF_LINK + if a.Index > 0 { + nativeEndian.PutUint16(b[2:4], uint16(a.Index)) + } + data := b[8:] + if nlen > 0 { + b[5] = byte(nlen) + copy(data[:nlen], a.Addr) + data = data[nlen:] + } + if alen > 0 { + b[6] = byte(alen) + copy(data[:alen], a.Name) + data = data[alen:] + } + return ll, nil +} + func parseLinkAddr(b []byte) (Addr, error) { if len(b) < 8 { return nil, errInvalidAddr @@ -90,6 +123,21 @@ type Inet4Addr struct { // Family implements the Family method of Addr interface. func (a *Inet4Addr) Family() int { return sysAF_INET } +func (a *Inet4Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet, roundup(sizeofSockaddrInet) +} + +func (a *Inet4Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET + copy(b[4:8], a.IP[:]) + return ll, nil +} + // An Inet6Addr represents an internet address for IPv6. type Inet6Addr struct { IP [16]byte // IP address @@ -99,18 +147,36 @@ type Inet6Addr struct { // Family implements the Family method of Addr interface. func (a *Inet6Addr) Family() int { return sysAF_INET6 } +func (a *Inet6Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6) +} + +func (a *Inet6Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET6 + copy(b[8:24], a.IP[:]) + if a.ZoneID > 0 { + nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID)) + } + return ll, nil +} + // parseInetAddr parses b as an internet address for IPv4 or IPv6. 
func parseInetAddr(af int, b []byte) (Addr, error) { switch af { case sysAF_INET: - if len(b) < 16 { + if len(b) < sizeofSockaddrInet { return nil, errInvalidAddr } a := &Inet4Addr{} copy(a.IP[:], b[4:8]) return a, nil case sysAF_INET6: - if len(b) < 28 { + if len(b) < sizeofSockaddrInet6 { return nil, errInvalidAddr } a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))} @@ -174,7 +240,7 @@ func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { off6 = 8 // offset of in6_addr ) switch { - case b[0] == 28: // size of sockaddr_in6 + case b[0] == sizeofSockaddrInet6: a := &Inet6Addr{} copy(a.IP[:], b[off6:off6+16]) return int(b[0]), a, nil @@ -186,7 +252,7 @@ func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { copy(a.IP[:], b[l-off6:l]) } return int(b[0]), a, nil - case b[0] == 16: // size of sockaddr_in + case b[0] == sizeofSockaddrInet: a := &Inet4Addr{} copy(a.IP[:], b[off4:off4+4]) return int(b[0]), a, nil @@ -211,6 +277,24 @@ type DefaultAddr struct { // Family implements the Family method of Addr interface. func (a *DefaultAddr) Family() int { return a.af } +func (a *DefaultAddr) lenAndSpace() (int, int) { + l := len(a.Raw) + return l, roundup(l) +} + +func (a *DefaultAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + if l > 255 { + return 0, errInvalidAddr + } + b[1] = byte(l) + copy(b[:l], a.Raw) + return ll, nil +} + func parseDefaultAddr(b []byte) (Addr, error) { if len(b) < 2 || len(b) < int(b[0]) { return nil, errInvalidAddr @@ -219,6 +303,66 @@ func parseDefaultAddr(b []byte) (Addr, error) { return a, nil } +func addrsSpace(as []Addr) int { + var l int + for _, a := range as { + switch a := a.(type) { + case *LinkAddr: + _, ll := a.lenAndSpace() + l += ll + case *Inet4Addr: + _, ll := a.lenAndSpace() + l += ll + case *Inet6Addr: + _, ll := a.lenAndSpace() + l += ll + case *DefaultAddr: + _, ll := a.lenAndSpace() + l += ll + } + } + return l +} + +// marshalAddrs marshals as and returns a bitmap indicating which +// address is stored in b. +func marshalAddrs(b []byte, as []Addr) (uint, error) { + var attrs uint + for i, a := range as { + switch a := a.(type) { + case *LinkAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet4Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet6Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *DefaultAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + } + } + return attrs, nil +} + func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) { var as [sysRTAX_MAX]Addr af := int(sysAF_UNSPEC) diff --git a/vendor/golang.org/x/net/route/binary.go b/vendor/golang.org/x/net/route/binary.go index 4c561631b..6910520ec 100644 --- a/vendor/golang.org/x/net/route/binary.go +++ b/vendor/golang.org/x/net/route/binary.go @@ -9,7 +9,7 @@ package route // This file contains duplicates of encoding/binary package. // // This package is supposed to be used by the net package of standard -// library. Therefore a package set used in the package must be the +// library. Therefore the package set used in the package must be the // same as net package. 
var ( diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go index f452ad14c..e7716442d 100644 --- a/vendor/golang.org/x/net/route/defs_darwin.go +++ b/vendor/golang.org/x/net/route/defs_darwin.go @@ -13,6 +13,8 @@ package route #include #include #include + +#include */ import "C" @@ -23,6 +25,8 @@ const ( sysAF_LINK = C.AF_LINK sysAF_INET6 = C.AF_INET6 + sysSOCK_RAW = C.SOCK_RAW + sysNET_RT_DUMP = C.NET_RT_DUMP sysNET_RT_FLAGS = C.NET_RT_FLAGS sysNET_RT_IFLIST = C.NET_RT_IFLIST @@ -103,4 +107,8 @@ const ( sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2 sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 ) diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go index c737751d7..dd31de269 100644 --- a/vendor/golang.org/x/net/route/defs_dragonfly.go +++ b/vendor/golang.org/x/net/route/defs_dragonfly.go @@ -13,6 +13,8 @@ package route #include #include #include + +#include */ import "C" @@ -23,6 +25,8 @@ const ( sysAF_LINK = C.AF_LINK sysAF_INET6 = C.AF_INET6 + sysSOCK_RAW = C.SOCK_RAW + sysNET_RT_DUMP = C.NET_RT_DUMP sysNET_RT_FLAGS = C.NET_RT_FLAGS sysNET_RT_IFLIST = C.NET_RT_IFLIST @@ -102,4 +106,8 @@ const ( sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 ) diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go index 8f834e81d..d95594d8e 100644 --- a/vendor/golang.org/x/net/route/defs_freebsd.go +++ b/vendor/golang.org/x/net/route/defs_freebsd.go @@ -14,6 +14,8 @@ package route #include #include +#include + struct if_data_freebsd7 { u_char ifi_type; u_char ifi_physical; @@ -222,6 +224,8 @@ const ( sysAF_LINK = C.AF_LINK sysAF_INET6 = C.AF_INET6 + sysSOCK_RAW = C.SOCK_RAW + sysNET_RT_DUMP = C.NET_RT_DUMP sysNET_RT_FLAGS = C.NET_RT_FLAGS sysNET_RT_IFLIST = C.NET_RT_IFLIST @@ -326,4 +330,8 @@ const ( sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9 sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10 sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11 + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 ) diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go index b18d85e01..b0abd549a 100644 --- a/vendor/golang.org/x/net/route/defs_netbsd.go +++ b/vendor/golang.org/x/net/route/defs_netbsd.go @@ -13,6 +13,8 @@ package route #include #include #include + +#include */ import "C" @@ -23,6 +25,8 @@ const ( sysAF_LINK = C.AF_LINK sysAF_INET6 = C.AF_INET6 + sysSOCK_RAW = C.SOCK_RAW + sysNET_RT_DUMP = C.NET_RT_DUMP sysNET_RT_FLAGS = C.NET_RT_FLAGS sysNET_RT_IFLIST = C.NET_RT_IFLIST @@ -101,4 +105,8 @@ const ( sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 ) diff --git 
a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go index 5df7a43bc..0f66d3619 100644 --- a/vendor/golang.org/x/net/route/defs_openbsd.go +++ b/vendor/golang.org/x/net/route/defs_openbsd.go @@ -13,6 +13,8 @@ package route #include #include #include + +#include */ import "C" @@ -23,6 +25,8 @@ const ( sysAF_LINK = C.AF_LINK sysAF_INET6 = C.AF_INET6 + sysSOCK_RAW = C.SOCK_RAW + sysNET_RT_DUMP = C.NET_RT_DUMP sysNET_RT_FLAGS = C.NET_RT_FLAGS sysNET_RT_IFLIST = C.NET_RT_IFLIST @@ -91,3 +95,11 @@ const ( sysRTAX_LABEL = C.RTAX_LABEL sysRTAX_MAX = C.RTAX_MAX ) + +const ( + sizeofRtMsghdr = C.sizeof_struct_rt_msghdr + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go index d7ae0eb50..0fa7e09f4 100644 --- a/vendor/golang.org/x/net/route/message.go +++ b/vendor/golang.org/x/net/route/message.go @@ -7,9 +7,6 @@ package route // A Message represents a routing message. -// -// Note: This interface will be changed to support Marshal method in -// future version. type Message interface { // Sys returns operating system-specific information. Sys() []Sys @@ -52,11 +49,10 @@ func ParseRIB(typ RIBType, b []byte) ([]Message, error) { b = b[l:] continue } - mtyp := int(b[3]) - if fn, ok := parseFns[mtyp]; !ok { + if w, ok := wireFormats[int(b[3])]; !ok { nskips++ } else { - m, err := fn(typ, b) + m, err := w.parse(typ, b) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go index c0c7c57a9..b3bc60c62 100644 --- a/vendor/golang.org/x/net/route/message_test.go +++ b/vendor/golang.org/x/net/route/message_test.go @@ -33,11 +33,28 @@ func TestFetchAndParseRIB(t *testing.T) { } } +var ( + rtmonSock int + rtmonErr error +) + +func init() { + // We need to keep rtmonSock alive to avoid treading on + // recycled socket descriptors. + rtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) +} + +// TestMonitorAndParseRIB leaks a worker goroutine and a socket +// descriptor but that's intentional. func TestMonitorAndParseRIB(t *testing.T) { if testing.Short() || os.Getuid() != 0 { t.Skip("must be root") } + if rtmonErr != nil { + t.Fatal(rtmonErr) + } + // We suppose that using an IPv4 link-local address and the // dot1Q ID for Token Ring and FDDI doesn't harm anyone. pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} @@ -49,16 +66,18 @@ func TestMonitorAndParseRIB(t *testing.T) { } pv.teardown() - s, err := syscall.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC) - if err != nil { - t.Fatal(err) - } - defer syscall.Close(s) - go func() { b := make([]byte, os.Getpagesize()) for { - n, err := syscall.Read(s, b) + // There's no easy way to unblock this read + // call because the routing message exchange + // over routing socket is a connectionless + // message-oriented protocol, no control plane + // for signaling connectivity, and we cannot + // use the net package of standard library due + // to the lack of support for routing socket + // and circular dependency. 
+ n, err := syscall.Read(rtmonSock, b) if err != nil { return } @@ -116,3 +135,99 @@ func TestParseRIBWithFuzz(t *testing.T) { } } } + +func TestRouteMessage(t *testing.T) { + s, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) + if err != nil { + t.Fatal(err) + } + defer syscall.Close(s) + + var ms []RouteMessage + for _, af := range []int{sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, sysNET_RT_DUMP) + if err != nil || len(rs) == 0 { + continue + } + switch af { + case sysAF_INET: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet4Addr{}, + nil, + &Inet4Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + }, + }, + }...) + case sysAF_INET6: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet6Addr{}, + nil, + &Inet6Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + }, + }, + }...) + } + } + for i, m := range ms { + m.ID = uintptr(os.Getpid()) + m.Seq = i + 1 + wb, err := m.Marshal() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + if _, err := syscall.Write(s, wb); err != nil { + t.Fatalf("%v: %v", m, err) + } + rb := make([]byte, os.Getpagesize()) + n, err := syscall.Read(s, rb) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + rms, err := ParseRIB(0, rb[:n]) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, rm := range rms { + err := rm.(*RouteMessage).Err + if err != nil { + t.Errorf("%v: %v", m, err) + } + } + ss, err := msgs(rms).validate() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, s := range ss { + t.Log(s) + } + + } +} diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go index c986e29eb..081da0d5c 100644 --- a/vendor/golang.org/x/net/route/route.go +++ b/vendor/golang.org/x/net/route/route.go @@ -24,21 +24,70 @@ var ( errMessageTooShort = errors.New("message too short") errInvalidMessage = errors.New("invalid message") errInvalidAddr = errors.New("invalid address") + errShortBuffer = errors.New("short buffer") ) // A RouteMessage represents a message conveying an address prefix, a // nexthop address and an output interface. +// +// Unlike other messages, this message can be used to query adjacency +// information for the given address prefix, to add a new route, and +// to delete or modify the existing route from the routing information +// base inside the kernel by writing and reading route messages on a +// routing socket. +// +// For the manipulation of routing information, the route message must +// contain appropriate fields that include: +// +// Version = +// Type = +// Flags = +// Index = +// ID = +// Seq = +// Addrs = +// +// The Type field specifies a type of manipulation, the Flags field +// specifies a class of target information and the Addrs field +// specifies target information like the following: +// +// route.RouteMessage{ +// Version: RTM_VERSION, +// Type: RTM_GET, +// Flags: RTF_UP | RTF_HOST, +// ID: uintptr(os.Getpid()), +// Seq: 1, +// Addrs: []route.Addrs{ +// RTAX_DST: &route.Inet4Addr{ ... }, +// RTAX_IFP: &route.LinkAddr{ ... }, +// RTAX_BRD: &route.Inet4Addr{ ... 
}, +// }, +// } +// +// The values for the above fields depend on the implementation of +// each operating system. +// +// The Err field on a response message contains an error value on the +// requested operation. If non-nil, the requested operation is failed. type RouteMessage struct { - Version int // message version - Type int // message type - Flags int // route flags - Index int // interface index when atatched - Addrs []Addr // addresses + Version int // message version + Type int // message type + Flags int // route flags + Index int // interface index when atatched + ID uintptr // sender's identifier; usually process ID + Seq int // sequence number + Err error // error on requested operation + Addrs []Addr // addresses extOff int // offset of header extension raw []byte // raw message } +// Marshal returns the binary encoding of m. +func (m *RouteMessage) Marshal() ([]byte, error) { + return m.marshal() +} + // A RIBType reprensents a type of routing information base. type RIBType int diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go index d333c6aa5..61b2bb4ad 100644 --- a/vendor/golang.org/x/net/route/route_classic.go +++ b/vendor/golang.org/x/net/route/route_classic.go @@ -6,6 +6,36 @@ package route +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + w, ok := wireFormats[m.Type] + if !ok { + return nil, errUnsupportedMessage + } + l := w.bodyOff + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint32(b[8:12], uint32(m.Flags)) + nativeEndian.PutUint16(b[4:6], uint16(m.Index)) + nativeEndian.PutUint32(b[16:20], uint32(m.ID)) + nativeEndian.PutUint32(b[20:24], uint32(m.Seq)) + attrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { if len(b) < w.bodyOff { return nil, errMessageTooShort @@ -19,9 +49,15 @@ func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { Type: int(b[3]), Flags: int(nativeEndian.Uint32(b[8:12])), Index: int(nativeEndian.Uint16(b[4:6])), + ID: uintptr(nativeEndian.Uint32(b[16:20])), + Seq: int(nativeEndian.Uint32(b[20:24])), extOff: w.extOff, raw: b[:l], } + errno := syscall.Errno(nativeEndian.Uint32(b[28:32])) + if errno != 0 { + m.Err = errno + } var err error m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) if err != nil { diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go index 76eae40d8..daf2e90c4 100644 --- a/vendor/golang.org/x/net/route/route_openbsd.go +++ b/vendor/golang.org/x/net/route/route_openbsd.go @@ -4,8 +4,35 @@ package route +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + l := sizeofRtMsghdr + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr)) + nativeEndian.PutUint32(b[16:20], uint32(m.Flags)) + nativeEndian.PutUint16(b[6:8], uint16(m.Index)) + nativeEndian.PutUint32(b[24:28], uint32(m.ID)) + nativeEndian.PutUint32(b[28:32], uint32(m.Seq)) + 
attrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < 40 { + if len(b) < sizeofRtMsghdr { return nil, errMessageTooShort } l := int(nativeEndian.Uint16(b[:2])) @@ -17,12 +44,18 @@ func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { Type: int(b[3]), Flags: int(nativeEndian.Uint32(b[16:20])), Index: int(nativeEndian.Uint16(b[6:8])), + ID: uintptr(nativeEndian.Uint32(b[24:28])), + Seq: int(nativeEndian.Uint32(b[28:32])), raw: b[:l], } ll := int(nativeEndian.Uint16(b[4:6])) if len(b) < ll { return nil, errInvalidMessage } + errno := syscall.Errno(nativeEndian.Uint32(b[32:36])) + if errno != 0 { + m.Err = errno + } as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) if err != nil { return nil, err diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go index 80ca83ae1..3d0ee9b14 100644 --- a/vendor/golang.org/x/net/route/sys.go +++ b/vendor/golang.org/x/net/route/sys.go @@ -11,7 +11,7 @@ import "unsafe" var ( nativeEndian binaryByteOrder kernelAlign int - parseFns map[int]parseFn + wireFormats map[int]*wireFormat ) func init() { @@ -22,7 +22,7 @@ func init() { } else { nativeEndian = bigEndian } - kernelAlign, parseFns = probeRoutingStack() + kernelAlign, wireFormats = probeRoutingStack() } func roundup(l int) int { @@ -32,9 +32,8 @@ func roundup(l int) int { return (l + kernelAlign - 1) & ^(kernelAlign - 1) } -type parseFn func(RIBType, []byte) (Message, error) - type wireFormat struct { extOff int // offset of header extension bodyOff int // offset of message body + parse func(RIBType, []byte) (Message, error) } diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go index fff3a0fd1..e742c919d 100644 --- a/vendor/golang.org/x/net/route/sys_darwin.go +++ b/vendor/golang.org/x/net/route/sys_darwin.go @@ -49,32 +49,39 @@ func (m *InterfaceMessage) Sys() []Sys { } } -func probeRoutingStack() (int, map[int]parseFn) { +func probeRoutingStack() (int, map[int]*wireFormat) { rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} + rtm.parse = rtm.parseRouteMessage rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} + rtm2.parse = rtm2.parseRouteMessage ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} + ifm.parse = ifm.parseInterfaceMessage ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} + ifm2.parse = ifm2.parseInterfaceMessage ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} + ifam.parse = ifam.parseInterfaceAddrMessage ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} + ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage // Darwin kernels require 32-bit aligned access to routing facilities. 
- return 4, map[int]parseFn{ - sysRTM_ADD: rtm.parseRouteMessage, - sysRTM_DELETE: rtm.parseRouteMessage, - sysRTM_CHANGE: rtm.parseRouteMessage, - sysRTM_GET: rtm.parseRouteMessage, - sysRTM_LOSING: rtm.parseRouteMessage, - sysRTM_REDIRECT: rtm.parseRouteMessage, - sysRTM_MISS: rtm.parseRouteMessage, - sysRTM_LOCK: rtm.parseRouteMessage, - sysRTM_RESOLVE: rtm.parseRouteMessage, - sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, - sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, - sysRTM_IFINFO: ifm.parseInterfaceMessage, - sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage, - sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage, - sysRTM_IFINFO2: ifm2.parseInterfaceMessage, - sysRTM_NEWMADDR2: ifmam2.parseInterfaceMulticastAddrMessage, - sysRTM_GET2: rtm2.parseRouteMessage, + return 4, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFINFO2: ifm2, + sysRTM_NEWMADDR2: ifmam2, + sysRTM_GET2: rtm2, } } diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go index da848b3d0..b175cb18c 100644 --- a/vendor/golang.org/x/net/route/sys_dragonfly.go +++ b/vendor/golang.org/x/net/route/sys_dragonfly.go @@ -44,28 +44,33 @@ func (m *InterfaceMessage) Sys() []Sys { } } -func probeRoutingStack() (int, map[int]parseFn) { +func probeRoutingStack() (int, map[int]*wireFormat) { var p uintptr rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4} + rtm.parse = rtm.parseRouteMessage ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4} + ifm.parse = ifm.parseInterfaceMessage ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4} + ifam.parse = ifam.parseInterfaceAddrMessage ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4} - return int(unsafe.Sizeof(p)), map[int]parseFn{ - sysRTM_ADD: rtm.parseRouteMessage, - sysRTM_DELETE: rtm.parseRouteMessage, - sysRTM_CHANGE: rtm.parseRouteMessage, - sysRTM_GET: rtm.parseRouteMessage, - sysRTM_LOSING: rtm.parseRouteMessage, - sysRTM_REDIRECT: rtm.parseRouteMessage, - sysRTM_MISS: rtm.parseRouteMessage, - sysRTM_LOCK: rtm.parseRouteMessage, - sysRTM_RESOLVE: rtm.parseRouteMessage, - sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, - sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, - sysRTM_IFINFO: ifm.parseInterfaceMessage, - sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage, - sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage, - sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage, + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, } } diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go 
b/vendor/golang.org/x/net/route/sys_freebsd.go index 7b05c1a5a..010d4ae78 100644 --- a/vendor/golang.org/x/net/route/sys_freebsd.go +++ b/vendor/golang.org/x/net/route/sys_freebsd.go @@ -54,7 +54,7 @@ func (m *InterfaceMessage) Sys() []Sys { } } -func probeRoutingStack() (int, map[int]parseFn) { +func probeRoutingStack() (int, map[int]*wireFormat) { var p uintptr wordSize := int(unsafe.Sizeof(p)) align := int(unsafe.Sizeof(p)) @@ -130,21 +130,26 @@ func probeRoutingStack() (int, map[int]parseFn) { ifm.bodyOff = sizeofIfMsghdrFreeBSD11 } } - return align, map[int]parseFn{ - sysRTM_ADD: rtm.parseRouteMessage, - sysRTM_DELETE: rtm.parseRouteMessage, - sysRTM_CHANGE: rtm.parseRouteMessage, - sysRTM_GET: rtm.parseRouteMessage, - sysRTM_LOSING: rtm.parseRouteMessage, - sysRTM_REDIRECT: rtm.parseRouteMessage, - sysRTM_MISS: rtm.parseRouteMessage, - sysRTM_LOCK: rtm.parseRouteMessage, - sysRTM_RESOLVE: rtm.parseRouteMessage, - sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, - sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, - sysRTM_IFINFO: ifm.parseInterfaceMessage, - sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage, - sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage, - sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage, + rtm.parse = rtm.parseRouteMessage + ifm.parse = ifm.parseInterfaceMessage + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return align, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, } } diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go index 4d8076b51..b4e330140 100644 --- a/vendor/golang.org/x/net/route/sys_netbsd.go +++ b/vendor/golang.org/x/net/route/sys_netbsd.go @@ -42,26 +42,30 @@ func (m *InterfaceMessage) Sys() []Sys { } } -func probeRoutingStack() (int, map[int]parseFn) { +func probeRoutingStack() (int, map[int]*wireFormat) { rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7} + rtm.parse = rtm.parseRouteMessage ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7} + ifm.parse = ifm.parseInterfaceMessage ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7} + ifam.parse = ifam.parseInterfaceAddrMessage ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage // NetBSD 6 and above kernels require 64-bit aligned access to // routing facilities. 
- return 8, map[int]parseFn{ - sysRTM_ADD: rtm.parseRouteMessage, - sysRTM_DELETE: rtm.parseRouteMessage, - sysRTM_CHANGE: rtm.parseRouteMessage, - sysRTM_GET: rtm.parseRouteMessage, - sysRTM_LOSING: rtm.parseRouteMessage, - sysRTM_REDIRECT: rtm.parseRouteMessage, - sysRTM_MISS: rtm.parseRouteMessage, - sysRTM_LOCK: rtm.parseRouteMessage, - sysRTM_RESOLVE: rtm.parseRouteMessage, - sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, - sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, - sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage, - sysRTM_IFINFO: ifm.parseInterfaceMessage, + return 8, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_IFINFO: ifm, } } diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go index 26d043869..8798dc4ca 100644 --- a/vendor/golang.org/x/net/route/sys_openbsd.go +++ b/vendor/golang.org/x/net/route/sys_openbsd.go @@ -51,22 +51,29 @@ func (m *InterfaceMessage) Sys() []Sys { } } -func probeRoutingStack() (int, map[int]parseFn) { +func probeRoutingStack() (int, map[int]*wireFormat) { var p uintptr - nooff := &wireFormat{extOff: -1, bodyOff: -1} - return int(unsafe.Sizeof(p)), map[int]parseFn{ - sysRTM_ADD: nooff.parseRouteMessage, - sysRTM_DELETE: nooff.parseRouteMessage, - sysRTM_CHANGE: nooff.parseRouteMessage, - sysRTM_GET: nooff.parseRouteMessage, - sysRTM_LOSING: nooff.parseRouteMessage, - sysRTM_REDIRECT: nooff.parseRouteMessage, - sysRTM_MISS: nooff.parseRouteMessage, - sysRTM_LOCK: nooff.parseRouteMessage, - sysRTM_RESOLVE: nooff.parseRouteMessage, - sysRTM_NEWADDR: nooff.parseInterfaceAddrMessage, - sysRTM_DELADDR: nooff.parseInterfaceAddrMessage, - sysRTM_IFINFO: nooff.parseInterfaceMessage, - sysRTM_IFANNOUNCE: nooff.parseInterfaceAnnounceMessage, + rtm := &wireFormat{extOff: -1, bodyOff: -1} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: -1, bodyOff: -1} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: -1, bodyOff: -1} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: -1, bodyOff: -1} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_IFANNOUNCE: ifanm, } } diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go index d136325a3..c211188b1 100644 --- a/vendor/golang.org/x/net/route/syscall.go +++ b/vendor/golang.org/x/net/route/syscall.go @@ -11,10 +11,6 @@ import ( "unsafe" ) -// TODO: replace with runtime.KeepAlive when available -//go:noescape -func keepAlive(p unsafe.Pointer) - var zero uintptr func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { @@ -25,7 +21,6 @@ func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) p = unsafe.Pointer(&zero) } _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - keepAlive(p) if errno != 0 { return 
error(errno) } diff --git a/vendor/golang.org/x/net/route/syscall.s b/vendor/golang.org/x/net/route/syscall.s deleted file mode 100644 index fa6297f0a..000000000 --- a/vendor/golang.org/x/net/route/syscall.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -TEXT ·keepAlive(SB),NOSPLIT,$0 - RET diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go index 265b81cd5..4e2e1ab09 100644 --- a/vendor/golang.org/x/net/route/zsys_darwin.go +++ b/vendor/golang.org/x/net/route/zsys_darwin.go @@ -10,6 +10,8 @@ const ( sysAF_LINK = 0x12 sysAF_INET6 = 0x1e + sysSOCK_RAW = 0x3 + sysNET_RT_DUMP = 0x1 sysNET_RT_FLAGS = 0x2 sysNET_RT_IFLIST = 0x3 @@ -90,4 +92,8 @@ const ( sizeofRtMsghdrDarwin15 = 0x5c sizeofRtMsghdr2Darwin15 = 0x5c sizeofRtMetricsDarwin15 = 0x38 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c ) diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go index dd36dece0..719c88d11 100644 --- a/vendor/golang.org/x/net/route/zsys_dragonfly.go +++ b/vendor/golang.org/x/net/route/zsys_dragonfly.go @@ -10,6 +10,8 @@ const ( sysAF_LINK = 0x12 sysAF_INET6 = 0x1c + sysSOCK_RAW = 0x3 + sysNET_RT_DUMP = 0x1 sysNET_RT_FLAGS = 0x2 sysNET_RT_IFLIST = 0x3 @@ -89,4 +91,8 @@ const ( sizeofRtMsghdrDragonFlyBSD4 = 0x98 sizeofRtMetricsDragonFlyBSD4 = 0x70 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c ) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go index 9bac2e390..b03bc01f6 100644 --- a/vendor/golang.org/x/net/route/zsys_freebsd_386.go +++ b/vendor/golang.org/x/net/route/zsys_freebsd_386.go @@ -10,6 +10,8 @@ const ( sysAF_LINK = 0x12 sysAF_INET6 = 0x1c + sysSOCK_RAW = 0x3 + sysNET_RT_DUMP = 0x1 sysNET_RT_FLAGS = 0x2 sysNET_RT_IFLIST = 0x3 @@ -117,4 +119,8 @@ const ( sizeofIfDataFreeBSD9Emu = 0x98 sizeofIfDataFreeBSD10Emu = 0x98 sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c ) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go index b1920d7ac..0b675b3d3 100644 --- a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go +++ b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go @@ -10,6 +10,8 @@ const ( sysAF_LINK = 0x12 sysAF_INET6 = 0x1c + sysSOCK_RAW = 0x3 + sysNET_RT_DUMP = 0x1 sysNET_RT_FLAGS = 0x2 sysNET_RT_IFLIST = 0x3 @@ -114,4 +116,8 @@ const ( sizeofIfDataFreeBSD9Emu = 0x98 sizeofIfDataFreeBSD10Emu = 0x98 sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c ) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go index a034d6fcb..58f8ea16f 100644 --- a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go +++ b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go @@ -10,6 +10,8 @@ const ( sysAF_LINK = 0x12 sysAF_INET6 = 0x1c + sysSOCK_RAW = 0x3 + sysNET_RT_DUMP = 0x1 sysNET_RT_FLAGS = 0x2 sysNET_RT_IFLIST = 0x3 @@ -114,4 +116,8 @@ const ( sizeofIfDataFreeBSD9Emu = 0x60 sizeofIfDataFreeBSD10Emu = 0x60 sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c ) diff 
--git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go index aa4aad161..e0df45e8b 100644 --- a/vendor/golang.org/x/net/route/zsys_netbsd.go +++ b/vendor/golang.org/x/net/route/zsys_netbsd.go @@ -10,6 +10,8 @@ const ( sysAF_LINK = 0x12 sysAF_INET6 = 0x18 + sysSOCK_RAW = 0x3 + sysNET_RT_DUMP = 0x1 sysNET_RT_FLAGS = 0x2 sysNET_RT_IFLIST = 0x5 @@ -88,4 +90,8 @@ const ( sizeofRtMsghdrNetBSD7 = 0x78 sizeofRtMetricsNetBSD7 = 0x50 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c ) diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go index 4fadc4e8f..f5a1ff967 100644 --- a/vendor/golang.org/x/net/route/zsys_openbsd.go +++ b/vendor/golang.org/x/net/route/zsys_openbsd.go @@ -10,6 +10,8 @@ const ( sysAF_LINK = 0x12 sysAF_INET6 = 0x18 + sysSOCK_RAW = 0x3 + sysNET_RT_DUMP = 0x1 sysNET_RT_FLAGS = 0x2 sysNET_RT_IFLIST = 0x3 @@ -78,3 +80,11 @@ const ( sysRTAX_LABEL = 0xa sysRTAX_MAX = 0xb ) + +const ( + sizeofRtMsghdr = 0x60 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 1b6495603..26bc838de 100644 --- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -23,7 +23,7 @@ import ( "golang.org/x/oauth2/internal" ) -// Client Credentials Config describes a 2-legged OAuth2 flow, with both the +// Config describes a 2-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. type Config struct { // ClientID is the application's ID. 
diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
index 061b43bf9..a18e95a58 100644
--- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
+++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
@@ -48,8 +48,8 @@ func TestTokenRequest(t *testing.T) {
 		if err != nil {
 			t.Errorf("failed reading request body: %s.", err)
 		}
-		if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
-			t.Errorf("payload = %q; want %q", string(body), "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2")
+		if string(body) != "grant_type=client_credentials&scope=scope1+scope2" {
+			t.Errorf("payload = %q; want %q", string(body), "grant_type=client_credentials&scope=scope1+scope2")
 		}
 		w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
 		w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer"))
@@ -84,7 +84,7 @@ func TestTokenRefreshRequest(t *testing.T) {
 			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
 		}
 		body, _ := ioutil.ReadAll(r.Body)
-		if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
+		if string(body) != "grant_type=client_credentials&scope=scope1+scope2" {
 			t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
 		}
 	}))
diff --git a/vendor/golang.org/x/oauth2/example_test.go b/vendor/golang.org/x/oauth2/example_test.go
index d861fe7e3..378c70dc1 100644
--- a/vendor/golang.org/x/oauth2/example_test.go
+++ b/vendor/golang.org/x/oauth2/example_test.go
@@ -8,6 +8,8 @@ import (
 	"context"
 	"fmt"
 	"log"
+	"net/http"
+	"time"
 
 	"golang.org/x/oauth2"
 )
@@ -45,3 +47,25 @@ func ExampleConfig() {
 	client := conf.Client(ctx, tok)
 	client.Get("...")
 }
+
+func ExampleHTTPClient() {
+	hc := &http.Client{Timeout: 2 * time.Second}
+	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)
+
+	conf := &oauth2.Config{
+		ClientID:     "YOUR_CLIENT_ID",
+		ClientSecret: "YOUR_CLIENT_SECRET",
+		Scopes:       []string{"SCOPE1", "SCOPE2"},
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://provider.com/o/oauth2/auth",
+			TokenURL: "https://provider.com/o/oauth2/token",
+		},
+	}
+
+	// Exchange request will be made by the custom
+	// HTTP client, hc.
+	_, err := conf.Exchange(ctx, "foo")
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
index 4243f4cb9..50d918b87 100644
--- a/vendor/golang.org/x/oauth2/google/appengine.go
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -14,8 +14,8 @@ import (
 	"golang.org/x/oauth2"
 )
 
-// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
-var appengineVM bool
+// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex.
+var appengineFlex bool
 
 // Set at init time by appengine_hook.go. If nil, we're not on App Engine.
 var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go
index 6f6641141..56669eaa9 100644
--- a/vendor/golang.org/x/oauth2/google/appengine_hook.go
+++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build appengine
+// +build appengine appenginevm
 
 package google
 
diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
new file mode 100644
index 000000000..5d0231af2
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm
+
+package google
+
+func init() {
+	appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server.
+}
diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
deleted file mode 100644
index 10747801f..000000000
--- a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2015 The oauth2 Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appenginevm
-
-package google
-
-import "google.golang.org/appengine"
-
-func init() {
-	appengineVM = true
-	appengineTokenFunc = appengine.AccessToken
-	appengineAppIDFunc = appengine.AppID
-}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index b45e79616..004ed4eab 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -81,7 +81,7 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede
 	}
 
 	// Third, if we're on Google App Engine use those credentials.
-	if appengineTokenFunc != nil && !appengineVM {
+	if appengineTokenFunc != nil && !appengineFlex {
 		return &DefaultCredentials{
 			ProjectID:   appengineAppIDFunc(ctx),
 			TokenSource: AppEngineTokenSource(ctx, scope...),
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
index fbe1028d6..e31541b39 100644
--- a/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -42,7 +42,7 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
 
 func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
 	result := map[string]map[string]string{
-		"": map[string]string{}, // root section
+		"": {}, // root section
 	}
 	scanner := bufio.NewScanner(ini)
 	currentSection := ""
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2_test.go b/vendor/golang.org/x/oauth2/internal/oauth2_test.go
index c61585542..0aafc7f43 100644
--- a/vendor/golang.org/x/oauth2/internal/oauth2_test.go
+++ b/vendor/golang.org/x/oauth2/internal/oauth2_test.go
@@ -23,8 +23,8 @@ bar = hop
 ini = nin
 `,
 			map[string]map[string]string{
-				"":    map[string]string{"root": "toor"},
-				"foo": map[string]string{"bar": "hop", "ini": "nin"},
+				"":    {"root": "toor"},
+				"foo": {"bar": "hop", "ini": "nin"},
 			},
 		},
 		{
@@ -33,9 +33,9 @@ ini = nin
 empty=
 `,
 			map[string]map[string]string{
-				"":        map[string]string{},
-				"empty":   map[string]string{},
-				"section": map[string]string{"empty": ""},
+				"":        {},
+				"empty":   {},
+				"section": {"empty": ""},
 			},
 		},
 		{
@@ -45,7 +45,7 @@ empty=
 ;comment=true
 `,
 			map[string]map[string]string{
-				"": map[string]string{},
+				"": {},
 			},
 		},
 	}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index 1c0ec76d0..ba90a3414 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++
b/vendor/golang.org/x/oauth2/internal/token.go @@ -91,6 +91,7 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { var brokenAuthHeaderProviders = []string{ "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", "https://api.dropbox.com/", "https://api.dropboxapi.com/", "https://api.instagram.com/", @@ -101,6 +102,7 @@ var brokenAuthHeaderProviders = []string{ "https://api.twitch.tv/", "https://app.box.com/", "https://connect.stripe.com/", + "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214 "https://login.microsoftonline.com/", "https://login.salesforce.com/", "https://oauth.sandbox.trainingpeaks.com/", @@ -118,7 +120,6 @@ var brokenAuthHeaderProviders = []string{ "https://www.wunderlist.com/oauth/", "https://api.patreon.com/", "https://sandbox.codeswholesale.com/oauth/token", - "https://api.codeswholesale.com/oauth/token", } func RegisterBrokenAuthHeaderProvider(tokenURL string) { @@ -153,9 +154,9 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, if err != nil { return nil, err } - v.Set("client_id", clientID) bustedAuth := !providerAuthHeaderWorks(tokenURL) if bustedAuth && clientSecret != "" { + v.Set("client_id", clientID) v.Set("client_secret", clientSecret) } req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 7b06bfe1e..3e4835d7e 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -180,7 +180,6 @@ func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { "grant_type": {"authorization_code"}, "code": {code}, "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), }) } diff --git a/vendor/golang.org/x/oauth2/oauth2_test.go b/vendor/golang.org/x/oauth2/oauth2_test.go index e98c01ae6..e757b0f10 100644 --- a/vendor/golang.org/x/oauth2/oauth2_test.go +++ b/vendor/golang.org/x/oauth2/oauth2_test.go @@ -89,7 +89,7 @@ func TestExchangeRequest(t *testing.T) { if err != nil { t.Errorf("Failed reading request body: %s.", err) } - if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" { t.Errorf("Unexpected exchange payload, %v is found.", string(body)) } w.Header().Set("Content-Type", "application/x-www-form-urlencoded") @@ -133,7 +133,7 @@ func TestExchangeRequest_JSONResponse(t *testing.T) { if err != nil { t.Errorf("Failed reading request body: %s.", err) } - if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" { t.Errorf("Unexpected exchange payload, %v is found.", string(body)) } w.Header().Set("Content-Type", "application/json") @@ -325,7 +325,7 @@ func TestPasswordCredentialsTokenRequest(t *testing.T) { if err != nil { t.Errorf("Failed reading request body: %s.", err) } - expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1" + expected = "grant_type=password&password=password1&scope=scope1+scope2&username=user1" if string(body) != expected { t.Errorf("res.Body = %q; want %q", string(body), expected) } @@ -364,7 +364,7 @@ func TestTokenRefreshRequest(t 
*testing.T) { t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) } body, _ := ioutil.ReadAll(r.Body) - if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + if string(body) != "grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) } })) diff --git a/vendor/golang.org/x/oauth2/yandex/yandex.go b/vendor/golang.org/x/oauth2/yandex/yandex.go new file mode 100644 index 000000000..5ebf666d2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/yandex/yandex.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package yandex provides constants for using OAuth2 to access Yandex APIs. +package yandex // import "golang.org/x/oauth2/yandex" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is the Yandex OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.yandex.com/authorize", + TokenURL: "https://oauth.yandex.com/token", +} diff --git a/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-api.json b/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-api.json index 160514149..7ecce109a 100644 --- a/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-api.json +++ b/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-api.json @@ -1,67 +1,51 @@ { - "id": "acceleratedmobilepageurl:v1", - "description": "This API contains a single method, batchGet. Call this method to retrieve the AMP URL (and equivalent AMP Cache URL) for given public URL(s).\n", - "protocol": "rest", - "title": "Accelerated Mobile Pages (AMP) URL API", - "resources": { - "ampUrls": { - "methods": { - "batchGet": { - "id": "acceleratedmobilepageurl.ampUrls.batchGet", - "response": { - "$ref": "BatchGetAmpUrlsResponse" - }, - "parameterOrder": [], - "description": "Returns AMP URL(s) and equivalent\n[AMP Cache URL(s)](/amp/cache/overview#amp-cache-url-format).", - "request": { - "$ref": "BatchGetAmpUrlsRequest" + "schemas": { + "BatchGetAmpUrlsResponse": { + "description": "Batch AMP URL response.", + "type": "object", + "properties": { + "ampUrls": { + "description": "For each URL in BatchAmpUrlsRequest, the URL response. The response might\nnot be in the same order as URLs in the batch request.\nIf BatchAmpUrlsRequest contains duplicate URLs, AmpUrl is generated\nonly once.", + "type": "array", + "items": { + "$ref": "AmpUrl" + } + }, + "urlErrors": { + "type": "array", + "items": { + "$ref": "AmpUrlError" }, - "flatPath": "v1/ampUrls:batchGet", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/ampUrls:batchGet" + "description": "The errors for requested URLs that have no AMP URL." 
} - } - } - }, - "schemas": { + }, + "id": "BatchGetAmpUrlsResponse" + }, "AmpUrl": { "description": "AMP URL response for a requested URL.", "type": "object", "properties": { - "ampUrl": { - "description": "The AMP URL pointing to the publisher's web server.", + "cdnAmpUrl": { + "description": "The [AMP Cache URL](/amp/cache/overview#amp-cache-url-format) pointing to\nthe cached document in the Google AMP Cache.", "type": "string" }, "originalUrl": { "description": "The original non-AMP URL.", "type": "string" }, - "cdnAmpUrl": { - "description": "The [AMP Cache URL](/amp/cache/overview#amp-cache-url-format) pointing to\nthe cached document in the Google AMP Cache.", + "ampUrl": { + "description": "The AMP URL pointing to the publisher's web server.", "type": "string" } }, "id": "AmpUrl" }, "AmpUrlError": { + "id": "AmpUrlError", "description": "AMP URL Error resource for a requested URL that couldn't be found.", "type": "object", "properties": { - "originalUrl": { - "description": "The original non-AMP URL.", - "type": "string" - }, "errorCode": { - "description": "The error code of an API call.", - "enum": [ - "ERROR_CODE_UNSPECIFIED", - "INPUT_URL_NOT_FOUND", - "NO_AMP_URL", - "APPLICATION_ERROR", - "URL_IS_VALID_AMP", - "URL_IS_INVALID_AMP" - ], "enumDescriptions": [ "Not specified error.", "Indicates the requested URL is not found in the index, possibly because\nit's unable to be found, not able to be accessed by Googlebot, or some\nother error.", @@ -70,30 +54,40 @@ "DEPRECATED: Indicates the requested URL is a valid AMP URL. This is a\nnon-error state, should not be relied upon as a sign of success or\nfailure. It will be removed in future versions of the API.", "Indicates that an AMP URL has been found that corresponds to the request\nURL, but it is not valid AMP HTML." ], + "enum": [ + "ERROR_CODE_UNSPECIFIED", + "INPUT_URL_NOT_FOUND", + "NO_AMP_URL", + "APPLICATION_ERROR", + "URL_IS_VALID_AMP", + "URL_IS_INVALID_AMP" + ], + "description": "The error code of an API call.", + "type": "string" + }, + "originalUrl": { + "description": "The original non-AMP URL.", "type": "string" }, "errorMessage": { "description": "An optional descriptive error message.", "type": "string" } - }, - "id": "AmpUrlError" + } }, "BatchGetAmpUrlsRequest": { - "description": "AMP URL request for a batch of URLs.", - "type": "object", "properties": { "lookupStrategy": { "description": "The lookup_strategy being requested.", - "enum": [ - "FETCH_LIVE_DOC", - "IN_INDEX_DOC" - ], + "type": "string", "enumDescriptions": [ "FETCH_LIVE_DOC strategy involves live document fetch of URLs not found in\nthe index. Any request URL not found in the index is crawled in realtime\nto validate if there is a corresponding AMP URL. This strategy has higher\ncoverage but with extra latency introduced by realtime crawling. This is\nthe default strategy. Applications using this strategy should set higher\nHTTP timeouts of the API calls.", "IN_INDEX_DOC strategy skips fetching live documents of URL(s) not found\nin index. For applications which need low latency use of IN_INDEX_DOC\nstrategy is recommended." ], - "type": "string" + "enum": [ + "FETCH_LIVE_DOC", + "IN_INDEX_DOC" + ] }, "urls": { "description": "List of URLs to look up for the paired AMP URLs.\nThe URLs are case-sensitive. 
Up to 50 URLs per lookup\n(see [Usage Limits](/amp/cache/reference/limits)).", @@ -103,134 +97,140 @@ } } }, - "id": "BatchGetAmpUrlsRequest" - }, - "BatchGetAmpUrlsResponse": { - "description": "Batch AMP URL response.", - "type": "object", - "properties": { - "urlErrors": { - "description": "The errors for requested URLs that have no AMP URL.", - "type": "array", - "items": { - "$ref": "AmpUrlError" - } - }, - "ampUrls": { - "description": "For each URL in BatchAmpUrlsRequest, the URL response. The response might\nnot be in the same order as URLs in the batch request.\nIf BatchAmpUrlsRequest contains duplicate URLs, AmpUrl is generated\nonly once.", - "type": "array", - "items": { - "$ref": "AmpUrl" - } - } - }, - "id": "BatchGetAmpUrlsResponse" + "id": "BatchGetAmpUrlsRequest", + "description": "AMP URL request for a batch of URLs.", + "type": "object" } }, - "revision": "20160928", - "basePath": "", + "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version_module": "True", - "discoveryVersion": "v1", + "version": "v1", "baseUrl": "https://acceleratedmobilepageurl.googleapis.com/", + "kind": "discovery#restDescription", + "description": "This API contains a single method, batchGet. Call this method to retrieve the AMP URL (and equivalent AMP Cache URL) for given public URL(s).\n", + "servicePath": "", + "rootUrl": "https://acceleratedmobilepageurl.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", "name": "acceleratedmobilepageurl", + "batchPath": "batch", + "documentationLink": "https://developers.google.com/amp/cache/", + "id": "acceleratedmobilepageurl:v1", + "revision": "20170126", + "title": "Accelerated Mobile Pages (AMP) URL API", + "discoveryVersion": "v1", + "ownerName": "Google", + "version_module": "True", + "resources": { + "ampUrls": { + "methods": { + "batchGet": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "BatchGetAmpUrlsResponse" + }, + "parameters": {}, + "flatPath": "v1/ampUrls:batchGet", + "id": "acceleratedmobilepageurl.ampUrls.batchGet", + "path": "v1/ampUrls:batchGet", + "request": { + "$ref": "BatchGetAmpUrlsRequest" + }, + "description": "Returns AMP URL(s) and equivalent\n[AMP Cache URL(s)](/amp/cache/overview#amp-cache-url-format)." + } + } + } + }, "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" }, "fields": { + "location": "query", "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "callback": { + "description": "JSONP", "type": "string", "location": "query" }, - "alt": { - "description": "Data format for response.", + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], "location": "query", "enum": [ - "json", - "media", - "proto" + "1", + "2" ], - "default": "json", + "description": "V1 error format.", + "type": "string" + }, + "alt": { "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", + "location": "query", + "description": "Data format for response.", + "default": "json", "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" + "json", + "media", + "proto" ], + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "type": "string", "location": "query" }, - "callback": { - "description": "JSONP", + "access_token": { "type": "string", - "location": "query" + "location": "query", + "description": "OAuth access token." }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", "type": "string", "location": "query" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", "location": "query" }, "bearer_token": { + "location": "query", "description": "OAuth bearer token.", - "type": "string", - "location": "query" + "type": "string" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" }, "upload_protocol": { "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", "type": "string", "location": "query" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" } - }, - "documentationLink": "https://developers.google.com/amp/cache/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1", - "rootUrl": "https://acceleratedmobilepageurl.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-gen.go b/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-gen.go index 9c8d36488..3a18935ca 100644 --- a/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-gen.go +++ b/vendor/google.golang.org/api/acceleratedmobilepageurl/v1/acceleratedmobilepageurl-gen.go @@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only AmpUrls *AmpUrlsService } @@ -69,6 +70,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAmpUrlsService(s *Service) *AmpUrlsService { rs := &AmpUrlsService{s: s} return rs @@ -312,6 +317,7 @@ func (c *AmpUrlsBatchGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchgetampurlsrequest) if err != nil { diff --git a/vendor/google.golang.org/api/adexchangebuyer/v1.2/adexchangebuyer-gen.go b/vendor/google.golang.org/api/adexchangebuyer/v1.2/adexchangebuyer-gen.go index 104292d6f..05d2b1eee 100644 --- a/vendor/google.golang.org/api/adexchangebuyer/v1.2/adexchangebuyer-gen.go +++ b/vendor/google.golang.org/api/adexchangebuyer/v1.2/adexchangebuyer-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} return rs @@ -583,6 +588,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -719,6 +725,7 @@ func (c *AccountsListCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -834,6 +841,7 @@ func (c *AccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -968,6 +976,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -1114,6 +1123,7 @@ func (c *CreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1249,6 +1259,7 @@ func (c *CreativesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -1405,6 +1416,7 @@ func (c *CreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/adexchangebuyer/v1.3/adexchangebuyer-gen.go b/vendor/google.golang.org/api/adexchangebuyer/v1.3/adexchangebuyer-gen.go index 3c2ef1eab..b6350948a 100644 --- a/vendor/google.golang.org/api/adexchangebuyer/v1.3/adexchangebuyer-gen.go +++ b/vendor/google.golang.org/api/adexchangebuyer/v1.3/adexchangebuyer-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} return rs @@ -1519,6 +1524,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1655,6 +1661,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } 
@@ -1770,6 +1777,7 @@ func (c *AccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -1904,6 +1912,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -2048,6 +2057,7 @@ func (c *BillingInfoGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2185,6 +2195,7 @@ func (c *BillingInfoListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2311,6 +2322,7 @@ func (c *BudgetGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2453,6 +2465,7 @@ func (c *BudgetPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.budget) if err != nil { @@ -2600,6 +2613,7 @@ func (c *BudgetUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.budget) if err != nil { @@ -2755,6 +2769,7 @@ func (c *CreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2890,6 +2905,7 @@ func (c *CreativesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -3065,6 +3081,7 @@ func (c *CreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3252,6 +3269,7 @@ func (c *DirectDealsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3388,6 +3406,7 @@ func (c *DirectDealsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3529,6 +3548,7 @@ func (c *PerformanceReportListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3682,6 +3702,7 @@ func (c *PretargetingConfigDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "pretargetingconfigs/{accountId}/{configId}") @@ -3800,6 +3821,7 @@ func (c *PretargetingConfigGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3938,6 +3960,7 @@ func (c *PretargetingConfigInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pretargetingconfig) if err != nil { @@ -4082,6 +4105,7 @@ func (c *PretargetingConfigListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4214,6 +4238,7 @@ func (c *PretargetingConfigPatchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pretargetingconfig) if err != nil { @@ -4359,6 +4384,7 @@ func (c *PretargetingConfigUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pretargetingconfig) if err != nil { diff --git a/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-api.json b/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-api.json index 32b07de97..aefa7d13f 100644 --- a/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-api.json +++ b/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/gBf-L8hU2vSAjlFpvr_ZRh1Vf_I\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/XJ8vjwOJUtuz-unNR2L_5stMJIc\"", "discoveryVersion": "v1", "id": "adexchangebuyer:v1.4", "name": "adexchangebuyer", "canonicalName": "Ad Exchange Buyer", "version": "v1.4", - "revision": "20170118", + "revision": "20170215", "title": "Ad Exchange Buyer API", "description": "Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.", "ownerDomain": "google.com", @@ -396,7 +396,7 @@ }, "advertiserName": { "type": "string", - "description": "The 
name of the company being advertised in the creative.", + "description": "The name of the company being advertised in the creative. The value provided must exist in the advertisers.txt file.", "annotations": { "required": [ "adexchangebuyer.creatives.insert" @@ -415,7 +415,7 @@ }, "attribute": { "type": "array", - "description": "All attributes for the ads that may be shown from this snippet.", + "description": "List of buyer selectable attributes for the ads that may be shown from this snippet. Each attribute is represented by an integer as defined in buyer-declarable-creative-attributes.txt.", "items": { "type": "integer", "format": "int32" @@ -529,7 +529,7 @@ }, "filteringStatus": { "type": "integer", - "description": "The filtering status code. Please refer to the creative-status-codes.txt file for different statuses.", + "description": "The filtering status code as defined in creative-status-codes.txt.", "format": "int32" } } @@ -676,7 +676,7 @@ }, "productCategories": { "type": "array", - "description": "Detected product categories, if any. Read-only. This field should not be set in requests.", + "description": "Detected product categories, if any. Each category is represented by an integer as defined in ad-product-categories.txt. Read-only. This field should not be set in requests.", "items": { "type": "integer", "format": "int32" @@ -684,7 +684,7 @@ }, "restrictedCategories": { "type": "array", - "description": "All restricted categories for the ads that may be shown from this snippet.", + "description": "All restricted categories for the ads that may be shown from this snippet. Each category is represented by an integer as defined in the ad-restricted-categories.txt.", "items": { "type": "integer", "format": "int32" @@ -692,7 +692,7 @@ }, "sensitiveCategories": { "type": "array", - "description": "Detected sensitive categories, if any. Read-only. This field should not be set in requests.", + "description": "Detected sensitive categories, if any. Each category is represented by an integer as defined in ad-sensitive-categories.txt. Read-only. This field should not be set in requests.", "items": { "type": "integer", "format": "int32" @@ -723,7 +723,7 @@ }, "geoCriteriaId": { "type": "array", - "description": "Only set when contextType=LOCATION. Represents the geo criterias this restriction applies to.", + "description": "Only set when contextType=LOCATION. Represents the geo criterias this restriction applies to. Impressions are considered to match a context if either the user location or publisher location matches a given geoCriteriaId.", "items": { "type": "integer", "format": "int32" @@ -768,7 +768,7 @@ }, "vendorType": { "type": "array", - "description": "All vendor types for the ads that may be shown from this snippet.", + "description": "List of vendor types for the ads that may be shown from this snippet. Each vendor type is represented by an integer as defined in vendors.txt.", "items": { "type": "integer", "format": "int32" @@ -857,6 +857,10 @@ "id": "DealServingMetadata", "type": "object", "properties": { + "alcoholAdsAllowed": { + "type": "boolean", + "description": "True if alcohol ads are allowed for this deal (read-only). This field is only populated when querying for finalized orders using the method GetFinalizedOrderDeals" + }, "dealPauseStatus": { "$ref": "DealServingMetadataDealPauseStatus", "description": "Tracks which parties (if any) have paused a deal. 
(readonly, except via PauseResumeOrderDeals action)" diff --git a/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-gen.go b/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-gen.go index a86bf7975..4e9f07f82 100644 --- a/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-gen.go +++ b/vendor/google.golang.org/api/adexchangebuyer/v1.4/adexchangebuyer-gen.go @@ -72,9 +72,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -108,6 +109,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} return rs @@ -758,7 +763,7 @@ type Creative struct { AdvertiserId googleapi.Int64s `json:"advertiserId,omitempty"` // AdvertiserName: The name of the company being advertised in the - // creative. + // creative. The value provided must exist in the advertisers.txt file. AdvertiserName string `json:"advertiserName,omitempty"` // AgencyId: The agency id for this creative. @@ -770,8 +775,9 @@ type Creative struct { // timestamp). ApiUploadTimestamp string `json:"apiUploadTimestamp,omitempty"` - // Attribute: All attributes for the ads that may be shown from this - // snippet. + // Attribute: List of buyer selectable attributes for the ads that may + // be shown from this snippet. Each attribute is represented by an + // integer as defined in buyer-declarable-creative-attributes.txt. Attribute []int64 `json:"attribute,omitempty"` // BuyerCreativeId: A buyer-specific id identifying the creative in this @@ -828,16 +834,21 @@ type Creative struct { // ServingRestrictions directly. OpenAuctionStatus string `json:"openAuctionStatus,omitempty"` - // ProductCategories: Detected product categories, if any. Read-only. - // This field should not be set in requests. + // ProductCategories: Detected product categories, if any. Each category + // is represented by an integer as defined in + // ad-product-categories.txt. Read-only. This field should not be set in + // requests. ProductCategories []int64 `json:"productCategories,omitempty"` // RestrictedCategories: All restricted categories for the ads that may - // be shown from this snippet. + // be shown from this snippet. Each category is represented by an + // integer as defined in the ad-restricted-categories.txt. RestrictedCategories []int64 `json:"restrictedCategories,omitempty"` - // SensitiveCategories: Detected sensitive categories, if any. - // Read-only. This field should not be set in requests. + // SensitiveCategories: Detected sensitive categories, if any. Each + // category is represented by an integer as defined in + // ad-sensitive-categories.txt. Read-only. This field should not be set + // in requests. SensitiveCategories []int64 `json:"sensitiveCategories,omitempty"` // ServingRestrictions: The granular status of this ad in specific @@ -847,8 +858,9 @@ type Creative struct { // set in requests. 
ServingRestrictions []*CreativeServingRestrictions `json:"servingRestrictions,omitempty"` - // VendorType: All vendor types for the ads that may be shown from this - // snippet. + // VendorType: List of vendor types for the ads that may be shown from + // this snippet. Each vendor type is represented by an integer as + // defined in vendors.txt. VendorType []int64 `json:"vendorType,omitempty"` // Version: The version for this creative. Read-only. This field should @@ -1003,8 +1015,8 @@ type CreativeFilteringReasonsReasons struct { // exchange. FilteringCount int64 `json:"filteringCount,omitempty,string"` - // FilteringStatus: The filtering status code. Please refer to the - // creative-status-codes.txt file for different statuses. + // FilteringStatus: The filtering status code as defined in + // creative-status-codes.txt. FilteringStatus int64 `json:"filteringStatus,omitempty"` // ForceSendFields is a list of field names (e.g. "FilteringCount") to @@ -1261,7 +1273,9 @@ type CreativeServingRestrictionsContexts struct { ContextType string `json:"contextType,omitempty"` // GeoCriteriaId: Only set when contextType=LOCATION. Represents the geo - // criterias this restriction applies to. + // criterias this restriction applies to. Impressions are considered to + // match a context if either the user location or publisher location + // matches a given geoCriteriaId. GeoCriteriaId []int64 `json:"geoCriteriaId,omitempty"` // Platform: Only set when contextType=PLATFORM. Represents the @@ -1432,19 +1446,24 @@ func (s *CreativesList) MarshalJSON() ([]byte, error) { } type DealServingMetadata struct { + // AlcoholAdsAllowed: True if alcohol ads are allowed for this deal + // (read-only). This field is only populated when querying for finalized + // orders using the method GetFinalizedOrderDeals + AlcoholAdsAllowed bool `json:"alcoholAdsAllowed,omitempty"` + // DealPauseStatus: Tracks which parties (if any) have paused a deal. // (readonly, except via PauseResumeOrderDeals action) DealPauseStatus *DealServingMetadataDealPauseStatus `json:"dealPauseStatus,omitempty"` - // ForceSendFields is a list of field names (e.g. "DealPauseStatus") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AlcoholAdsAllowed") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DealPauseStatus") to + // NullFields is a list of field names (e.g. "AlcoholAdsAllowed") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -3792,6 +3811,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3928,6 +3948,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4051,6 +4072,7 @@ func (c *AccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -4198,6 +4220,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -4347,6 +4370,7 @@ func (c *BillingInfoGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4484,6 +4508,7 @@ func (c *BillingInfoListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4610,6 +4635,7 @@ func (c *BudgetGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4752,6 +4778,7 @@ func (c *BudgetPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.budget) if err != nil { @@ -4899,6 +4926,7 @@ func (c *BudgetUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.budget) if err != nil { @@ -5044,6 +5072,7 @@ func (c *CreativesAddDealCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "creatives/{accountId}/{buyerCreativeId}/addDeal/{dealId}") @@ -5171,6 +5200,7 @@ func (c *CreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", 
c.ifNoneMatch_) } @@ -5306,6 +5336,7 @@ func (c *CreativesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -5504,6 +5535,7 @@ func (c *CreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5712,6 +5744,7 @@ func (c *CreativesListDealsCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5851,6 +5884,7 @@ func (c *CreativesRemoveDealCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "creatives/{accountId}/{buyerCreativeId}/removeDeal/{dealId}") @@ -5966,6 +6000,7 @@ func (c *MarketplacedealsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.deleteorderdealsrequest) if err != nil { @@ -6099,6 +6134,7 @@ func (c *MarketplacedealsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.addorderdealsrequest) if err != nil { @@ -6248,6 +6284,7 @@ func (c *MarketplacedealsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6382,6 +6419,7 @@ func (c *MarketplacedealsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.editallorderdealsrequest) if err != nil { @@ -6515,6 +6553,7 @@ func (c *MarketplacenotesInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.addordernotesrequest) if err != nil { @@ -6666,6 +6705,7 @@ func (c *MarketplacenotesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6799,6 +6839,7 @@ func (c *MarketplaceprivateauctionUpdateproposalCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateprivateauctionproposalrequest) if err != nil { @@ -6931,6 +6972,7 @@ func (c *PerformanceReportListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7084,6 +7126,7 @@ func (c *PretargetingConfigDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "pretargetingconfigs/{accountId}/{configId}") @@ -7202,6 +7245,7 @@ func (c *PretargetingConfigGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7340,6 +7384,7 @@ func (c *PretargetingConfigInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pretargetingconfig) if err != nil { @@ -7484,6 +7529,7 @@ func (c *PretargetingConfigListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7616,6 +7662,7 @@ func (c *PretargetingConfigPatchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pretargetingconfig) if err != nil { @@ -7761,6 +7808,7 @@ func (c *PretargetingConfigUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pretargetingconfig) if err != nil { @@ -7913,6 +7961,7 @@ func (c *ProductsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8055,6 +8104,7 @@ func (c *ProductsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8185,6 +8235,7 @@ func (c *ProposalsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8311,6 +8362,7 @@ func (c *ProposalsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createordersrequest) if err != nil { @@ -8435,6 +8487,7 @@ func (c *ProposalsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.proposal) if err != nil { @@ -8615,6 +8668,7 @@ func (c *ProposalsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8735,6 +8789,7 @@ func (c *ProposalsSetupcompleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "proposals/{proposalId}/setupcomplete") @@ -8836,6 +8891,7 @@ func (c *ProposalsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.proposal) if err != nil { @@ -9011,6 +9067,7 @@ func (c *PubprofilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-api.json b/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-api.json index e73cafdb4..488ebdec6 100644 --- a/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-api.json +++ b/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-api.json @@ -1,476 +1,58 @@ { + "version": "v2beta1", + "baseUrl": "https://adexchangebuyer.googleapis.com/", + "servicePath": "", + "description": "Accesses the latest features for managing Ad Exchange accounts, Real-Time Bidding configurations and auction metrics, and Marketplace programmatic deals.", + "kind": "discovery#restDescription", + "basePath": "", + "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest/guides/client-access/", + "revision": "20170222", "id": "adexchangebuyer2:v2beta1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/adexchange.buyer": { - "description": "Manage your Ad Exchange buyer account configuration" + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "ClientUserInvitation": { + "description": "An invitation for a new client user to get access to the Ad Exchange\nBuyer UI.\nAll fields are required unless otherwise specified.", + "type": "object", + "properties": { + "email": { + "description": "The email address to which the invitation is sent. 
Email\naddresses should be unique among all client users under each sponsor\nbuyer.", + "type": "string" + }, + "clientAccountId": { + "description": "Numerical account ID of the client buyer\nthat the invited user is associated with.\nThe value of this field is ignored in create operations.", + "format": "int64", + "type": "string" + }, + "invitationId": { + "description": "The unique numerical ID of the invitation that is sent to the user.\nThe value of this field is ignored in create operations.", + "format": "int64", + "type": "string" + } + }, + "id": "ClientUserInvitation" + }, + "AuctionContext": { + "id": "AuctionContext", + "description": "@OutputOnly The auction type the restriction applies to.", + "type": "object", + "properties": { + "auctionTypes": { + "description": "The auction types this restriction applies to.", + "type": "array", + "items": { + "enum": [ + "OPEN_AUCTION", + "DIRECT_DEALS" + ], + "type": "string" + }, + "enumDescriptions": [ + "The restriction applies to open auction.", + "The restriction applies to direct deals." + ] } } - } - }, - "description": "Accesses the latest features for managing Ad Exchange accounts, Real-Time Bidding configurations and auction metrics, and Marketplace programmatic deals.", - "protocol": "rest", - "title": "Ad Exchange Buyer API II", - "resources": { - "accounts": { - "resources": { - "clients": { - "resources": { - "users": { - "methods": { - "update": { - "id": "adexchangebuyer2.accounts.clients.users.update", - "response": { - "$ref": "ClientUser" - }, - "parameterOrder": [ - "accountId", - "clientAccountId", - "userId" - ], - "description": "Updates an existing client user.\nOnly the user status can be changed on update.", - "request": { - "$ref": "ClientUser" - }, - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", - "httpMethod": "PUT", - "parameters": { - "clientAccountId": { - "description": "Numerical account ID of the client buyer that the user to be retrieved\nis associated with. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "userId": { - "description": "Numerical identifier of the user to retrieve. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "accountId": { - "description": "Numerical account ID of the client's sponsor buyer. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - }, - "get": { - "id": "adexchangebuyer2.accounts.clients.users.get", - "response": { - "$ref": "ClientUser" - }, - "parameterOrder": [ - "accountId", - "clientAccountId", - "userId" - ], - "description": "Retrieves an existing client user.", - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", - "httpMethod": "GET", - "parameters": { - "clientAccountId": { - "description": "Numerical account ID of the client buyer\nthat the user to be retrieved is associated with. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "userId": { - "description": "Numerical identifier of the user to retrieve. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "accountId": { - "description": "Numerical account ID of the client's sponsor buyer. 
(required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - }, - "list": { - "id": "adexchangebuyer2.accounts.clients.users.list", - "response": { - "$ref": "ListClientUsersResponse" - }, - "parameterOrder": [ - "accountId", - "clientAccountId" - ], - "description": "Lists all the known client users for a specified\nsponsor buyer account ID.", - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", - "httpMethod": "GET", - "parameters": { - "clientAccountId": { - "description": "The account ID of the client buyer to list users for. (required)\nYou must specify either a string representation of a\nnumerical account identifier or the `-` character\nto list all the client users for all the clients\nof a given sponsor buyer.", - "required": true, - "location": "path", - "type": "string" - }, - "pageSize": { - "description": "Requested page size. The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "accountId": { - "description": "Numerical account ID of the sponsor buyer of the client to list users for.\n(required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "pageToken": { - "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUsersResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.users.list method.", - "location": "query", - "type": "string" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - } - } - }, - "invitations": { - "methods": { - "create": { - "id": "adexchangebuyer2.accounts.clients.invitations.create", - "response": { - "$ref": "ClientUserInvitation" - }, - "parameterOrder": [ - "accountId", - "clientAccountId" - ], - "description": "Creates and sends out an email invitation to access\nan Ad Exchange client buyer account.", - "request": { - "$ref": "ClientUserInvitation" - }, - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", - "httpMethod": "POST", - "parameters": { - "clientAccountId": { - "description": "Numerical account ID of the client buyer that the user\nshould be associated with. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "accountId": { - "description": "Numerical account ID of the client's sponsor buyer. 
(required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - }, - "get": { - "id": "adexchangebuyer2.accounts.clients.invitations.get", - "response": { - "$ref": "ClientUserInvitation" - }, - "parameterOrder": [ - "accountId", - "clientAccountId", - "invitationId" - ], - "description": "Retrieves an existing client user invitation.", - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}", - "httpMethod": "GET", - "parameters": { - "clientAccountId": { - "description": "Numerical account ID of the client buyer that the user invitation\nto be retrieved is associated with. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "invitationId": { - "description": "Numerical identifier of the user invitation to retrieve. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "accountId": { - "description": "Numerical account ID of the client's sponsor buyer. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - }, - "list": { - "id": "adexchangebuyer2.accounts.clients.invitations.list", - "response": { - "$ref": "ListClientUserInvitationsResponse" - }, - "parameterOrder": [ - "accountId", - "clientAccountId" - ], - "description": "Lists all the client users invitations for a client\nwith a given account ID.", - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", - "httpMethod": "GET", - "parameters": { - "clientAccountId": { - "description": "Numerical account ID of the client buyer to list invitations for.\n(required)\nYou must either specify a string representation of a\nnumerical account identifier or the `-` character\nto list all the invitations for all the clients\nof a given sponsor buyer.", - "required": true, - "location": "path", - "type": "string" - }, - "pageSize": { - "description": "Requested page size. Server may return fewer clients than requested.\nIf unspecified, server will pick an appropriate default.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "accountId": { - "description": "Numerical account ID of the client's sponsor buyer. 
(required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "pageToken": { - "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUserInvitationsResponse.nextPageToken\nreturned from the previous call to the\nclients.invitations.list\nmethod.", - "location": "query", - "type": "string" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - } - } - } - }, - "methods": { - "update": { - "id": "adexchangebuyer2.accounts.clients.update", - "response": { - "$ref": "Client" - }, - "parameterOrder": [ - "accountId", - "clientAccountId" - ], - "description": "Updates an existing client buyer.", - "request": { - "$ref": "Client" - }, - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", - "httpMethod": "PUT", - "parameters": { - "clientAccountId": { - "description": "Unique numerical account ID of the client to update. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "accountId": { - "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to update a client for. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - }, - "get": { - "id": "adexchangebuyer2.accounts.clients.get", - "response": { - "$ref": "Client" - }, - "parameterOrder": [ - "accountId", - "clientAccountId" - ], - "description": "Gets a client buyer with a given client account ID.", - "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", - "httpMethod": "GET", - "parameters": { - "clientAccountId": { - "description": "Numerical account ID of the client buyer to retrieve. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "accountId": { - "description": "Numerical account ID of the client's sponsor buyer. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - } - }, - "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - }, - "create": { - "id": "adexchangebuyer2.accounts.clients.create", - "response": { - "$ref": "Client" - }, - "parameterOrder": [ - "accountId" - ], - "description": "Creates a new client buyer.", - "request": { - "$ref": "Client" - }, - "flatPath": "v2beta1/accounts/{accountId}/clients", - "httpMethod": "POST", - "parameters": { - "accountId": { - "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to create a client for. (required)", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - } - }, - "path": "v2beta1/accounts/{accountId}/clients", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - }, - "list": { - "id": "adexchangebuyer2.accounts.clients.list", - "response": { - "$ref": "ListClientsResponse" - }, - "parameterOrder": [ - "accountId" - ], - "description": "Lists all the clients for the current sponsor buyer.", - "flatPath": "v2beta1/accounts/{accountId}/clients", - "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "Requested page size. 
The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "accountId": { - "description": "Unique numerical account ID of the sponsor buyer to list the clients for.", - "required": true, - "location": "path", - "type": "string", - "format": "int64" - }, - "pageToken": { - "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientsResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.list method.", - "location": "query", - "type": "string" - } - }, - "path": "v2beta1/accounts/{accountId}/clients", - "scopes": [ - "https://www.googleapis.com/auth/adexchange.buyer" - ] - } - } - } - } - } - }, - "schemas": { - "ClientUser": { - "description": "A client user is created under a client buyer and has restricted access to\nthe Ad Exchange Marketplace and certain other sections\nof the Ad Exchange Buyer UI based on the role\ngranted to the associated client buyer.\n\nThe only way a new client user can be created is via accepting an\nemail invitation\n(see the\naccounts.clients.invitations.create\nmethod).\n\nAll fields are required unless otherwise specified.", - "type": "object", - "properties": { - "email": { - "description": "User's email address. The value of this field\nis ignored in an update operation.", - "type": "string" - }, - "clientAccountId": { - "description": "Numerical account ID of the client buyer\nwith which the user is associated; the\nbuyer must be a client of the current sponsor buyer.\nThe value of this field is ignored in an update operation.", - "type": "string", - "format": "int64" - }, - "status": { - "description": "The status of the client user.", - "enum": [ - "USER_STATUS_UNSPECIFIED", - "PENDING", - "ACTIVE", - "DISABLED" - ], - "enumDescriptions": [ - "A placeholder for an undefined user status.", - "A user who was already created but hasn't accepted the invitation yet.", - "A user that is currently active.", - "A user that is currently disabled." - ], - "type": "string" - }, - "userId": { - "description": "The unique numerical ID of the client user\nthat has accepted an invitation.\nThe value of this field is ignored in an update operation.", - "type": "string", - "format": "int64" - } - }, - "id": "ClientUser" - }, - "ClientUserInvitation": { - "description": "An invitation for a new client user to get access to the Ad Exchange\nBuyer UI.\nAll fields are required unless otherwise specified.", - "type": "object", - "properties": { - "email": { - "description": "The email address to which the invitation is sent. 
Email\naddresses should be unique among all client users under each sponsor\nbuyer.", - "type": "string" - }, - "clientAccountId": { - "description": "Numerical account ID of the client buyer\nthat the invited user is associated with.\nThe value of this field is ignored in create operations.", - "type": "string", - "format": "int64" - }, - "invitationId": { - "description": "The unique numerical ID of the invitation that is sent to the user.\nThe value of this field is ignored in create operations.", - "type": "string", - "format": "int64" - } - }, - "id": "ClientUserInvitation" }, "ListClientUserInvitationsResponse": { "type": "object", @@ -480,11 +62,11 @@ "type": "string" }, "invitations": { - "description": "The returned list of client users.", "type": "array", "items": { "$ref": "ClientUserInvitation" - } + }, + "description": "The returned list of client users." } }, "id": "ListClientUserInvitationsResponse" @@ -492,138 +74,1874 @@ "ListClientUsersResponse": { "type": "object", "properties": { + "nextPageToken": { + "description": "A token to retrieve the next page of results.\nPass this value in the\nListClientUsersRequest.pageToken\nfield in the subsequent call to the\nclients.invitations.list\nmethod to retrieve the next\npage of results.", + "type": "string" + }, "users": { "description": "The returned list of client users.", "type": "array", "items": { "$ref": "ClientUser" } - }, - "nextPageToken": { - "description": "A token to retrieve the next page of results.\nPass this value in the\nListClientUsersRequest.pageToken\nfield in the subsequent call to the\nclients.invitations.list\nmethod to retrieve the next\npage of results.", - "type": "string" } }, "id": "ListClientUsersResponse" }, - "Client": { - "description": "A client resource represents a client buyer—an agency,\na brand, or an advertiser customer of the sponsor buyer.\nUsers associated with the client buyer have restricted access to\nthe Ad Exchange Marketplace and certain other sections\nof the Ad Exchange Buyer UI based on the role\ngranted to the client buyer.\nAll fields are required unless otherwise specified.", + "LocationContext": { + "description": "@OutputOnly The Geo criteria the restriction applies to.", "type": "object", "properties": { - "visibleToSeller": { - "description": "Whether the client buyer will be visible to sellers.", - "type": "boolean" - }, - "status": { - "description": "The status of the client buyer.", - "enum": [ - "CLIENT_STATUS_UNSPECIFIED", - "DISABLED", - "ACTIVE" + "geoCriteriaIds": { + "description": "IDs representing the geo location for this context.\nPlease refer to the\n[geo-table.csv](https://storage.googleapis.com/adx-rtb-dictionaries/geo-table.csv)\nfile for different geo criteria IDs.", + "type": "array", + "items": { + "format": "int32", + "type": "integer" + } + } + }, + "id": "LocationContext" + }, + "PlatformContext": { + "type": "object", + "properties": { + "platforms": { + "enumDescriptions": [ + "Desktop platform.", + "Android platform.", + "iOS platform." ], + "description": "The platforms this restriction applies to.", + "type": "array", + "items": { + "enum": [ + "DESKTOP", + "ANDROID", + "IOS" + ], + "type": "string" + } + } + }, + "id": "PlatformContext", + "description": "@OutputOnly The type of platform the restriction applies to." 
+ }, + "ClientUser": { + "description": "A client user is created under a client buyer and has restricted access to\nthe Ad Exchange Marketplace and certain other sections\nof the Ad Exchange Buyer UI based on the role\ngranted to the associated client buyer.\n\nThe only way a new client user can be created is via accepting an\nemail invitation\n(see the\naccounts.clients.invitations.create\nmethod).\n\nAll fields are required unless otherwise specified.", + "type": "object", + "properties": { + "status": { + "type": "string", "enumDescriptions": [ - "A placeholder for an undefined client status.", - "A client that is currently disabled.", - "A client that is currently active." + "A placeholder for an undefined user status.", + "A user who was already created but hasn't accepted the invitation yet.", + "A user that is currently active.", + "A user that is currently disabled." + ], + "enum": [ + "USER_STATUS_UNSPECIFIED", + "PENDING", + "ACTIVE", + "DISABLED" ], + "description": "The status of the client user." + }, + "userId": { + "description": "The unique numerical ID of the client user\nthat has accepted an invitation.\nThe value of this field is ignored in an update operation.", + "format": "int64", "type": "string" }, - "entityType": { - "description": "The type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.", + "email": { + "description": "User's email address. The value of this field\nis ignored in an update operation.", + "type": "string" + }, + "clientAccountId": { + "type": "string", + "description": "Numerical account ID of the client buyer\nwith which the user is associated; the\nbuyer must be a client of the current sponsor buyer.\nThe value of this field is ignored in an update operation.", + "format": "int64" + } + }, + "id": "ClientUser" + }, + "CreativeDealAssociation": { + "id": "CreativeDealAssociation", + "description": "The association between a creative and a deal.", + "type": "object", + "properties": { + "creativeId": { + "description": "The ID of the creative associated with the deal.", + "type": "string" + }, + "dealsId": { + "description": "The externalDealId for the deal associated with the creative.", + "type": "string" + }, + "accountId": { + "description": "The account the creative belongs to.", + "type": "string" + } + } + }, + "FilteringStats": { + "type": "object", + "properties": { + "reasons": { + "description": "The set of filtering reasons for this date.", + "type": "array", + "items": { + "$ref": "Reason" + } + }, + "date": { + "$ref": "Date", + "description": "The day during which the data was collected.\nThe data is collected from 00:00:00 to 23:59:59 PT.\nDuring switches from PST to PDT and back, the day may\ncontain 23 or 25 hours of data instead of the usual 24." + } + }, + "id": "FilteringStats", + "description": "@OutputOnly Filtering reasons for this creative during a period of a single\nday (from midnight to midnight Pacific)." + }, + "Creative": { + "id": "Creative", + "description": "A creative and its classification data.", + "type": "object", + "properties": { + "impressionTrackingUrls": { + "description": "The set of URLs to be called to record an impression.", + "type": "array", + "items": { + "type": "string" + } + }, + "html": { + "$ref": "HtmlContent", + "description": "An HTML creative." 
+ }, + "detectedProductCategories": { + "description": "@OutputOnly Detected product categories, if any.\nSee the ad-product-categories.txt file in the technical documentation\nfor a list of IDs.", + "type": "array", + "items": { + "format": "int32", + "type": "integer" + } + }, + "dealsStatus": { + "enumDescriptions": [ + "The status is unknown.", + "The creative has not been checked.", + "The creative has been conditionally approved.\nSee serving_restrictions for details.", + "The creative has been approved.", + "The creative has been disapproved." + ], "enum": [ - "ENTITY_TYPE_UNSPECIFIED", - "ADVERTISER", - "BRAND", - "AGENCY" + "STATUS_UNSPECIFIED", + "NOT_CHECKED", + "CONDITIONALLY_APPROVED", + "APPROVED", + "DISAPPROVED" + ], + "description": "@OutputOnly The top-level deals status of this creative.\nIf disapproved, an entry for 'auctionType=DIRECT_DEALS' (or 'ALL') in\nserving_restrictions will also exist. Note\nthat this may be nuanced with other contextual restrictions, in which case,\nit may be preferable to read from serving_restrictions directly.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "type": "string" + }, + "openAuctionStatus": { + "enum": [ + "STATUS_UNSPECIFIED", + "NOT_CHECKED", + "CONDITIONALLY_APPROVED", + "APPROVED", + "DISAPPROVED" + ], + "description": "@OutputOnly The top-level open auction status of this creative.\nIf disapproved, an entry for 'auctionType = OPEN_AUCTION' (or 'ALL') in\nserving_restrictions will also exist. Note\nthat this may be nuanced with other contextual restrictions, in which case,\nit may be preferable to read from serving_restrictions directly.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "type": "string", + "enumDescriptions": [ + "The status is unknown.", + "The creative has not been checked.", + "The creative has been conditionally approved.\nSee serving_restrictions for details.", + "The creative has been approved.", + "The creative has been disapproved." + ] + }, + "advertiserName": { + "description": "The name of the company being advertised in the creative.", + "type": "string" + }, + "detectedAdvertiserIds": { + "description": "@OutputOnly Detected advertiser IDs, if any.", + "type": "array", + "items": { + "type": "string", + "format": "int64" + } + }, + "detectedDomains": { + "type": "array", + "items": { + "type": "string" + }, + "description": "@OutputOnly\nThe detected domains for this creative." + }, + "filteringStats": { + "$ref": "FilteringStats", + "description": "@OutputOnly The filtering stats for this creative." + }, + "attributes": { + "enumDescriptions": [ + "Do not use. 
This is a placeholder value only.", + "The creative is tagged.", + "The creative is cookie targeted.", + "The creative is user interest targeted.", + "The creative does not expand.", + "The creative expands up.", + "The creative expands down.", + "The creative expands left.", + "The creative expands right.", + "The creative expands up and left.", + "The creative expands up and right.", + "The creative expands down and left.", + "The creative expands down and right.", + "The creative expands up or down.", + "The creative expands left or right.", + "The creative expands on any diagonal.", + "The creative expands when rolled over.", + "The instream vast video type is vpaid flash.", + "The creative is MRAID", + "The creative is SSL.", + "The creative is an interstitial.", + "The creative is eligible for native.", + "The creative is not eligible for native.", + "The creative can dynamically resize to fill a variety of slot sizes." + ], + "description": "All attributes for the ads that may be shown from this creative.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "type": "array", + "items": { + "type": "string", + "enum": [ + "ATTRIBUTE_UNSPECIFIED", + "IS_TAGGED", + "IS_COOKIE_TARGETED", + "IS_USER_INTEREST_TARGETED", + "EXPANDING_DIRECTION_NONE", + "EXPANDING_DIRECTION_UP", + "EXPANDING_DIRECTION_DOWN", + "EXPANDING_DIRECTION_LEFT", + "EXPANDING_DIRECTION_RIGHT", + "EXPANDING_DIRECTION_UP_LEFT", + "EXPANDING_DIRECTION_UP_RIGHT", + "EXPANDING_DIRECTION_DOWN_LEFT", + "EXPANDING_DIRECTION_DOWN_RIGHT", + "EXPANDING_DIRECTION_UP_OR_DOWN", + "EXPANDING_DIRECTION_LEFT_OR_RIGHT", + "EXPANDING_DIRECTION_ANY_DIAGONAL", + "EXPANDING_ACTION_ROLLOVER_TO_EXPAND", + "INSTREAM_VAST_VIDEO_TYPE_VPAID_FLASH", + "RICH_MEDIA_CAPABILITY_TYPE_MRAID", + "RICH_MEDIA_CAPABILITY_TYPE_SSL", + "RICH_MEDIA_CAPABILITY_TYPE_INTERSTITIAL", + "NATIVE_ELIGIBILITY_ELIGIBLE", + "NATIVE_ELIGIBILITY_NOT_ELIGIBLE", + "RENDERING_SIZELESS_ADX" + ] + } + }, + "apiUpdateTime": { + "type": "string", + "description": "@OutputOnly The last update timestamp of the creative via API.", + "format": "google-datetime" + }, + "detectedLanguages": { + "description": "@OutputOnly\nThe detected languages for this creative. The order is arbitrary. The codes\nare 2 or 5 characters and are documented at\nhttps://developers.google.com/adwords/api/docs/appendix/languagecodes.", + "type": "array", + "items": { + "type": "string" + } + }, + "creativeId": { + "description": "The buyer-defined creative ID of this creative.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "type": "string" + }, + "accountId": { + "description": "The account that this creative belongs to.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "type": "string" + }, + "native": { + "$ref": "NativeContent", + "description": "A native creative." + }, + "video": { + "$ref": "VideoContent", + "description": "A video creative." + }, + "servingRestrictions": { + "type": "array", + "items": { + "$ref": "ServingRestriction" + }, + "description": "@OutputOnly The granular status of this ad in specific contexts.\nA context here relates to where something ultimately serves (for example,\na physical location, a platform, an HTTPS vs HTTP request, or the type\nof auction)." 
+ }, + "agencyId": { + "description": "The agency ID for this creative.", + "format": "int64", + "type": "string" + }, + "clickThroughUrls": { + "description": "The set of destination URLs for the creative.", + "type": "array", + "items": { + "type": "string" + } + }, + "adChoicesDestinationUrl": { + "description": "The link to AdChoices destination page.", + "type": "string" + }, + "detectedSensitiveCategories": { + "description": "@OutputOnly Detected sensitive categories, if any.\nSee the ad-sensitive-categories.txt file in the technical documentation for\na list of IDs. You should use these IDs along with the\nexcluded-sensitive-category field in the bid request to filter your bids.", + "type": "array", + "items": { + "type": "integer", + "format": "int32" + } + }, + "restrictedCategories": { + "enumDescriptions": [ + "The ad has no restricted categories", + "The alcohol restricted category." ], + "description": "All restricted categories for the ads that may be shown from this creative.", + "type": "array", + "items": { + "enum": [ + "NO_RESTRICTED_CATEGORIES", + "ALCOHOL" + ], + "type": "string" + } + }, + "corrections": { + "description": "@OutputOnly Shows any corrections that were applied to this creative.", + "type": "array", + "items": { + "$ref": "Correction" + } + }, + "version": { + "description": "@OutputOnly The version of this creative.", + "format": "int32", + "type": "integer" + }, + "vendorIds": { + "description": "All vendor IDs for the ads that may be shown from this creative.\nSee https://storage.googleapis.com/adx-rtb-dictionaries/vendors.txt\nfor possible values.", + "type": "array", + "items": { + "type": "integer", + "format": "int32" + } + } + } + }, + "RemoveDealAssociationRequest": { + "description": "A request for removing the association between a deal and a creative.", + "type": "object", + "properties": { + "association": { + "$ref": "CreativeDealAssociation", + "description": "The association between a creative and a deal that should be removed." + } + }, + "id": "RemoveDealAssociationRequest" + }, + "Client": { + "description": "A client resource represents a client buyer—an agency,\na brand, or an advertiser customer of the sponsor buyer.\nUsers associated with the client buyer have restricted access to\nthe Ad Exchange Marketplace and certain other sections\nof the Ad Exchange Buyer UI based on the role\ngranted to the client buyer.\nAll fields are required unless otherwise specified.", + "type": "object", + "properties": { + "entityType": { + "description": "The type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`.", + "type": "string", "enumDescriptions": [ "A placeholder for an undefined client entity type. Should not be used.", "An advertiser.", "A brand.", "An advertising agency." ], + "enum": [ + "ENTITY_TYPE_UNSPECIFIED", + "ADVERTISER", + "BRAND", + "AGENCY" + ] + }, + "clientName": { + "description": "Name used to represent this client to publishers.\nYou may have multiple clients that map to the same entity,\nbut for each client the combination of `clientName` and entity\nmust be unique.\nYou can specify this field as empty.", "type": "string" }, "role": { - "description": "The role which is assigned to the client buyer. Each role implies a set of\npermissions granted to the client. 
Must be one of `CLIENT_DEAL_VIEWER`,\n`CLIENT_DEAL_NEGOTIATOR` or `CLIENT_DEAL_APPROVER`.", - "enum": [ - "CLIENT_ROLE_UNSPECIFIED", - "CLIENT_DEAL_VIEWER", - "CLIENT_DEAL_NEGOTIATOR", - "CLIENT_DEAL_APPROVER" - ], + "type": "string", "enumDescriptions": [ "A placeholder for an undefined client role.", "Users associated with this client can see publisher deal offers\nin the Marketplace.\nThey can neither negotiate proposals nor approve deals.\nIf this client is visible to publishers, they can send deal proposals\nto this client.", "Users associated with this client can respond to deal proposals\nsent to them by publishers. They can also initiate deal proposals\nof their own.", "Users associated with this client can approve eligible deals\non your behalf. Some deals may still explicitly require publisher\nfinalization. If this role is not selected, the sponsor buyer\nwill need to manually approve each of their deals." ], - "type": "string" + "enum": [ + "CLIENT_ROLE_UNSPECIFIED", + "CLIENT_DEAL_VIEWER", + "CLIENT_DEAL_NEGOTIATOR", + "CLIENT_DEAL_APPROVER" + ], + "description": "The role which is assigned to the client buyer. Each role implies a set of\npermissions granted to the client. Must be one of `CLIENT_DEAL_VIEWER`,\n`CLIENT_DEAL_NEGOTIATOR` or `CLIENT_DEAL_APPROVER`." }, - "clientName": { - "description": "Name used to represent this client to publishers.\nYou may have multiple clients that map to the same entity,\nbut for each client the combination of `clientName` and entity\nmust be unique.\nYou can specify this field as empty.", + "visibleToSeller": { + "description": "Whether the client buyer will be visible to sellers.", + "type": "boolean" + }, + "entityId": { + "description": "Numerical identifier of the client entity.\nThe entity can be an advertiser, a brand, or an agency.\nThis identifier is unique among all the entities with the same type.\n\nA list of all known advertisers with their identifiers is available in the\n[advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt)\nfile.\n\nA list of all known brands with their identifiers is available in the\n[brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt)\nfile.\n\nA list of all known agencies with their identifiers is available in the\n[agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt)\nfile.", + "format": "int64", "type": "string" }, "clientAccountId": { "description": "The globally-unique numerical ID of the client.\nThe value of this field is ignored in create and update operations.", - "type": "string", - "format": "int64" - }, - "entityId": { - "description": "Numerical identifier of the client entity.\nThe entity can be an advertiser, a brand, or an agency.\nThis identifier is unique among all the entities with the same type.\n\nA list of all known advertisers with their identifiers is available in the\n[advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt)\nfile.\n\nA list of all known brands with their identifiers is available in the\n[brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt)\nfile.\n\nA list of all known agencies with their identifiers is available in the\n[agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt)\nfile.", - "type": "string", - "format": "int64" + "format": "int64", + "type": "string" }, "entityName": { "description": "The name of the entity. 
This field is automatically fetched based on\nthe type and ID.\nThe value of this field is ignored in create and update operations.", "type": "string" + }, + "status": { + "description": "The status of the client buyer.", + "type": "string", + "enumDescriptions": [ + "A placeholder for an undefined client status.", + "A client that is currently disabled.", + "A client that is currently active." + ], + "enum": [ + "CLIENT_STATUS_UNSPECIFIED", + "DISABLED", + "ACTIVE" + ] } }, "id": "Client" }, - "ListClientsResponse": { + "Correction": { + "description": "@OutputOnly Shows any corrections that were applied to this creative.", "type": "object", "properties": { - "nextPageToken": { - "description": "A token to retrieve the next page of results.\nPass this value in the\nListClientsRequest.pageToken\nfield in the subsequent call to the\naccounts.clients.list method\nto retrieve the next page of results.", + "type": { + "enumDescriptions": [ + "The correction type is unknown. Refer to the details for more information.", + "The ad's declared vendors did not match the vendors that were detected.\nThe detected vendors were added.", + "The ad had the SSL attribute declared but was not SSL-compliant.\nThe SSL attribute was removed.", + "The ad was declared as Flash-free but contained Flash, so the Flash-free\nattribute was removed.", + "The ad was not declared as Flash-free but it did not reference any flash\ncontent, so the Flash-free attribute was added.", + "The ad did not declare a required creative attribute.\nThe attribute was added.", + "The ad did not declare a required technology vendor.\nThe technology vendor was added.", + "The ad did not declare the SSL attribute but was SSL-compliant, so the\nSSL attribute was added.", + "Properties consistent with In-banner video were found, so an\nIn-Banner Video attribute was added.", + "The ad makes calls to the MRAID API so the MRAID attribute was added.", + "The ad unnecessarily declared the Flash attribute, so the Flash attribute\nwas removed.", + "The ad contains video content." + ], + "enum": [ + "CORRECTION_TYPE_UNSPECIFIED", + "VENDOR_IDS_ADDED", + "SSL_ATTRIBUTE_REMOVED", + "FLASH_FREE_ATTRIBUTE_REMOVED", + "FLASH_FREE_ATTRIBUTE_ADDED", + "REQUIRED_ATTRIBUTE_ADDED", + "REQUIRED_VENDOR_ADDED", + "SSL_ATTRIBUTE_ADDED", + "IN_BANNER_VIDEO_ATTRIBUTE_ADDED", + "MRAID_ATTRIBUTE_ADDED", + "FLASH_ATTRIBUTE_REMOVED", + "VIDEO_IN_SNIPPET_ATTRIBUTE_ADDED" + ], + "description": "The type of correction that was applied to the creative.", "type": "string" }, - "clients": { - "description": "The returned list of clients.", + "contexts": { + "description": "The contexts for the correction.", "type": "array", "items": { - "$ref": "Client" + "$ref": "ServingContext" + } + }, + "details": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Additional details about what was corrected." + } + }, + "id": "Correction" + }, + "AddDealAssociationRequest": { + "properties": { + "association": { + "$ref": "CreativeDealAssociation", + "description": "The association between a creative and a deal that should be added." 
+ } + }, + "id": "AddDealAssociationRequest", + "description": "A request for associating a deal and a creative.", + "type": "object" + }, + "ListDealAssociationsResponse": { + "description": "A response for listing creative and deal associations", + "type": "object", + "properties": { + "associations": { + "description": "The list of associations.", + "type": "array", + "items": { + "$ref": "CreativeDealAssociation" + } + }, + "nextPageToken": { + "type": "string", + "description": "A token to retrieve the next page of results.\nPass this value in the\nListDealAssociationsRequest.page_token\nfield in the subsequent call to 'ListDealAssociation' method to retrieve\nthe next page of results." + } + }, + "id": "ListDealAssociationsResponse" + }, + "Disapproval": { + "description": "@OutputOnly The reason and details for a disapproval.", + "type": "object", + "properties": { + "details": { + "description": "Additional details about the reason for disapproval.", + "type": "array", + "items": { + "type": "string" + } + }, + "reason": { + "type": "string", + "enumDescriptions": [ + "The length of the image animation is longer than allowed.", + "The click through URL doesn't work properly.", + "Something is wrong with the creative itself.", + "The ad makes a fourth party call to an unapproved vendor.", + "The ad targets consumers using remarketing lists and/or collects\ndata for subsequent use in retargeting, but does not correctly declare\nthat use.", + "Clicking on the ad leads to an error page.", + "The ad size when rendered does not match the declaration.", + "Ads with a white background require a border, which was missing.", + "The creative attempts to set cookies from a fourth party that is not\ncertified.", + "The creative sets an LSO object.", + "The ad serves a blank.", + "The ad uses rotation, but not all destination URLs were declared.", + "There is a problem with the way the click macro is used.", + "The ad technology declaration is not accurate.", + "The actual destination URL does not match the declared destination URL.", + "The declared expanding direction does not match the actual direction.", + "The ad does not expand in a supported direction.", + "The ad uses an expandable vendor that is not supported.", + "There was an issue with the expandable ad.", + "The ad uses a video vendor that is not supported.", + "The length of the video ad is not supported.", + "The format of the video ad is not supported.", + "There was an issue with the video ad.", + "The landing page does not conform to Ad Exchange policy.", + "The ad or the landing page may contain malware.", + "The ad contains adult images or video content.", + "The ad contains text that is unclear or inaccurate.", + "The ad promotes counterfeit designer goods.", + "The ad causes a popup window to appear.", + "The creative does not follow policies set for the RTB protocol.", + "The ad contains a URL that uses a numeric IP address for the domain.", + "The ad or landing page contains unacceptable content because it initiated\na software or executable download.", + "The ad set an unauthorized cookie on a Google domain.", + "Flash content found when no flash was declared.", + "SSL support declared but not working correctly.", + "Rich Media - Direct Download in Ad (ex. 
PDF download).", + "Maximum download size exceeded.", + "Bad Destination URL: Site Not Crawlable.", + "Bad URL: Legal disapproval.", + "Pharmaceuticals, Gambling, Alcohol not allowed and at least one was\ndetected.", + "Dynamic DNS at Destination URL.", + "Poor Image / Video Quality.", + "For example, Image Trick to Click.", + "Incorrect Image Layout.", + "Irrelevant Image / Video.", + "Broken back button.", + "Misleading/Inaccurate claims in ads.", + "Restricted Products.", + "Unacceptable content. For example, malware.", + "The ad automatically redirects to the destination site without a click,\nor reports a click when none were made.", + "The ad uses URL protocols that do not exist or are not allowed on AdX.", + "Restricted content (for example, alcohol) was found in the ad but not\ndeclared.", + "Violation of the remarketing list policy.", + "The destination site's robot.txt file prevents it from being crawled.", + "Click to download must link to an app.", + "A review extension must be an accurate review.", + "Sexually explicit content.", + "The ad tries to gain an unfair traffic advantage.", + "The ad tries to circumvent Google's advertising systems.", + "The ad promotes dangerous knives.", + "The ad promotes explosives.", + "The ad promotes guns & parts.", + "The ad promotes recreational drugs/services & related equipment.", + "The ad promotes tobacco products/services & related equipment.", + "The ad promotes weapons.", + "The ad is unclear or irrelevant to the destination site.", + "The ad does not meet professional standards.", + "The promotion is unnecessarily difficult to navigate.", + "Violation of Google's policy for interest-based ads.", + "Misuse of personal information.", + "Omission of relevant information.", + "Unavailable promotions.", + "Misleading or unrealistic promotions.", + "Offensive or inappropriate content.", + "Capitalizing on sensitive events.", + "Shocking content.", + "Products & Services that enable dishonest behavior.", + "The ad does not meet technical requirements.", + "Restricted political content.", + "Unsupported content.", + "Invalid bidding method.", + "Video length exceeds limits.", + "Unacceptable content: Japanese healthcare.", + "Online pharmacy ID required.", + "Unacceptable content: Abortion.", + "Unacceptable content: Birth control.", + "Restricted in China.", + "Unacceptable content: Korean healthcare.", + "Non-family safe or adult content.", + "Clinical trial recruitment.", + "Maximum number of HTTP calls exceeded.", + "Maximum number of cookies exceeded.", + "Financial service ad does not adhere to specifications.", + "Flash content was found in an unsupported context." 
+ ], + "enum": [ + "LENGTH_OF_IMAGE_ANIMATION", + "BROKEN_URL", + "MEDIA_NOT_FUNCTIONAL", + "INVALID_FOURTH_PARTY_CALL", + "INCORRECT_REMARKETING_DECLARATION", + "LANDING_PAGE_ERROR", + "AD_SIZE_DOES_NOT_MATCH_AD_SLOT", + "NO_BORDER", + "FOURTH_PARTY_BROWSER_COOKIES", + "LSO_OBJECTS", + "BLANK_CREATIVE", + "DESTINATION_URLS_UNDECLARED", + "PROBLEM_WITH_CLICK_MACRO", + "INCORRECT_AD_TECHNOLOGY_DECLARATION", + "INCORRECT_DESTINATION_URL_DECLARATION", + "EXPANDABLE_INCORRECT_DIRECTION", + "EXPANDABLE_DIRECTION_NOT_SUPPORTED", + "EXPANDABLE_INVALID_VENDOR", + "EXPANDABLE_FUNCTIONALITY", + "VIDEO_INVALID_VENDOR", + "VIDEO_UNSUPPORTED_LENGTH", + "VIDEO_UNSUPPORTED_FORMAT", + "VIDEO_FUNCTIONALITY", + "LANDING_PAGE_DISABLED", + "MALWARE_SUSPECTED", + "ADULT_IMAGE_OR_VIDEO", + "INACCURATE_AD_TEXT", + "COUNTERFEIT_DESIGNER_GOODS", + "POP_UP", + "INVALID_RTB_PROTOCOL_USAGE", + "RAW_IP_ADDRESS_IN_SNIPPET", + "UNACCEPTABLE_CONTENT_SOFTWARE", + "UNAUTHORIZED_COOKIE_ON_GOOGLE_DOMAIN", + "UNDECLARED_FLASH_OBJECTS", + "INVALID_SSL_DECLARATION", + "DIRECT_DOWNLOAD_IN_AD", + "MAXIMUM_DOWNLOAD_SIZE_EXCEEDED", + "DESTINATION_URL_SITE_NOT_CRAWLABLE", + "BAD_URL_LEGAL_DISAPPROVAL", + "PHARMA_GAMBLING_ALCOHOL_NOT_ALLOWED", + "DYNAMIC_DNS_AT_DESTINATION_URL", + "POOR_IMAGE_OR_VIDEO_QUALITY", + "UNACCEPTABLE_IMAGE_CONTENT", + "INCORRECT_IMAGE_LAYOUT", + "IRRELEVANT_IMAGE_OR_VIDEO", + "DESTINATION_SITE_DOES_NOT_ALLOW_GOING_BACK", + "MISLEADING_CLAIMS_IN_AD", + "RESTRICTED_PRODUCTS", + "UNACCEPTABLE_CONTENT", + "AUTOMATED_AD_CLICKING", + "INVALID_URL_PROTOCOL", + "UNDECLARED_RESTRICTED_CONTENT", + "INVALID_REMARKETING_LIST_USAGE", + "DESTINATION_SITE_NOT_CRAWLABLE_ROBOTS_TXT", + "CLICK_TO_DOWNLOAD_NOT_AN_APP", + "INACCURATE_REVIEW_EXTENSION", + "SEXUALLY_EXPLICIT_CONTENT", + "GAINING_AN_UNFAIR_ADVANTAGE", + "GAMING_THE_GOOGLE_NETWORK", + "DANGEROUS_PRODUCTS_KNIVES", + "DANGEROUS_PRODUCTS_EXPLOSIVES", + "DANGEROUS_PRODUCTS_GUNS", + "DANGEROUS_PRODUCTS_DRUGS", + "DANGEROUS_PRODUCTS_TOBACCO", + "DANGEROUS_PRODUCTS_WEAPONS", + "UNCLEAR_OR_IRRELEVANT_AD", + "PROFESSIONAL_STANDARDS", + "DYSFUNCTIONAL_PROMOTION", + "INVALID_INTEREST_BASED_AD", + "MISUSE_OF_PERSONAL_INFORMATION", + "OMISSION_OF_RELEVANT_INFORMATION", + "UNAVAILABLE_PROMOTIONS", + "MISLEADING_PROMOTIONS", + "INAPPROPRIATE_CONTENT", + "SENSITIVE_EVENTS", + "SHOCKING_CONTENT", + "ENABLING_DISHONEST_BEHAVIOR", + "TECHNICAL_REQUIREMENTS", + "RESTRICTED_POLITICAL_CONTENT", + "UNSUPPORTED_CONTENT", + "INVALID_BIDDING_METHOD", + "VIDEO_TOO_LONG", + "VIOLATES_JAPANESE_PHARMACY_LAW", + "UNACCREDITED_PET_PHARMACY", + "ABORTION", + "CONTRACEPTIVES", + "NEED_CERTIFICATES_TO_ADVERTISE_IN_CHINA", + "KCDSP_REGISTRATION", + "NOT_FAMILY_SAFE", + "CLINICAL_TRIAL_RECRUITMENT", + "MAXIMUM_NUMBER_OF_HTTP_CALLS_EXCEEDED", + "MAXIMUM_NUMBER_OF_COOKIES_EXCEEDED", + "PERSONAL_LOANS", + "UNSUPPORTED_FLASH_CONTENT" + ], + "description": "The categorized reason for disapproval." + } + }, + "id": "Disapproval" + }, + "StopWatchingCreativeRequest": { + "description": "A request for stopping notifications for changes to creative Status.", + "type": "object", + "properties": {}, + "id": "StopWatchingCreativeRequest" + }, + "ServingRestriction": { + "description": "@OutputOnly A representation of the status of an ad in a\nspecific context. 
A context here relates to where something ultimately serves\n(for example, a user or publisher geo, a platform, an HTTPS vs HTTP request,\nor the type of auction).", + "type": "object", + "properties": { + "disapprovalReasons": { + "description": "Any disapprovals bound to this restriction.\nOnly present if status=DISAPPROVED.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "type": "array", + "items": { + "$ref": "Disapproval" + } + }, + "contexts": { + "type": "array", + "items": { + "$ref": "ServingContext" + }, + "description": "The contexts for the restriction." + }, + "status": { + "type": "string", + "enumDescriptions": [ + "The status is not known.", + "The ad was disapproved in this context.", + "The ad is pending review in this context." + ], + "enum": [ + "STATUS_UNSPECIFIED", + "DISAPPROVAL", + "PENDING_REVIEW" + ], + "description": "The status of the creative in this context (for example, it has been\nexplicitly disapproved or is pending review)." + } + }, + "id": "ServingRestriction" + }, + "Date": { + "id": "Date", + "description": "Represents a whole calendar date, e.g. date of birth. The time of day and\ntime zone are either specified elsewhere or are not significant. The date\nis relative to the Proleptic Gregorian Calendar. The day may be 0 to\nrepresent a year and month where the day is not significant, e.g. credit card\nexpiration date. The year may be 0 to represent a month and day independent\nof year, e.g. anniversary date. Related types are google.type.TimeOfDay\nand `google.protobuf.Timestamp`.", + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of date. Must be from 1 to 9999, or 0 if specifying a date without\na year.", + "format": "int32" + }, + "day": { + "description": "Day of month. Must be from 1 to 31 and valid for the year and month, or 0\nif specifying a year/month where the day is not significant.", + "format": "int32", + "type": "integer" + }, + "month": { + "description": "Month of year. Must be from 1 to 12.", + "format": "int32", + "type": "integer" + } + } + }, + "Empty": { + "properties": {}, + "id": "Empty", + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object" + }, + "WatchCreativeRequest": { + "description": "A request for watching changes to creative Status.", + "type": "object", + "properties": { + "topic": { + "description": "The Pub/Sub topic to publish notifications to.\nThis topic must already exist and must give permission to\nad-exchange-buyside-reports@google.com to write to the topic.\nThis should be the full resource name in\n\"projects/{project_id}/topics/{topic_id}\" format.", + "type": "string" + } + }, + "id": "WatchCreativeRequest" + }, + "AppContext": { + "description": "@OutputOnly The app type the restriction applies to for mobile device.", + "type": "object", + "properties": { + "appTypes": { + "description": "The app types this restriction applies to.", + "type": "array", + "items": { + "enum": [ + "NATIVE", + "WEB" + ], + "type": "string" + }, + "enumDescriptions": [ + "Native app context.", + "Mobile web app context." 
+ ] + } + }, + "id": "AppContext" + }, + "NativeContent": { + "type": "object", + "properties": { + "videoUrl": { + "description": "The URL to fetch a native video ad.", + "type": "string" + }, + "clickLinkUrl": { + "description": "The URL that the browser/SDK will load when the user clicks the ad.", + "type": "string" + }, + "logo": { + "$ref": "Image", + "description": "A smaller image, for the advertiser's logo." + }, + "priceDisplayText": { + "description": "The price of the promoted app including currency info.", + "type": "string" + }, + "image": { + "$ref": "Image", + "description": "A large image." + }, + "clickTrackingUrl": { + "description": "The URL to use for click tracking.", + "type": "string" + }, + "advertiserName": { + "description": "The name of the advertiser or sponsor, to be displayed in the ad creative.", + "type": "string" + }, + "storeUrl": { + "description": "The URL to the app store to purchase/download the promoted app.", + "type": "string" + }, + "headline": { + "description": "A short title for the ad.", + "type": "string" + }, + "appIcon": { + "$ref": "Image", + "description": "The app icon, for app download ads." + }, + "callToAction": { + "description": "A label for the button that the user is supposed to click.", + "type": "string" + }, + "body": { + "description": "A long description of the ad.", + "type": "string" + }, + "starRating": { + "description": "The app rating in the app store. Must be in the range [0-5].", + "format": "double", + "type": "number" + } + }, + "id": "NativeContent", + "description": "Native content for a creative." + }, + "ListClientsResponse": { + "type": "object", + "properties": { + "nextPageToken": { + "description": "A token to retrieve the next page of results.\nPass this value in the\nListClientsRequest.pageToken\nfield in the subsequent call to the\naccounts.clients.list method\nto retrieve the next page of results.", + "type": "string" + }, + "clients": { + "description": "The returned list of clients.", + "type": "array", + "items": { + "$ref": "Client" + } + } + }, + "id": "ListClientsResponse" + }, + "SecurityContext": { + "description": "@OutputOnly A security context.", + "type": "object", + "properties": { + "securities": { + "enumDescriptions": [ + "Matches impressions that require insecure compatibility.", + "Matches impressions that require SSL compatibility." 
+ ], + "description": "The security types in this context.", + "type": "array", + "items": { + "enum": [ + "INSECURE", + "SSL" + ], + "type": "string" + } + } + }, + "id": "SecurityContext" + }, + "ListCreativesResponse": { + "description": "A response for listing creatives.", + "type": "object", + "properties": { + "creatives": { + "description": "The list of creatives.", + "type": "array", + "items": { + "$ref": "Creative" + } + }, + "nextPageToken": { + "description": "A token to retrieve the next page of results.\nPass this value in the\nListCreativesRequest.page_token\nfield in the subsequent call to `ListCreatives` method to retrieve the next\npage of results.", + "type": "string" + } + }, + "id": "ListCreativesResponse" + }, + "HtmlContent": { + "id": "HtmlContent", + "description": "HTML content for a creative.", + "type": "object", + "properties": { + "height": { + "description": "The height of the HTML snippet in pixels.", + "format": "int32", + "type": "integer" + }, + "width": { + "description": "The width of the HTML snippet in pixels.", + "format": "int32", + "type": "integer" + }, + "snippet": { + "description": "The HTML snippet that displays the ad when inserted in the web page.", + "type": "string" + } + } + }, + "ServingContext": { + "description": "The serving context for this restriction.", + "type": "object", + "properties": { + "location": { + "$ref": "LocationContext", + "description": "Matches impressions coming from users *or* publishers in a specific\nlocation." + }, + "auctionType": { + "$ref": "AuctionContext", + "description": "Matches impressions for a particular auction type." + }, + "all": { + "type": "string", + "enumDescriptions": [ + "A simple context." + ], + "enum": [ + "SIMPLE_CONTEXT" + ], + "description": "Matches all contexts." + }, + "appType": { + "$ref": "AppContext", + "description": "Matches impressions for a particular app type." + }, + "securityType": { + "$ref": "SecurityContext", + "description": "Matches impressions for a particular security type." + }, + "platform": { + "$ref": "PlatformContext", + "description": "Matches impressions coming from a particular platform." + } + }, + "id": "ServingContext" + }, + "Image": { + "id": "Image", + "description": "An image resource. You may provide a larger image than was requested,\nso long as the aspect ratio is preserved.", + "type": "object", + "properties": { + "height": { + "description": "Image height in pixels.", + "format": "int32", + "type": "integer" + }, + "width": { + "description": "Image width in pixels.", + "format": "int32", + "type": "integer" + }, + "url": { + "description": "The URL of the image.", + "type": "string" + } + } + }, + "Reason": { + "description": "A specific filtering status and how many times it occurred.", + "type": "object", + "properties": { + "count": { + "description": "The number of times the creative was filtered for the status. The\ncount is aggregated across all publishers on the exchange.", + "format": "int64", + "type": "string" + }, + "status": { + "description": "The filtering status code. Please refer to the\n[creative-status-codes.txt](https://storage.googleapis.com/adx-rtb-dictionaries/creative-status-codes.txt)\nfile for different statuses.", + "format": "int32", + "type": "integer" + } + }, + "id": "Reason" + }, + "VideoContent": { + "description": "Video content for a creative.", + "type": "object", + "properties": { + "videoUrl": { + "type": "string", + "description": "The URL to fetch a video ad." 
+ } + }, + "id": "VideoContent" + } + }, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "protocol": "rest", + "canonicalName": "AdExchangeBuyerII", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/adexchange.buyer": { + "description": "Manage your Ad Exchange buyer account configuration" + } + } + } + }, + "rootUrl": "https://adexchangebuyer.googleapis.com/", + "ownerDomain": "google.com", + "name": "adexchangebuyer2", + "batchPath": "batch", + "title": "Ad Exchange Buyer API II", + "ownerName": "Google", + "resources": { + "accounts": { + "resources": { + "creatives": { + "methods": { + "get": { + "id": "adexchangebuyer2.accounts.creatives.get", + "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}", + "description": "Gets a creative.", + "httpMethod": "GET", + "parameterOrder": [ + "accountId", + "creativeId" + ], + "response": { + "$ref": "Creative" + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "creativeId": { + "description": "The ID of the creative to retrieve.", + "required": true, + "type": "string", + "location": "path" + }, + "accountId": { + "location": "path", + "description": "The account the creative belongs to.", + "required": true, + "type": "string" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}" + }, + "watch": { + "httpMethod": "POST", + "parameterOrder": [ + "accountId", + "creativeId" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "creativeId": { + "description": "The creative ID to watch for status changes.\nSpecify \"-\" to watch all creatives under the above account.\nIf both creative-level and account-level notifications are\nsent, only a single notification will be sent to the\ncreative-level notification topic.", + "required": true, + "type": "string", + "location": "path" + }, + "accountId": { + "description": "The account of the creative to watch.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}:watch", + "id": "adexchangebuyer2.accounts.creatives.watch", + "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}:watch", + "description": "Watches a creative. Will result in push notifications being sent to the\ntopic when the creative changes status.", + "request": { + "$ref": "WatchCreativeRequest" + } + }, + "update": { + "httpMethod": "PUT", + "parameterOrder": [ + "accountId", + "creativeId" + ], + "response": { + "$ref": "Creative" + }, + "parameters": { + "accountId": { + "location": "path", + "description": "The account that this creative belongs to.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "required": true, + "type": "string" + }, + "creativeId": { + "location": "path", + "description": "The buyer-defined creative ID of this creative.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}", + "id": "adexchangebuyer2.accounts.creatives.update", + "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}", + "request": { + "$ref": "Creative" + }, + "description": "Updates a creative." 
+ }, + "list": { + "description": "Lists creatives.", + "parameterOrder": [ + "accountId" + ], + "response": { + "$ref": "ListCreativesResponse" + }, + "httpMethod": "GET", + "parameters": { + "pageToken": { + "location": "query", + "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListCreativesResponse.next_page_token\nreturned from the previous call to 'ListCreatives' method.", + "type": "string" + }, + "accountId": { + "description": "The account to list the creatives from.\nSpecify \"-\" to list all creatives the current user has access to.", + "required": true, + "type": "string", + "location": "path" + }, + "pageSize": { + "description": "Requested page size. The server may return fewer creatives than requested\n(due to timeout constraint) even if more are available via another call.\nIf unspecified, server will pick an appropriate default.\nAcceptable values are 1 to 1000, inclusive.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "query": { + "location": "query", + "description": "An optional query string to filter creatives. If no filter is specified,\nall active creatives will be returned.\nSupported queries are:\n\u003cul\u003e\n\u003cli\u003eaccountId=\u003ci\u003eaccount_id_string\u003c/i\u003e\n\u003cli\u003ecreativeId=\u003ci\u003ecreative_id_string\u003c/i\u003e\n\u003cli\u003edealsStatus: {approved, conditionally_approved, disapproved,\n not_checked}\n\u003cli\u003eopenAuctionStatus: {approved, conditionally_approved, disapproved,\n not_checked}\n\u003cli\u003eattribute: {a numeric attribute from the list of attributes}\n\u003cli\u003edisapprovalReason: {a reason from DisapprovalReason\n\u003c/ul\u003e\nExample: 'accountId=12345 AND (dealsStatus:disapproved AND disapprovalReason:unacceptable_content) OR attribute:47'", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/creatives", + "path": "v2beta1/accounts/{accountId}/creatives", + "id": "adexchangebuyer2.accounts.creatives.list" + }, + "create": { + "request": { + "$ref": "Creative" + }, + "description": "Creates a creative.", + "response": { + "$ref": "Creative" + }, + "parameterOrder": [ + "accountId" + ], + "httpMethod": "POST", + "parameters": { + "accountId": { + "location": "path", + "description": "The account that this creative belongs to.\nCan be used to filter the response of the\ncreatives.list\nmethod.", + "required": true, + "type": "string" + }, + "duplicateIdMode": { + "description": "Indicates if multiple creatives can share an ID or not. 
Default is\nNO_DUPLICATES (one ID per creative).", + "type": "string", + "location": "query", + "enum": [ + "NO_DUPLICATES", + "FORCE_ENABLE_DUPLICATE_IDS" + ] + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/creatives", + "path": "v2beta1/accounts/{accountId}/creatives", + "id": "adexchangebuyer2.accounts.creatives.create" + }, + "stopWatching": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "accountId", + "creativeId" + ], + "httpMethod": "POST", + "parameters": { + "creativeId": { + "description": "The creative ID of the creative to stop notifications for.\nSpecify \"-\" to specify stopping account level notifications.", + "required": true, + "type": "string", + "location": "path" + }, + "accountId": { + "location": "path", + "description": "The account of the creative to stop notifications for.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}:stopWatching", + "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}:stopWatching", + "id": "adexchangebuyer2.accounts.creatives.stopWatching", + "request": { + "$ref": "StopWatchingCreativeRequest" + }, + "description": "Stops watching a creative. Will stop push notifications being sent to the\ntopics when the creative changes status." + } + }, + "resources": { + "dealAssociations": { + "methods": { + "list": { + "description": "List all creative-deal associations.", + "response": { + "$ref": "ListDealAssociationsResponse" + }, + "parameterOrder": [ + "accountId", + "creativeId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "accountId": { + "location": "path", + "description": "The account to list the associations from.\nSpecify \"-\" to list all creatives the current user has access to.", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Requested page size. Server may return fewer associations than requested.\nIf unspecified, server will pick an appropriate default.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "query": { + "location": "query", + "description": "An optional query string to filter deal associations. 
If no filter is\nspecified, all associations will be returned.\nSupported queries are:\n\u003cul\u003e\n\u003cli\u003eaccountId=\u003ci\u003eaccount_id_string\u003c/i\u003e\n\u003cli\u003ecreativeId=\u003ci\u003ecreative_id_string\u003c/i\u003e\n\u003cli\u003edealsId=\u003ci\u003edeals_id_string\u003c/i\u003e\n\u003cli\u003edealsStatus:{approved, conditionally_approved, disapproved,\n not_checked}\n\u003cli\u003eopenAuctionStatus:{approved, conditionally_approved, disapproved,\n not_checked}\n\u003c/ul\u003e\nExample: 'dealsId=12345 AND dealsStatus:disapproved'", + "type": "string" + }, + "creativeId": { + "description": "The creative ID to list the associations from.\nSpecify \"-\" to list all creatives under the above account.", + "required": true, + "type": "string", + "location": "path" + }, + "pageToken": { + "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListDealAssociationsResponse.next_page_token\nreturned from the previous call to 'ListDealAssociations' method.", + "type": "string", + "location": "query" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations", + "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations", + "id": "adexchangebuyer2.accounts.creatives.dealAssociations.list" + }, + "add": { + "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:add", + "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:add", + "id": "adexchangebuyer2.accounts.creatives.dealAssociations.add", + "request": { + "$ref": "AddDealAssociationRequest" + }, + "description": "Associate an existing deal with a creative.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "accountId", + "creativeId" + ], + "httpMethod": "POST", + "parameters": { + "accountId": { + "description": "The account the creative belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "creativeId": { + "required": true, + "type": "string", + "location": "path", + "description": "The ID of the creative associated with the deal." 
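As a rough usage sketch of the dealAssociations :add method described here (its :remove counterpart follows below), the request wraps a CreativeDealAssociation. This is illustrative only; the IDs are placeholders and svc is the initialized *adexchangebuyer2.Service from the earlier sketch.

// import adexchangebuyer2 "google.golang.org/api/adexchangebuyer2/v2beta1"

// associateDeal links a deal to a creative; illustrative only.
func associateDeal(svc *adexchangebuyer2.Service) error {
	assoc := &adexchangebuyer2.CreativeDealAssociation{
		AccountId:  "12345",       // sponsor buyer account (placeholder)
		CreativeId: "my-creative", // buyer-defined creative ID (placeholder)
		DealsId:    "67890",       // externalDealId of the deal (placeholder)
	}
	req := &adexchangebuyer2.AddDealAssociationRequest{Association: assoc}
	// POST .../creatives/{creativeId}/dealAssociations:add; returns Empty on success.
	_, err := svc.Accounts.Creatives.DealAssociations.Add("12345", "my-creative", req).Do()
	return err
}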
+ } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ] + }, + "remove": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "accountId", + "creativeId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "accountId": { + "description": "The account the creative belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "creativeId": { + "location": "path", + "description": "The ID of the creative associated with the deal.", + "required": true, + "type": "string" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:remove", + "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:remove", + "id": "adexchangebuyer2.accounts.creatives.dealAssociations.remove", + "description": "Remove the association between a deal and a creative.", + "request": { + "$ref": "RemoveDealAssociationRequest" + } + } + } + } + } + }, + "clients": { + "methods": { + "create": { + "httpMethod": "POST", + "parameterOrder": [ + "accountId" + ], + "response": { + "$ref": "Client" + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "accountId": { + "location": "path", + "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to create a client for. (required)", + "format": "int64", + "required": true, + "type": "string" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/clients", + "id": "adexchangebuyer2.accounts.clients.create", + "path": "v2beta1/accounts/{accountId}/clients", + "description": "Creates a new client buyer.", + "request": { + "$ref": "Client" + } + }, + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "accountId", + "clientAccountId" + ], + "response": { + "$ref": "Client" + }, + "parameters": { + "accountId": { + "description": "Numerical account ID of the client's sponsor buyer. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + }, + "clientAccountId": { + "description": "Numerical account ID of the client buyer to retrieve. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + "id": "adexchangebuyer2.accounts.clients.get", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + "description": "Gets a client buyer with a given client account ID." + }, + "list": { + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientsResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.list method.", + "type": "string" + }, + "accountId": { + "required": true, + "type": "string", + "location": "path", + "description": "Unique numerical account ID of the sponsor buyer to list the clients for.", + "format": "int64" + }, + "pageSize": { + "location": "query", + "description": "Requested page size. 
The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", + "format": "int32", + "type": "integer" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/clients", + "path": "v2beta1/accounts/{accountId}/clients", + "id": "adexchangebuyer2.accounts.clients.list", + "description": "Lists all the clients for the current sponsor buyer.", + "response": { + "$ref": "ListClientsResponse" + }, + "parameterOrder": [ + "accountId" + ], + "httpMethod": "GET" + }, + "update": { + "description": "Updates an existing client buyer.", + "request": { + "$ref": "Client" + }, + "httpMethod": "PUT", + "parameterOrder": [ + "accountId", + "clientAccountId" + ], + "response": { + "$ref": "Client" + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "accountId": { + "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to update a client for. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + }, + "clientAccountId": { + "description": "Unique numerical account ID of the client to update. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + "id": "adexchangebuyer2.accounts.clients.update", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}" + } + }, + "resources": { + "invitations": { + "methods": { + "create": { + "request": { + "$ref": "ClientUserInvitation" + }, + "description": "Creates and sends out an email invitation to access\nan Ad Exchange client buyer account.", + "response": { + "$ref": "ClientUserInvitation" + }, + "parameterOrder": [ + "accountId", + "clientAccountId" + ], + "httpMethod": "POST", + "parameters": { + "accountId": { + "location": "path", + "description": "Numerical account ID of the client's sponsor buyer. (required)", + "format": "int64", + "required": true, + "type": "string" + }, + "clientAccountId": { + "description": "Numerical account ID of the client buyer that the user\nshould be associated with. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + "id": "adexchangebuyer2.accounts.clients.invitations.create" + }, + "get": { + "description": "Retrieves an existing client user invitation.", + "httpMethod": "GET", + "response": { + "$ref": "ClientUserInvitation" + }, + "parameterOrder": [ + "accountId", + "clientAccountId", + "invitationId" + ], + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "clientAccountId": { + "description": "Numerical account ID of the client buyer that the user invitation\nto be retrieved is associated with. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + }, + "invitationId": { + "description": "Numerical identifier of the user invitation to retrieve. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + }, + "accountId": { + "location": "path", + "description": "Numerical account ID of the client's sponsor buyer. 
(required)", + "format": "int64", + "required": true, + "type": "string" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}", + "id": "adexchangebuyer2.accounts.clients.invitations.get", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}" + }, + "list": { + "description": "Lists all the client users invitations for a client\nwith a given account ID.", + "httpMethod": "GET", + "response": { + "$ref": "ListClientUserInvitationsResponse" + }, + "parameterOrder": [ + "accountId", + "clientAccountId" + ], + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUserInvitationsResponse.nextPageToken\nreturned from the previous call to the\nclients.invitations.list\nmethod.", + "type": "string" + }, + "accountId": { + "location": "path", + "description": "Numerical account ID of the client's sponsor buyer. (required)", + "format": "int64", + "required": true, + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Requested page size. Server may return fewer clients than requested.\nIf unspecified, server will pick an appropriate default.", + "format": "int32", + "type": "integer" + }, + "clientAccountId": { + "description": "Numerical account ID of the client buyer to list invitations for.\n(required)\nYou must either specify a string representation of a\nnumerical account identifier or the `-` character\nto list all the invitations for all the clients\nof a given sponsor buyer.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + "id": "adexchangebuyer2.accounts.clients.invitations.list", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations" + } + } + }, + "users": { + "methods": { + "list": { + "description": "Lists all the known client users for a specified\nsponsor buyer account ID.", + "parameterOrder": [ + "accountId", + "clientAccountId" + ], + "response": { + "$ref": "ListClientUsersResponse" + }, + "httpMethod": "GET", + "parameters": { + "pageToken": { + "location": "query", + "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUsersResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.users.list method.", + "type": "string" + }, + "accountId": { + "description": "Numerical account ID of the sponsor buyer of the client to list users for.\n(required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + }, + "pageSize": { + "description": "Requested page size. The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "clientAccountId": { + "description": "The account ID of the client buyer to list users for. 
(required)\nYou must specify either a string representation of a\nnumerical account identifier or the `-` character\nto list all the client users for all the clients\nof a given sponsor buyer.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", + "id": "adexchangebuyer2.accounts.clients.users.list" + }, + "get": { + "parameterOrder": [ + "accountId", + "clientAccountId", + "userId" + ], + "httpMethod": "GET", + "response": { + "$ref": "ClientUser" + }, + "parameters": { + "clientAccountId": { + "location": "path", + "description": "Numerical account ID of the client buyer\nthat the user to be retrieved is associated with. (required)", + "format": "int64", + "required": true, + "type": "string" + }, + "userId": { + "location": "path", + "description": "Numerical identifier of the user to retrieve. (required)", + "format": "int64", + "required": true, + "type": "string" + }, + "accountId": { + "location": "path", + "description": "Numerical account ID of the client's sponsor buyer. (required)", + "format": "int64", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + "id": "adexchangebuyer2.accounts.clients.users.get", + "description": "Retrieves an existing client user." + }, + "update": { + "httpMethod": "PUT", + "parameterOrder": [ + "accountId", + "clientAccountId", + "userId" + ], + "response": { + "$ref": "ClientUser" + }, + "scopes": [ + "https://www.googleapis.com/auth/adexchange.buyer" + ], + "parameters": { + "clientAccountId": { + "description": "Numerical account ID of the client buyer that the user to be retrieved\nis associated with. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + }, + "userId": { + "description": "Numerical identifier of the user to retrieve. (required)", + "format": "int64", + "required": true, + "type": "string", + "location": "path" + }, + "accountId": { + "required": true, + "type": "string", + "location": "path", + "description": "Numerical account ID of the client's sponsor buyer. 
(required)", + "format": "int64" + } + }, + "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + "id": "adexchangebuyer2.accounts.clients.users.update", + "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + "description": "Updates an existing client user.\nOnly the user status can be changed on update.", + "request": { + "$ref": "ClientUser" + } + } + } + } } } - }, - "id": "ListClientsResponse" + } } }, - "revision": "20161020", - "basePath": "", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "version_module": "True", - "canonicalName": "AdExchangeBuyerII", - "discoveryVersion": "v1", - "baseUrl": "https://adexchangebuyer.googleapis.com/", - "name": "adexchangebuyer2", "parameters": { - "access_token": { - "description": "OAuth access token.", + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "$.xgafv": { + "description": "V1 error format.", "type": "string", - "location": "query" + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" + "alt": { + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] }, "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token." + }, + "access_token": { + "description": "OAuth access token.", "type": "string", "location": "query" }, @@ -633,77 +1951,41 @@ "location": "query" }, "pp": { - "description": "Pretty-print response.", - "default": "true", "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" + "default": "true", + "location": "query", + "description": "Pretty-print response." }, - "alt": { - "description": "Data format for response.", + "bearer_token": { "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], + "description": "OAuth bearer token.", "type": "string" }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", "type": "string", "location": "query" }, - "callback": { - "description": "JSONP", + "upload_protocol": { "type": "string", - "location": "query" + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\")." 
}, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" + "prettyPrint": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Returns response with indentations and line breaks." }, "uploadType": { "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", "type": "string", "location": "query" }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" } - }, - "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest/guides/client-access/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v2beta1", - "rootUrl": "https://adexchangebuyer.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-gen.go b/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-gen.go index 9d14935b8..e9ef66f70 100644 --- a/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-gen.go +++ b/vendor/google.golang.org/api/adexchangebuyer2/v2beta1/adexchangebuyer2-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService } @@ -75,9 +76,14 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Clients = NewAccountsClientsService(s) + rs.Creatives = NewAccountsCreativesService(s) return rs } @@ -85,6 +91,8 @@ type AccountsService struct { s *Service Clients *AccountsClientsService + + Creatives *AccountsCreativesService } func NewAccountsClientsService(s *Service) *AccountsClientsService { @@ -120,6 +128,123 @@ type AccountsClientsUsersService struct { s *Service } +func NewAccountsCreativesService(s *Service) *AccountsCreativesService { + rs := &AccountsCreativesService{s: s} + rs.DealAssociations = NewAccountsCreativesDealAssociationsService(s) + return rs +} + +type AccountsCreativesService struct { + s *Service + + DealAssociations *AccountsCreativesDealAssociationsService +} + +func NewAccountsCreativesDealAssociationsService(s *Service) *AccountsCreativesDealAssociationsService { + rs := &AccountsCreativesDealAssociationsService{s: s} + return rs +} + +type AccountsCreativesDealAssociationsService struct { + s *Service +} + +// AddDealAssociationRequest: A request for associating a deal and a +// creative. +type AddDealAssociationRequest struct { + // Association: The association between a creative and a deal that + // should be added. 
+ Association *CreativeDealAssociation `json:"association,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Association") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Association") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddDealAssociationRequest) MarshalJSON() ([]byte, error) { + type noMethod AddDealAssociationRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AppContext: @OutputOnly The app type the restriction applies to for +// mobile device. +type AppContext struct { + // AppTypes: The app types this restriction applies to. + // + // Possible values: + // "NATIVE" - Native app context. + // "WEB" - Mobile web app context. + AppTypes []string `json:"appTypes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AppTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AppTypes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AppContext) MarshalJSON() ([]byte, error) { + type noMethod AppContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuctionContext: @OutputOnly The auction type the restriction applies +// to. +type AuctionContext struct { + // AuctionTypes: The auction types this restriction applies to. + // + // Possible values: + // "OPEN_AUCTION" - The restriction applies to open auction. + // "DIRECT_DEALS" - The restriction applies to direct deals. + AuctionTypes []string `json:"auctionTypes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuctionTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"AuctionTypes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AuctionContext) MarshalJSON() ([]byte, error) { + type noMethod AuctionContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Client: A client resource represents a client buyer—an // agency, // a brand, or an advertiser customer of the sponsor buyer. @@ -369,25 +494,53 @@ func (s *ClientUserInvitation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ListClientUserInvitationsResponse struct { - // Invitations: The returned list of client users. - Invitations []*ClientUserInvitation `json:"invitations,omitempty"` - - // NextPageToken: A token to retrieve the next page of results. - // Pass this value in - // the - // ListClientUserInvitationsRequest.pageToken - // field in the subsequent call to the - // clients.invitations.list - // method to retrieve the next - // page of results. - NextPageToken string `json:"nextPageToken,omitempty"` +// Correction: @OutputOnly Shows any corrections that were applied to +// this creative. +type Correction struct { + // Contexts: The contexts for the correction. + Contexts []*ServingContext `json:"contexts,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Details: Additional details about what was corrected. + Details []string `json:"details,omitempty"` - // ForceSendFields is a list of field names (e.g. "Invitations") to + // Type: The type of correction that was applied to the creative. + // + // Possible values: + // "CORRECTION_TYPE_UNSPECIFIED" - The correction type is unknown. + // Refer to the details for more information. + // "VENDOR_IDS_ADDED" - The ad's declared vendors did not match the + // vendors that were detected. + // The detected vendors were added. + // "SSL_ATTRIBUTE_REMOVED" - The ad had the SSL attribute declared but + // was not SSL-compliant. + // The SSL attribute was removed. + // "FLASH_FREE_ATTRIBUTE_REMOVED" - The ad was declared as Flash-free + // but contained Flash, so the Flash-free + // attribute was removed. + // "FLASH_FREE_ATTRIBUTE_ADDED" - The ad was not declared as + // Flash-free but it did not reference any flash + // content, so the Flash-free attribute was added. + // "REQUIRED_ATTRIBUTE_ADDED" - The ad did not declare a required + // creative attribute. + // The attribute was added. + // "REQUIRED_VENDOR_ADDED" - The ad did not declare a required + // technology vendor. + // The technology vendor was added. + // "SSL_ATTRIBUTE_ADDED" - The ad did not declare the SSL attribute + // but was SSL-compliant, so the + // SSL attribute was added. + // "IN_BANNER_VIDEO_ATTRIBUTE_ADDED" - Properties consistent with + // In-banner video were found, so an + // In-Banner Video attribute was added. + // "MRAID_ATTRIBUTE_ADDED" - The ad makes calls to the MRAID API so + // the MRAID attribute was added. + // "FLASH_ATTRIBUTE_REMOVED" - The ad unnecessarily declared the Flash + // attribute, so the Flash attribute + // was removed. 
+ // "VIDEO_IN_SNIPPET_ATTRIBUTE_ADDED" - The ad contains video content. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Contexts") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -395,39 +548,226 @@ type ListClientUserInvitationsResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Invitations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Contexts") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ListClientUserInvitationsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListClientUserInvitationsResponse +func (s *Correction) MarshalJSON() ([]byte, error) { + type noMethod Correction raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ListClientUsersResponse struct { - // NextPageToken: A token to retrieve the next page of results. - // Pass this value in the - // ListClientUsersRequest.pageToken - // field in the subsequent call to the - // clients.invitations.list - // method to retrieve the next - // page of results. - NextPageToken string `json:"nextPageToken,omitempty"` +// Creative: A creative and its classification data. +type Creative struct { + // AccountId: The account that this creative belongs to. + // Can be used to filter the response of the + // creatives.list + // method. + AccountId string `json:"accountId,omitempty"` + + // AdChoicesDestinationUrl: The link to AdChoices destination page. + AdChoicesDestinationUrl string `json:"adChoicesDestinationUrl,omitempty"` + + // AdvertiserName: The name of the company being advertised in the + // creative. + AdvertiserName string `json:"advertiserName,omitempty"` + + // AgencyId: The agency ID for this creative. + AgencyId int64 `json:"agencyId,omitempty,string"` + + // ApiUpdateTime: @OutputOnly The last update timestamp of the creative + // via API. + ApiUpdateTime string `json:"apiUpdateTime,omitempty"` + + // Attributes: All attributes for the ads that may be shown from this + // creative. + // Can be used to filter the response of the + // creatives.list + // method. + // + // Possible values: + // "ATTRIBUTE_UNSPECIFIED" - Do not use. This is a placeholder value + // only. + // "IS_TAGGED" - The creative is tagged. + // "IS_COOKIE_TARGETED" - The creative is cookie targeted. + // "IS_USER_INTEREST_TARGETED" - The creative is user interest + // targeted. + // "EXPANDING_DIRECTION_NONE" - The creative does not expand. + // "EXPANDING_DIRECTION_UP" - The creative expands up. + // "EXPANDING_DIRECTION_DOWN" - The creative expands down. + // "EXPANDING_DIRECTION_LEFT" - The creative expands left. + // "EXPANDING_DIRECTION_RIGHT" - The creative expands right. 
+ // "EXPANDING_DIRECTION_UP_LEFT" - The creative expands up and left. + // "EXPANDING_DIRECTION_UP_RIGHT" - The creative expands up and right. + // "EXPANDING_DIRECTION_DOWN_LEFT" - The creative expands down and + // left. + // "EXPANDING_DIRECTION_DOWN_RIGHT" - The creative expands down and + // right. + // "EXPANDING_DIRECTION_UP_OR_DOWN" - The creative expands up or down. + // "EXPANDING_DIRECTION_LEFT_OR_RIGHT" - The creative expands left or + // right. + // "EXPANDING_DIRECTION_ANY_DIAGONAL" - The creative expands on any + // diagonal. + // "EXPANDING_ACTION_ROLLOVER_TO_EXPAND" - The creative expands when + // rolled over. + // "INSTREAM_VAST_VIDEO_TYPE_VPAID_FLASH" - The instream vast video + // type is vpaid flash. + // "RICH_MEDIA_CAPABILITY_TYPE_MRAID" - The creative is MRAID + // "RICH_MEDIA_CAPABILITY_TYPE_SSL" - The creative is SSL. + // "RICH_MEDIA_CAPABILITY_TYPE_INTERSTITIAL" - The creative is an + // interstitial. + // "NATIVE_ELIGIBILITY_ELIGIBLE" - The creative is eligible for + // native. + // "NATIVE_ELIGIBILITY_NOT_ELIGIBLE" - The creative is not eligible + // for native. + // "RENDERING_SIZELESS_ADX" - The creative can dynamically resize to + // fill a variety of slot sizes. + Attributes []string `json:"attributes,omitempty"` + + // ClickThroughUrls: The set of destination URLs for the creative. + ClickThroughUrls []string `json:"clickThroughUrls,omitempty"` + + // Corrections: @OutputOnly Shows any corrections that were applied to + // this creative. + Corrections []*Correction `json:"corrections,omitempty"` + + // CreativeId: The buyer-defined creative ID of this creative. + // Can be used to filter the response of the + // creatives.list + // method. + CreativeId string `json:"creativeId,omitempty"` + + // DealsStatus: @OutputOnly The top-level deals status of this + // creative. + // If disapproved, an entry for 'auctionType=DIRECT_DEALS' (or 'ALL') + // in + // serving_restrictions will also exist. Note + // that this may be nuanced with other contextual restrictions, in which + // case, + // it may be preferable to read from serving_restrictions directly. + // Can be used to filter the response of the + // creatives.list + // method. + // + // Possible values: + // "STATUS_UNSPECIFIED" - The status is unknown. + // "NOT_CHECKED" - The creative has not been checked. + // "CONDITIONALLY_APPROVED" - The creative has been conditionally + // approved. + // See serving_restrictions for details. + // "APPROVED" - The creative has been approved. + // "DISAPPROVED" - The creative has been disapproved. + DealsStatus string `json:"dealsStatus,omitempty"` + + // DetectedAdvertiserIds: @OutputOnly Detected advertiser IDs, if any. + DetectedAdvertiserIds googleapi.Int64s `json:"detectedAdvertiserIds,omitempty"` + + // DetectedDomains: @OutputOnly + // The detected domains for this creative. + DetectedDomains []string `json:"detectedDomains,omitempty"` + + // DetectedLanguages: @OutputOnly + // The detected languages for this creative. The order is arbitrary. The + // codes + // are 2 or 5 characters and are documented + // at + // https://developers.google.com/adwords/api/docs/appendix/languagecod + // es. + DetectedLanguages []string `json:"detectedLanguages,omitempty"` + + // DetectedProductCategories: @OutputOnly Detected product categories, + // if any. + // See the ad-product-categories.txt file in the technical + // documentation + // for a list of IDs. 
+ DetectedProductCategories []int64 `json:"detectedProductCategories,omitempty"` + + // DetectedSensitiveCategories: @OutputOnly Detected sensitive + // categories, if any. + // See the ad-sensitive-categories.txt file in the technical + // documentation for + // a list of IDs. You should use these IDs along with + // the + // excluded-sensitive-category field in the bid request to filter your + // bids. + DetectedSensitiveCategories []int64 `json:"detectedSensitiveCategories,omitempty"` - // Users: The returned list of client users. - Users []*ClientUser `json:"users,omitempty"` + // FilteringStats: @OutputOnly The filtering stats for this creative. + FilteringStats *FilteringStats `json:"filteringStats,omitempty"` + + // Html: An HTML creative. + Html *HtmlContent `json:"html,omitempty"` + + // ImpressionTrackingUrls: The set of URLs to be called to record an + // impression. + ImpressionTrackingUrls []string `json:"impressionTrackingUrls,omitempty"` + + // Native: A native creative. + Native *NativeContent `json:"native,omitempty"` + + // OpenAuctionStatus: @OutputOnly The top-level open auction status of + // this creative. + // If disapproved, an entry for 'auctionType = OPEN_AUCTION' (or 'ALL') + // in + // serving_restrictions will also exist. Note + // that this may be nuanced with other contextual restrictions, in which + // case, + // it may be preferable to read from serving_restrictions directly. + // Can be used to filter the response of the + // creatives.list + // method. + // + // Possible values: + // "STATUS_UNSPECIFIED" - The status is unknown. + // "NOT_CHECKED" - The creative has not been checked. + // "CONDITIONALLY_APPROVED" - The creative has been conditionally + // approved. + // See serving_restrictions for details. + // "APPROVED" - The creative has been approved. + // "DISAPPROVED" - The creative has been disapproved. + OpenAuctionStatus string `json:"openAuctionStatus,omitempty"` + + // RestrictedCategories: All restricted categories for the ads that may + // be shown from this creative. + // + // Possible values: + // "NO_RESTRICTED_CATEGORIES" - The ad has no restricted categories + // "ALCOHOL" - The alcohol restricted category. + RestrictedCategories []string `json:"restrictedCategories,omitempty"` + + // ServingRestrictions: @OutputOnly The granular status of this ad in + // specific contexts. + // A context here relates to where something ultimately serves (for + // example, + // a physical location, a platform, an HTTPS vs HTTP request, or the + // type + // of auction). + ServingRestrictions []*ServingRestriction `json:"servingRestrictions,omitempty"` + + // VendorIds: All vendor IDs for the ads that may be shown from this + // creative. + // See + // https://storage.googleapis.com/adx-rtb-dictionaries/vendors.txt + // for possible values. + VendorIds []int64 `json:"vendorIds,omitempty"` + + // Version: @OutputOnly The version of this creative. + Version int64 `json:"version,omitempty"` + + // Video: A video creative. + Video *VideoContent `json:"video,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // ForceSendFields is a list of field names (e.g. "AccountId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -435,38 +775,35 @@ type ListClientUsersResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AccountId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ListClientUsersResponse) MarshalJSON() ([]byte, error) { - type noMethod ListClientUsersResponse +func (s *Creative) MarshalJSON() ([]byte, error) { + type noMethod Creative raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ListClientsResponse struct { - // Clients: The returned list of clients. - Clients []*Client `json:"clients,omitempty"` +// CreativeDealAssociation: The association between a creative and a +// deal. +type CreativeDealAssociation struct { + // AccountId: The account the creative belongs to. + AccountId string `json:"accountId,omitempty"` - // NextPageToken: A token to retrieve the next page of results. - // Pass this value in the - // ListClientsRequest.pageToken - // field in the subsequent call to the - // accounts.clients.list method - // to retrieve the next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` + // CreativeId: The ID of the creative associated with the deal. + CreativeId string `json:"creativeId,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // DealsId: The externalDealId for the deal associated with the + // creative. + DealsId string `json:"dealsId,omitempty"` - // ForceSendFields is a list of field names (e.g. "Clients") to + // ForceSendFields is a list of field names (e.g. "AccountId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -474,7 +811,7 @@ type ListClientsResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Clients") to include in + // NullFields is a list of field names (e.g. "AccountId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -483,87 +820,2412 @@ type ListClientsResponse struct { NullFields []string `json:"-"` } -func (s *ListClientsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListClientsResponse +func (s *CreativeDealAssociation) MarshalJSON() ([]byte, error) { + type noMethod CreativeDealAssociation raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// method id "adexchangebuyer2.accounts.clients.create": +// Date: Represents a whole calendar date, e.g. date of birth. The time +// of day and +// time zone are either specified elsewhere or are not significant. The +// date +// is relative to the Proleptic Gregorian Calendar. The day may be 0 +// to +// represent a year and month where the day is not significant, e.g. +// credit card +// expiration date. The year may be 0 to represent a month and day +// independent +// of year, e.g. anniversary date. Related types are +// google.type.TimeOfDay +// and `google.protobuf.Timestamp`. +type Date struct { + // Day: Day of month. Must be from 1 to 31 and valid for the year and + // month, or 0 + // if specifying a year/month where the day is not significant. + Day int64 `json:"day,omitempty"` + + // Month: Month of year. Must be from 1 to 12. + Month int64 `json:"month,omitempty"` + + // Year: Year of date. Must be from 1 to 9999, or 0 if specifying a date + // without + // a year. + Year int64 `json:"year,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Day") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` -type AccountsClientsCreateCall struct { - s *Service - accountId int64 - client *Client - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + // NullFields is a list of field names (e.g. "Day") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } -// Create: Creates a new client buyer. -func (r *AccountsClientsService) Create(accountId int64, client *Client) *AccountsClientsCreateCall { - c := &AccountsClientsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.accountId = accountId - c.client = client - return c +func (s *Date) MarshalJSON() ([]byte, error) { + type noMethod Date + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AccountsClientsCreateCall) Fields(s ...googleapi.Field) *AccountsClientsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} +// Disapproval: @OutputOnly The reason and details for a disapproval. +type Disapproval struct { + // Details: Additional details about the reason for disapproval. 
+ Details []string `json:"details,omitempty"` -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AccountsClientsCreateCall) Context(ctx context.Context) *AccountsClientsCreateCall { - c.ctx_ = ctx - return c + // Reason: The categorized reason for disapproval. + // + // Possible values: + // "LENGTH_OF_IMAGE_ANIMATION" - The length of the image animation is + // longer than allowed. + // "BROKEN_URL" - The click through URL doesn't work properly. + // "MEDIA_NOT_FUNCTIONAL" - Something is wrong with the creative + // itself. + // "INVALID_FOURTH_PARTY_CALL" - The ad makes a fourth party call to + // an unapproved vendor. + // "INCORRECT_REMARKETING_DECLARATION" - The ad targets consumers + // using remarketing lists and/or collects + // data for subsequent use in retargeting, but does not correctly + // declare + // that use. + // "LANDING_PAGE_ERROR" - Clicking on the ad leads to an error page. + // "AD_SIZE_DOES_NOT_MATCH_AD_SLOT" - The ad size when rendered does + // not match the declaration. + // "NO_BORDER" - Ads with a white background require a border, which + // was missing. + // "FOURTH_PARTY_BROWSER_COOKIES" - The creative attempts to set + // cookies from a fourth party that is not + // certified. + // "LSO_OBJECTS" - The creative sets an LSO object. + // "BLANK_CREATIVE" - The ad serves a blank. + // "DESTINATION_URLS_UNDECLARED" - The ad uses rotation, but not all + // destination URLs were declared. + // "PROBLEM_WITH_CLICK_MACRO" - There is a problem with the way the + // click macro is used. + // "INCORRECT_AD_TECHNOLOGY_DECLARATION" - The ad technology + // declaration is not accurate. + // "INCORRECT_DESTINATION_URL_DECLARATION" - The actual destination + // URL does not match the declared destination URL. + // "EXPANDABLE_INCORRECT_DIRECTION" - The declared expanding direction + // does not match the actual direction. + // "EXPANDABLE_DIRECTION_NOT_SUPPORTED" - The ad does not expand in a + // supported direction. + // "EXPANDABLE_INVALID_VENDOR" - The ad uses an expandable vendor that + // is not supported. + // "EXPANDABLE_FUNCTIONALITY" - There was an issue with the expandable + // ad. + // "VIDEO_INVALID_VENDOR" - The ad uses a video vendor that is not + // supported. + // "VIDEO_UNSUPPORTED_LENGTH" - The length of the video ad is not + // supported. + // "VIDEO_UNSUPPORTED_FORMAT" - The format of the video ad is not + // supported. + // "VIDEO_FUNCTIONALITY" - There was an issue with the video ad. + // "LANDING_PAGE_DISABLED" - The landing page does not conform to Ad + // Exchange policy. + // "MALWARE_SUSPECTED" - The ad or the landing page may contain + // malware. + // "ADULT_IMAGE_OR_VIDEO" - The ad contains adult images or video + // content. + // "INACCURATE_AD_TEXT" - The ad contains text that is unclear or + // inaccurate. + // "COUNTERFEIT_DESIGNER_GOODS" - The ad promotes counterfeit designer + // goods. + // "POP_UP" - The ad causes a popup window to appear. + // "INVALID_RTB_PROTOCOL_USAGE" - The creative does not follow + // policies set for the RTB protocol. + // "RAW_IP_ADDRESS_IN_SNIPPET" - The ad contains a URL that uses a + // numeric IP address for the domain. + // "UNACCEPTABLE_CONTENT_SOFTWARE" - The ad or landing page contains + // unacceptable content because it initiated + // a software or executable download. + // "UNAUTHORIZED_COOKIE_ON_GOOGLE_DOMAIN" - The ad set an unauthorized + // cookie on a Google domain. 
+ // "UNDECLARED_FLASH_OBJECTS" - Flash content found when no flash was + // declared. + // "INVALID_SSL_DECLARATION" - SSL support declared but not working + // correctly. + // "DIRECT_DOWNLOAD_IN_AD" - Rich Media - Direct Download in Ad (ex. + // PDF download). + // "MAXIMUM_DOWNLOAD_SIZE_EXCEEDED" - Maximum download size exceeded. + // "DESTINATION_URL_SITE_NOT_CRAWLABLE" - Bad Destination URL: Site + // Not Crawlable. + // "BAD_URL_LEGAL_DISAPPROVAL" - Bad URL: Legal disapproval. + // "PHARMA_GAMBLING_ALCOHOL_NOT_ALLOWED" - Pharmaceuticals, Gambling, + // Alcohol not allowed and at least one was + // detected. + // "DYNAMIC_DNS_AT_DESTINATION_URL" - Dynamic DNS at Destination URL. + // "POOR_IMAGE_OR_VIDEO_QUALITY" - Poor Image / Video Quality. + // "UNACCEPTABLE_IMAGE_CONTENT" - For example, Image Trick to Click. + // "INCORRECT_IMAGE_LAYOUT" - Incorrect Image Layout. + // "IRRELEVANT_IMAGE_OR_VIDEO" - Irrelevant Image / Video. + // "DESTINATION_SITE_DOES_NOT_ALLOW_GOING_BACK" - Broken back button. + // "MISLEADING_CLAIMS_IN_AD" - Misleading/Inaccurate claims in ads. + // "RESTRICTED_PRODUCTS" - Restricted Products. + // "UNACCEPTABLE_CONTENT" - Unacceptable content. For example, + // malware. + // "AUTOMATED_AD_CLICKING" - The ad automatically redirects to the + // destination site without a click, + // or reports a click when none were made. + // "INVALID_URL_PROTOCOL" - The ad uses URL protocols that do not + // exist or are not allowed on AdX. + // "UNDECLARED_RESTRICTED_CONTENT" - Restricted content (for example, + // alcohol) was found in the ad but not + // declared. + // "INVALID_REMARKETING_LIST_USAGE" - Violation of the remarketing + // list policy. + // "DESTINATION_SITE_NOT_CRAWLABLE_ROBOTS_TXT" - The destination + // site's robot.txt file prevents it from being crawled. + // "CLICK_TO_DOWNLOAD_NOT_AN_APP" - Click to download must link to an + // app. + // "INACCURATE_REVIEW_EXTENSION" - A review extension must be an + // accurate review. + // "SEXUALLY_EXPLICIT_CONTENT" - Sexually explicit content. + // "GAINING_AN_UNFAIR_ADVANTAGE" - The ad tries to gain an unfair + // traffic advantage. + // "GAMING_THE_GOOGLE_NETWORK" - The ad tries to circumvent Google's + // advertising systems. + // "DANGEROUS_PRODUCTS_KNIVES" - The ad promotes dangerous knives. + // "DANGEROUS_PRODUCTS_EXPLOSIVES" - The ad promotes explosives. + // "DANGEROUS_PRODUCTS_GUNS" - The ad promotes guns & parts. + // "DANGEROUS_PRODUCTS_DRUGS" - The ad promotes recreational + // drugs/services & related equipment. + // "DANGEROUS_PRODUCTS_TOBACCO" - The ad promotes tobacco + // products/services & related equipment. + // "DANGEROUS_PRODUCTS_WEAPONS" - The ad promotes weapons. + // "UNCLEAR_OR_IRRELEVANT_AD" - The ad is unclear or irrelevant to the + // destination site. + // "PROFESSIONAL_STANDARDS" - The ad does not meet professional + // standards. + // "DYSFUNCTIONAL_PROMOTION" - The promotion is unnecessarily + // difficult to navigate. + // "INVALID_INTEREST_BASED_AD" - Violation of Google's policy for + // interest-based ads. + // "MISUSE_OF_PERSONAL_INFORMATION" - Misuse of personal information. + // "OMISSION_OF_RELEVANT_INFORMATION" - Omission of relevant + // information. + // "UNAVAILABLE_PROMOTIONS" - Unavailable promotions. + // "MISLEADING_PROMOTIONS" - Misleading or unrealistic promotions. + // "INAPPROPRIATE_CONTENT" - Offensive or inappropriate content. + // "SENSITIVE_EVENTS" - Capitalizing on sensitive events. + // "SHOCKING_CONTENT" - Shocking content. 
+ // "ENABLING_DISHONEST_BEHAVIOR" - Products & Services that enable + // dishonest behavior. + // "TECHNICAL_REQUIREMENTS" - The ad does not meet technical + // requirements. + // "RESTRICTED_POLITICAL_CONTENT" - Restricted political content. + // "UNSUPPORTED_CONTENT" - Unsupported content. + // "INVALID_BIDDING_METHOD" - Invalid bidding method. + // "VIDEO_TOO_LONG" - Video length exceeds limits. + // "VIOLATES_JAPANESE_PHARMACY_LAW" - Unacceptable content: Japanese + // healthcare. + // "UNACCREDITED_PET_PHARMACY" - Online pharmacy ID required. + // "ABORTION" - Unacceptable content: Abortion. + // "CONTRACEPTIVES" - Unacceptable content: Birth control. + // "NEED_CERTIFICATES_TO_ADVERTISE_IN_CHINA" - Restricted in China. + // "KCDSP_REGISTRATION" - Unacceptable content: Korean healthcare. + // "NOT_FAMILY_SAFE" - Non-family safe or adult content. + // "CLINICAL_TRIAL_RECRUITMENT" - Clinical trial recruitment. + // "MAXIMUM_NUMBER_OF_HTTP_CALLS_EXCEEDED" - Maximum number of HTTP + // calls exceeded. + // "MAXIMUM_NUMBER_OF_COOKIES_EXCEEDED" - Maximum number of cookies + // exceeded. + // "PERSONAL_LOANS" - Financial service ad does not adhere to + // specifications. + // "UNSUPPORTED_FLASH_CONTENT" - Flash content was found in an + // unsupported context. + Reason string `json:"reason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Details") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Details") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AccountsClientsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ +func (s *Disapproval) MarshalJSON() ([]byte, error) { + type noMethod Disapproval + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (c *AccountsClientsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.client) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated +// empty messages in your APIs. A typical example is to use it as the +// request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` } -// Do executes the "adexchangebuyer2.accounts.clients.create" call. -// Exactly one of *Client or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Client.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// FilteringStats: @OutputOnly Filtering reasons for this creative +// during a period of a single +// day (from midnight to midnight Pacific). +type FilteringStats struct { + // Date: The day during which the data was collected. + // The data is collected from 00:00:00 to 23:59:59 PT. + // During switches from PST to PDT and back, the day may + // contain 23 or 25 hours of data instead of the usual 24. + Date *Date `json:"date,omitempty"` + + // Reasons: The set of filtering reasons for this date. + Reasons []*Reason `json:"reasons,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Date") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Date") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FilteringStats) MarshalJSON() ([]byte, error) { + type noMethod FilteringStats + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HtmlContent: HTML content for a creative. +type HtmlContent struct { + // Height: The height of the HTML snippet in pixels. + Height int64 `json:"height,omitempty"` + + // Snippet: The HTML snippet that displays the ad when inserted in the + // web page. + Snippet string `json:"snippet,omitempty"` + + // Width: The width of the HTML snippet in pixels. + Width int64 `json:"width,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Height") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Height") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HtmlContent) MarshalJSON() ([]byte, error) { + type noMethod HtmlContent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Image: An image resource. You may provide a larger image than was +// requested, +// so long as the aspect ratio is preserved. +type Image struct { + // Height: Image height in pixels. + Height int64 `json:"height,omitempty"` + + // Url: The URL of the image. + Url string `json:"url,omitempty"` + + // Width: Image width in pixels. + Width int64 `json:"width,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Height") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Height") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Image) MarshalJSON() ([]byte, error) { + type noMethod Image + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ListClientUserInvitationsResponse struct { + // Invitations: The returned list of client users. + Invitations []*ClientUserInvitation `json:"invitations,omitempty"` + + // NextPageToken: A token to retrieve the next page of results. + // Pass this value in + // the + // ListClientUserInvitationsRequest.pageToken + // field in the subsequent call to the + // clients.invitations.list + // method to retrieve the next + // page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Invitations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Invitations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListClientUserInvitationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListClientUserInvitationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ListClientUsersResponse struct { + // NextPageToken: A token to retrieve the next page of results. + // Pass this value in the + // ListClientUsersRequest.pageToken + // field in the subsequent call to the + // clients.invitations.list + // method to retrieve the next + // page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Users: The returned list of client users. + Users []*ClientUser `json:"users,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListClientUsersResponse) MarshalJSON() ([]byte, error) { + type noMethod ListClientUsersResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ListClientsResponse struct { + // Clients: The returned list of clients. + Clients []*Client `json:"clients,omitempty"` + + // NextPageToken: A token to retrieve the next page of results. + // Pass this value in the + // ListClientsRequest.pageToken + // field in the subsequent call to the + // accounts.clients.list method + // to retrieve the next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Clients") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Clients") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListClientsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListClientsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListCreativesResponse: A response for listing creatives. +type ListCreativesResponse struct { + // Creatives: The list of creatives. + Creatives []*Creative `json:"creatives,omitempty"` + + // NextPageToken: A token to retrieve the next page of results. + // Pass this value in the + // ListCreativesRequest.page_token + // field in the subsequent call to `ListCreatives` method to retrieve + // the next + // page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Creatives") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Creatives") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListCreativesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListCreativesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListDealAssociationsResponse: A response for listing creative and +// deal associations +type ListDealAssociationsResponse struct { + // Associations: The list of associations. + Associations []*CreativeDealAssociation `json:"associations,omitempty"` + + // NextPageToken: A token to retrieve the next page of results. + // Pass this value in the + // ListDealAssociationsRequest.page_token + // field in the subsequent call to 'ListDealAssociation' method to + // retrieve + // the next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Associations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Associations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListDealAssociationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListDealAssociationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LocationContext: @OutputOnly The Geo criteria the restriction applies +// to. +type LocationContext struct { + // GeoCriteriaIds: IDs representing the geo location for this + // context. + // Please refer to + // the + // [geo-table.csv](https://storage.googleapis.com/adx-rtb-dictionarie + // s/geo-table.csv) + // file for different geo criteria IDs. + GeoCriteriaIds []int64 `json:"geoCriteriaIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GeoCriteriaIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GeoCriteriaIds") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LocationContext) MarshalJSON() ([]byte, error) { + type noMethod LocationContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NativeContent: Native content for a creative. +type NativeContent struct { + // AdvertiserName: The name of the advertiser or sponsor, to be + // displayed in the ad creative. + AdvertiserName string `json:"advertiserName,omitempty"` + + // AppIcon: The app icon, for app download ads. + AppIcon *Image `json:"appIcon,omitempty"` + + // Body: A long description of the ad. + Body string `json:"body,omitempty"` + + // CallToAction: A label for the button that the user is supposed to + // click. + CallToAction string `json:"callToAction,omitempty"` + + // ClickLinkUrl: The URL that the browser/SDK will load when the user + // clicks the ad. + ClickLinkUrl string `json:"clickLinkUrl,omitempty"` + + // ClickTrackingUrl: The URL to use for click tracking. + ClickTrackingUrl string `json:"clickTrackingUrl,omitempty"` + + // Headline: A short title for the ad. + Headline string `json:"headline,omitempty"` + + // Image: A large image. + Image *Image `json:"image,omitempty"` + + // Logo: A smaller image, for the advertiser's logo. + Logo *Image `json:"logo,omitempty"` + + // PriceDisplayText: The price of the promoted app including currency + // info. + PriceDisplayText string `json:"priceDisplayText,omitempty"` + + // StarRating: The app rating in the app store. Must be in the range + // [0-5]. + StarRating float64 `json:"starRating,omitempty"` + + // StoreUrl: The URL to the app store to purchase/download the promoted + // app. + StoreUrl string `json:"storeUrl,omitempty"` + + // VideoUrl: The URL to fetch a native video ad. 
+ VideoUrl string `json:"videoUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertiserName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdvertiserName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NativeContent) MarshalJSON() ([]byte, error) { + type noMethod NativeContent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *NativeContent) UnmarshalJSON(data []byte) error { + type noMethod NativeContent + var s1 struct { + StarRating gensupport.JSONFloat64 `json:"starRating"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.StarRating = float64(s1.StarRating) + return nil +} + +// PlatformContext: @OutputOnly The type of platform the restriction +// applies to. +type PlatformContext struct { + // Platforms: The platforms this restriction applies to. + // + // Possible values: + // "DESKTOP" - Desktop platform. + // "ANDROID" - Android platform. + // "IOS" - iOS platform. + Platforms []string `json:"platforms,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Platforms") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Platforms") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PlatformContext) MarshalJSON() ([]byte, error) { + type noMethod PlatformContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Reason: A specific filtering status and how many times it occurred. +type Reason struct { + // Count: The number of times the creative was filtered for the status. + // The + // count is aggregated across all publishers on the exchange. + Count int64 `json:"count,omitempty,string"` + + // Status: The filtering status code. Please refer to + // the + // [creative-status-codes.txt](https://storage.googleapis.com/adx-rtb + // -dictionaries/creative-status-codes.txt) + // file for different statuses. 
+ Status int64 `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Count") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Reason) MarshalJSON() ([]byte, error) { + type noMethod Reason + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RemoveDealAssociationRequest: A request for removing the association +// between a deal and a creative. +type RemoveDealAssociationRequest struct { + // Association: The association between a creative and a deal that + // should be removed. + Association *CreativeDealAssociation `json:"association,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Association") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Association") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RemoveDealAssociationRequest) MarshalJSON() ([]byte, error) { + type noMethod RemoveDealAssociationRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecurityContext: @OutputOnly A security context. +type SecurityContext struct { + // Securities: The security types in this context. + // + // Possible values: + // "INSECURE" - Matches impressions that require insecure + // compatibility. + // "SSL" - Matches impressions that require SSL compatibility. + Securities []string `json:"securities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Securities") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Securities") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityContext) MarshalJSON() ([]byte, error) { + type noMethod SecurityContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ServingContext: The serving context for this restriction. +type ServingContext struct { + // All: Matches all contexts. + // + // Possible values: + // "SIMPLE_CONTEXT" - A simple context. + All string `json:"all,omitempty"` + + // AppType: Matches impressions for a particular app type. + AppType *AppContext `json:"appType,omitempty"` + + // AuctionType: Matches impressions for a particular auction type. + AuctionType *AuctionContext `json:"auctionType,omitempty"` + + // Location: Matches impressions coming from users *or* publishers in a + // specific + // location. + Location *LocationContext `json:"location,omitempty"` + + // Platform: Matches impressions coming from a particular platform. + Platform *PlatformContext `json:"platform,omitempty"` + + // SecurityType: Matches impressions for a particular security type. + SecurityType *SecurityContext `json:"securityType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "All") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "All") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ServingContext) MarshalJSON() ([]byte, error) { + type noMethod ServingContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ServingRestriction: @OutputOnly A representation of the status of an +// ad in a +// specific context. A context here relates to where something +// ultimately serves +// (for example, a user or publisher geo, a platform, an HTTPS vs HTTP +// request, +// or the type of auction). +type ServingRestriction struct { + // Contexts: The contexts for the restriction. + Contexts []*ServingContext `json:"contexts,omitempty"` + + // DisapprovalReasons: Any disapprovals bound to this restriction. + // Only present if status=DISAPPROVED. + // Can be used to filter the response of the + // creatives.list + // method. + DisapprovalReasons []*Disapproval `json:"disapprovalReasons,omitempty"` + + // Status: The status of the creative in this context (for example, it + // has been + // explicitly disapproved or is pending review). + // + // Possible values: + // "STATUS_UNSPECIFIED" - The status is not known. + // "DISAPPROVAL" - The ad was disapproved in this context. 
+ // "PENDING_REVIEW" - The ad is pending review in this context. + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Contexts") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Contexts") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ServingRestriction) MarshalJSON() ([]byte, error) { + type noMethod ServingRestriction + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StopWatchingCreativeRequest: A request for stopping notifications for +// changes to creative Status. +type StopWatchingCreativeRequest struct { +} + +// VideoContent: Video content for a creative. +type VideoContent struct { + // VideoUrl: The URL to fetch a video ad. + VideoUrl string `json:"videoUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VideoUrl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VideoUrl") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VideoContent) MarshalJSON() ([]byte, error) { + type noMethod VideoContent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// WatchCreativeRequest: A request for watching changes to creative +// Status. +type WatchCreativeRequest struct { + // Topic: The Pub/Sub topic to publish notifications to. + // This topic must already exist and must give permission + // to + // ad-exchange-buyside-reports@google.com to write to the topic. + // This should be the full resource name + // in + // "projects/{project_id}/topics/{topic_id}" format. + Topic string `json:"topic,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Topic") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Topic") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WatchCreativeRequest) MarshalJSON() ([]byte, error) { + type noMethod WatchCreativeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "adexchangebuyer2.accounts.clients.create": + +type AccountsClientsCreateCall struct { + s *Service + accountId int64 + client *Client + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new client buyer. +func (r *AccountsClientsService) Create(accountId int64, client *Client) *AccountsClientsCreateCall { + c := &AccountsClientsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.client = client + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsCreateCall) Fields(s ...googleapi.Field) *AccountsClientsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsCreateCall) Context(ctx context.Context) *AccountsClientsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AccountsClientsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.client) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.create" call. +// Exactly one of *Client or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Client.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AccountsClientsCreateCall) Do(opts ...googleapi.CallOption) (*Client, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Client{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new client buyer.", + // "flatPath": "v2beta1/accounts/{accountId}/clients", + // "httpMethod": "POST", + // "id": "adexchangebuyer2.accounts.clients.create", + // "parameterOrder": [ + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to create a client for. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients", + // "request": { + // "$ref": "Client" + // }, + // "response": { + // "$ref": "Client" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// method id "adexchangebuyer2.accounts.clients.get": + +type AccountsClientsGetCall struct { + s *Service + accountId int64 + clientAccountId int64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a client buyer with a given client account ID. +func (r *AccountsClientsService) Get(accountId int64, clientAccountId int64) *AccountsClientsGetCall { + c := &AccountsClientsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsGetCall) Fields(s ...googleapi.Field) *AccountsClientsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AccountsClientsGetCall) IfNoneMatch(entityTag string) *AccountsClientsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsGetCall) Context(ctx context.Context) *AccountsClientsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AccountsClientsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.get" call. +// Exactly one of *Client or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Client.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AccountsClientsGetCall) Do(opts ...googleapi.CallOption) (*Client, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Client{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a client buyer with a given client account ID.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + // "httpMethod": "GET", + // "id": "adexchangebuyer2.accounts.clients.get", + // "parameterOrder": [ + // "accountId", + // "clientAccountId" + // ], + // "parameters": { + // "accountId": { + // "description": "Numerical account ID of the client's sponsor buyer. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "Numerical account ID of the client buyer to retrieve. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + // "response": { + // "$ref": "Client" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// method id "adexchangebuyer2.accounts.clients.list": + +type AccountsClientsListCall struct { + s *Service + accountId int64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the clients for the current sponsor buyer. 
+func (r *AccountsClientsService) List(accountId int64) *AccountsClientsListCall { + c := &AccountsClientsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + return c +} + +// PageSize sets the optional parameter "pageSize": Requested page size. +// The server may return fewer clients than requested. +// If unspecified, the server will pick an appropriate default. +func (c *AccountsClientsListCall) PageSize(pageSize int64) *AccountsClientsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A token +// identifying a page of results the server should return. +// Typically, this is the value +// of +// ListClientsResponse.nextPageToken +// returned from the previous call to the +// accounts.clients.list method. +func (c *AccountsClientsListCall) PageToken(pageToken string) *AccountsClientsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsListCall) Fields(s ...googleapi.Field) *AccountsClientsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AccountsClientsListCall) IfNoneMatch(entityTag string) *AccountsClientsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsListCall) Context(ctx context.Context) *AccountsClientsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AccountsClientsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.list" call. +// Exactly one of *ListClientsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListClientsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *AccountsClientsListCall) Do(opts ...googleapi.CallOption) (*ListClientsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListClientsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all the clients for the current sponsor buyer.", + // "flatPath": "v2beta1/accounts/{accountId}/clients", + // "httpMethod": "GET", + // "id": "adexchangebuyer2.accounts.clients.list", + // "parameterOrder": [ + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "Unique numerical account ID of the sponsor buyer to list the clients for.", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Requested page size. The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientsResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.list method.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients", + // "response": { + // "$ref": "ListClientsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AccountsClientsListCall) Pages(ctx context.Context, f func(*ListClientsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "adexchangebuyer2.accounts.clients.update": + +type AccountsClientsUpdateCall struct { + s *Service + accountId int64 + clientAccountId int64 + client *Client + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an existing client buyer. +func (r *AccountsClientsService) Update(accountId int64, clientAccountId int64, client *Client) *AccountsClientsUpdateCall { + c := &AccountsClientsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + c.client = client + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
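+//
+// For example (a sketch; svc, accountID, clientAccountID, and the updated
+// client value are assumed to exist, and the field names are illustrative):
+//
+//   updated, err := svc.Accounts.Clients.Update(accountID, clientAccountID, client).
+//       Fields("clientAccountId", "status").
+//       Do()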
+func (c *AccountsClientsUpdateCall) Fields(s ...googleapi.Field) *AccountsClientsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsUpdateCall) Context(ctx context.Context) *AccountsClientsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AccountsClientsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.client) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.update" call. +// Exactly one of *Client or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Client.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *AccountsClientsCreateCall) Do(opts ...googleapi.CallOption) (*Client, error) { +func (c *AccountsClientsUpdateCall) Do(opts ...googleapi.CallOption) (*Client, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Client{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing client buyer.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + // "httpMethod": "PUT", + // "id": "adexchangebuyer2.accounts.clients.update", + // "parameterOrder": [ + // "accountId", + // "clientAccountId" + // ], + // "parameters": { + // "accountId": { + // "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to update a client for. 
(required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "Unique numerical account ID of the client to update. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + // "request": { + // "$ref": "Client" + // }, + // "response": { + // "$ref": "Client" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// method id "adexchangebuyer2.accounts.clients.invitations.create": + +type AccountsClientsInvitationsCreateCall struct { + s *Service + accountId int64 + clientAccountId int64 + clientuserinvitation *ClientUserInvitation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates and sends out an email invitation to access +// an Ad Exchange client buyer account. +func (r *AccountsClientsInvitationsService) Create(accountId int64, clientAccountId int64, clientuserinvitation *ClientUserInvitation) *AccountsClientsInvitationsCreateCall { + c := &AccountsClientsInvitationsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + c.clientuserinvitation = clientuserinvitation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsInvitationsCreateCall) Fields(s ...googleapi.Field) *AccountsClientsInvitationsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsInvitationsCreateCall) Context(ctx context.Context) *AccountsClientsInvitationsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AccountsClientsInvitationsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsInvitationsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.clientuserinvitation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.invitations.create" call. +// Exactly one of *ClientUserInvitation or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *ClientUserInvitation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AccountsClientsInvitationsCreateCall) Do(opts ...googleapi.CallOption) (*ClientUserInvitation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ClientUserInvitation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates and sends out an email invitation to access\nan Ad Exchange client buyer account.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + // "httpMethod": "POST", + // "id": "adexchangebuyer2.accounts.clients.invitations.create", + // "parameterOrder": [ + // "accountId", + // "clientAccountId" + // ], + // "parameters": { + // "accountId": { + // "description": "Numerical account ID of the client's sponsor buyer. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "Numerical account ID of the client buyer that the user\nshould be associated with. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + // "request": { + // "$ref": "ClientUserInvitation" + // }, + // "response": { + // "$ref": "ClientUserInvitation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// method id "adexchangebuyer2.accounts.clients.invitations.get": + +type AccountsClientsInvitationsGetCall struct { + s *Service + accountId int64 + clientAccountId int64 + invitationId int64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an existing client user invitation. +func (r *AccountsClientsInvitationsService) Get(accountId int64, clientAccountId int64, invitationId int64) *AccountsClientsInvitationsGetCall { + c := &AccountsClientsInvitationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + c.invitationId = invitationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsInvitationsGetCall) Fields(s ...googleapi.Field) *AccountsClientsInvitationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AccountsClientsInvitationsGetCall) IfNoneMatch(entityTag string) *AccountsClientsInvitationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsInvitationsGetCall) Context(ctx context.Context) *AccountsClientsInvitationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AccountsClientsInvitationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsInvitationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + "invitationId": strconv.FormatInt(c.invitationId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.invitations.get" call. +// Exactly one of *ClientUserInvitation or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ClientUserInvitation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AccountsClientsInvitationsGetCall) Do(opts ...googleapi.CallOption) (*ClientUserInvitation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ClientUserInvitation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an existing client user invitation.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}", + // "httpMethod": "GET", + // "id": "adexchangebuyer2.accounts.clients.invitations.get", + // "parameterOrder": [ + // "accountId", + // "clientAccountId", + // "invitationId" + // ], + // "parameters": { + // "accountId": { + // "description": "Numerical account ID of the client's sponsor buyer. 
(required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "Numerical account ID of the client buyer that the user invitation\nto be retrieved is associated with. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "invitationId": { + // "description": "Numerical identifier of the user invitation to retrieve. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}", + // "response": { + // "$ref": "ClientUserInvitation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// method id "adexchangebuyer2.accounts.clients.invitations.list": + +type AccountsClientsInvitationsListCall struct { + s *Service + accountId int64 + clientAccountId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the client users invitations for a client +// with a given account ID. +func (r *AccountsClientsInvitationsService) List(accountId int64, clientAccountId string) *AccountsClientsInvitationsListCall { + c := &AccountsClientsInvitationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + return c +} + +// PageSize sets the optional parameter "pageSize": Requested page size. +// Server may return fewer clients than requested. +// If unspecified, server will pick an appropriate default. +func (c *AccountsClientsInvitationsListCall) PageSize(pageSize int64) *AccountsClientsInvitationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A token +// identifying a page of results the server should return. +// Typically, this is the value +// of +// ListClientUserInvitationsResponse.nextPageToken +// returned from the previous call to +// the +// clients.invitations.list +// method. +func (c *AccountsClientsInvitationsListCall) PageToken(pageToken string) *AccountsClientsInvitationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsInvitationsListCall) Fields(s ...googleapi.Field) *AccountsClientsInvitationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AccountsClientsInvitationsListCall) IfNoneMatch(entityTag string) *AccountsClientsInvitationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *AccountsClientsInvitationsListCall) Context(ctx context.Context) *AccountsClientsInvitationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AccountsClientsInvitationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsInvitationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": c.clientAccountId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.invitations.list" call. +// Exactly one of *ListClientUserInvitationsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListClientUserInvitationsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *AccountsClientsInvitationsListCall) Do(opts ...googleapi.CallOption) (*ListClientUserInvitationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListClientUserInvitationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all the client users invitations for a client\nwith a given account ID.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + // "httpMethod": "GET", + // "id": "adexchangebuyer2.accounts.clients.invitations.list", + // "parameterOrder": [ + // "accountId", + // "clientAccountId" + // ], + // "parameters": { + // "accountId": { + // "description": "Numerical account ID of the client's sponsor buyer. 
(required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "Numerical account ID of the client buyer to list invitations for.\n(required)\nYou must either specify a string representation of a\nnumerical account identifier or the `-` character\nto list all the invitations for all the clients\nof a given sponsor buyer.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Requested page size. Server may return fewer clients than requested.\nIf unspecified, server will pick an appropriate default.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUserInvitationsResponse.nextPageToken\nreturned from the previous call to the\nclients.invitations.list\nmethod.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + // "response": { + // "$ref": "ListClientUserInvitationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AccountsClientsInvitationsListCall) Pages(ctx context.Context, f func(*ListClientUserInvitationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "adexchangebuyer2.accounts.clients.users.get": + +type AccountsClientsUsersGetCall struct { + s *Service + accountId int64 + clientAccountId int64 + userId int64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an existing client user. +func (r *AccountsClientsUsersService) Get(accountId int64, clientAccountId int64, userId int64) *AccountsClientsUsersGetCall { + c := &AccountsClientsUsersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + c.userId = userId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsUsersGetCall) Fields(s ...googleapi.Field) *AccountsClientsUsersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AccountsClientsUsersGetCall) IfNoneMatch(entityTag string) *AccountsClientsUsersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *AccountsClientsUsersGetCall) Context(ctx context.Context) *AccountsClientsUsersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AccountsClientsUsersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsUsersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + "userId": strconv.FormatInt(c.userId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.users.get" call. +// Exactly one of *ClientUser or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ClientUser.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AccountsClientsUsersGetCall) Do(opts ...googleapi.CallOption) (*ClientUser, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ClientUser{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an existing client user.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + // "httpMethod": "GET", + // "id": "adexchangebuyer2.accounts.clients.users.get", + // "parameterOrder": [ + // "accountId", + // "clientAccountId", + // "userId" + // ], + // "parameters": { + // "accountId": { + // "description": "Numerical account ID of the client's sponsor buyer. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "Numerical account ID of the client buyer\nthat the user to be retrieved is associated with. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userId": { + // "description": "Numerical identifier of the user to retrieve. 
(required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + // "response": { + // "$ref": "ClientUser" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// method id "adexchangebuyer2.accounts.clients.users.list": + +type AccountsClientsUsersListCall struct { + s *Service + accountId int64 + clientAccountId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the known client users for a specified +// sponsor buyer account ID. +func (r *AccountsClientsUsersService) List(accountId int64, clientAccountId string) *AccountsClientsUsersListCall { + c := &AccountsClientsUsersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + return c +} + +// PageSize sets the optional parameter "pageSize": Requested page size. +// The server may return fewer clients than requested. +// If unspecified, the server will pick an appropriate default. +func (c *AccountsClientsUsersListCall) PageSize(pageSize int64) *AccountsClientsUsersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A token +// identifying a page of results the server should return. +// Typically, this is the value +// of +// ListClientUsersResponse.nextPageToken +// returned from the previous call to the +// accounts.clients.users.list method. +func (c *AccountsClientsUsersListCall) PageToken(pageToken string) *AccountsClientsUsersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsUsersListCall) Fields(s ...googleapi.Field) *AccountsClientsUsersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AccountsClientsUsersListCall) IfNoneMatch(entityTag string) *AccountsClientsUsersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsUsersListCall) Context(ctx context.Context) *AccountsClientsUsersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AccountsClientsUsersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsUsersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": c.clientAccountId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.users.list" call. +// Exactly one of *ListClientUsersResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListClientUsersResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AccountsClientsUsersListCall) Do(opts ...googleapi.CallOption) (*ListClientUsersResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -582,7 +3244,7 @@ func (c *AccountsClientsCreateCall) Do(opts ...googleapi.CallOption) (*Client, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Client{ + ret := &ListClientUsersResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -594,28 +3256,223 @@ func (c *AccountsClientsCreateCall) Do(opts ...googleapi.CallOption) (*Client, e } return ret, nil // { - // "description": "Creates a new client buyer.", - // "flatPath": "v2beta1/accounts/{accountId}/clients", - // "httpMethod": "POST", - // "id": "adexchangebuyer2.accounts.clients.create", + // "description": "Lists all the known client users for a specified\nsponsor buyer account ID.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", + // "httpMethod": "GET", + // "id": "adexchangebuyer2.accounts.clients.users.list", // "parameterOrder": [ - // "accountId" + // "accountId", + // "clientAccountId" // ], // "parameters": { // "accountId": { - // "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to create a client for. (required)", + // "description": "Numerical account ID of the sponsor buyer of the client to list users for.\n(required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "The account ID of the client buyer to list users for. (required)\nYou must specify either a string representation of a\nnumerical account identifier or the `-` character\nto list all the client users for all the clients\nof a given sponsor buyer.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Requested page size. 
The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUsersResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.users.list method.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", + // "response": { + // "$ref": "ListClientUsersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/adexchange.buyer" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AccountsClientsUsersListCall) Pages(ctx context.Context, f func(*ListClientUsersResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "adexchangebuyer2.accounts.clients.users.update": + +type AccountsClientsUsersUpdateCall struct { + s *Service + accountId int64 + clientAccountId int64 + userId int64 + clientuser *ClientUser + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an existing client user. +// Only the user status can be changed on update. +func (r *AccountsClientsUsersService) Update(accountId int64, clientAccountId int64, userId int64, clientuser *ClientUser) *AccountsClientsUsersUpdateCall { + c := &AccountsClientsUsersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + c.clientAccountId = clientAccountId + c.userId = userId + c.clientuser = clientuser + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AccountsClientsUsersUpdateCall) Fields(s ...googleapi.Field) *AccountsClientsUsersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AccountsClientsUsersUpdateCall) Context(ctx context.Context) *AccountsClientsUsersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AccountsClientsUsersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AccountsClientsUsersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.clientuser) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "accountId": strconv.FormatInt(c.accountId, 10), + "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + "userId": strconv.FormatInt(c.userId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "adexchangebuyer2.accounts.clients.users.update" call. +// Exactly one of *ClientUser or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ClientUser.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AccountsClientsUsersUpdateCall) Do(opts ...googleapi.CallOption) (*ClientUser, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ClientUser{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing client user.\nOnly the user status can be changed on update.", + // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + // "httpMethod": "PUT", + // "id": "adexchangebuyer2.accounts.clients.users.update", + // "parameterOrder": [ + // "accountId", + // "clientAccountId", + // "userId" + // ], + // "parameters": { + // "accountId": { + // "description": "Numerical account ID of the client's sponsor buyer. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientAccountId": { + // "description": "Numerical account ID of the client buyer that the user to be retrieved\nis associated with. (required)", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userId": { + // "description": "Numerical identifier of the user to retrieve. 
(required)", // "format": "int64", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients", + // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", // "request": { - // "$ref": "Client" + // "$ref": "ClientUser" // }, // "response": { - // "$ref": "Client" + // "$ref": "ClientUser" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -624,91 +3481,95 @@ func (c *AccountsClientsCreateCall) Do(opts ...googleapi.CallOption) (*Client, e } -// method id "adexchangebuyer2.accounts.clients.get": +// method id "adexchangebuyer2.accounts.creatives.create": -type AccountsClientsGetCall struct { - s *Service - accountId int64 - clientAccountId int64 - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AccountsCreativesCreateCall struct { + s *Service + accountId string + creative *Creative + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a client buyer with a given client account ID. -func (r *AccountsClientsService) Get(accountId int64, clientAccountId int64) *AccountsClientsGetCall { - c := &AccountsClientsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates a creative. +func (r *AccountsCreativesService) Create(accountId string, creative *Creative) *AccountsCreativesCreateCall { + c := &AccountsCreativesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - c.clientAccountId = clientAccountId + c.creative = creative + return c +} + +// DuplicateIdMode sets the optional parameter "duplicateIdMode": +// Indicates if multiple creatives can share an ID or not. Default +// is +// NO_DUPLICATES (one ID per creative). +// +// Possible values: +// "NO_DUPLICATES" +// "FORCE_ENABLE_DUPLICATE_IDS" +func (c *AccountsCreativesCreateCall) DuplicateIdMode(duplicateIdMode string) *AccountsCreativesCreateCall { + c.urlParams_.Set("duplicateIdMode", duplicateIdMode) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsGetCall) Fields(s ...googleapi.Field) *AccountsClientsGetCall { +func (c *AccountsCreativesCreateCall) Fields(s ...googleapi.Field) *AccountsCreativesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AccountsClientsGetCall) IfNoneMatch(entityTag string) *AccountsClientsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsGetCall) Context(ctx context.Context) *AccountsClientsGetCall { +func (c *AccountsCreativesCreateCall) Context(ctx context.Context) *AccountsCreativesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AccountsClientsGetCall) Header() http.Header { +func (c *AccountsCreativesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + "accountId": c.accountId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.get" call. -// Exactly one of *Client or error will be non-nil. Any non-2xx status +// Do executes the "adexchangebuyer2.accounts.creatives.create" call. +// Exactly one of *Creative or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Client.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AccountsClientsGetCall) Do(opts ...googleapi.CallOption) (*Client, error) { +// *Creative.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AccountsCreativesCreateCall) Do(opts ...googleapi.CallOption) (*Creative, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -727,7 +3588,7 @@ func (c *AccountsClientsGetCall) Do(opts ...googleapi.CallOption) (*Client, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Client{ + ret := &Creative{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -739,33 +3600,36 @@ func (c *AccountsClientsGetCall) Do(opts ...googleapi.CallOption) (*Client, erro } return ret, nil // { - // "description": "Gets a client buyer with a given client account ID.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", - // "httpMethod": "GET", - // "id": "adexchangebuyer2.accounts.clients.get", + // "description": "Creates a creative.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives", + // "httpMethod": "POST", + // "id": "adexchangebuyer2.accounts.creatives.create", // "parameterOrder": [ - // "accountId", - // "clientAccountId" + // "accountId" // ], // "parameters": { // "accountId": { - // "description": "Numerical account ID of the client's sponsor buyer. (required)", - // "format": "int64", + // "description": "The account that this creative belongs to.\nCan be used to filter the response of the\ncreatives.list\nmethod.", // "location": "path", // "required": true, // "type": "string" // }, - // "clientAccountId": { - // "description": "Numerical account ID of the client buyer to retrieve. (required)", - // "format": "int64", - // "location": "path", - // "required": true, + // "duplicateIdMode": { + // "description": "Indicates if multiple creatives can share an ID or not. Default is\nNO_DUPLICATES (one ID per creative).", + // "enum": [ + // "NO_DUPLICATES", + // "FORCE_ENABLE_DUPLICATE_IDS" + // ], + // "location": "query", // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", + // "path": "v2beta1/accounts/{accountId}/creatives", + // "request": { + // "$ref": "Creative" + // }, // "response": { - // "$ref": "Client" + // "$ref": "Creative" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -774,48 +3638,30 @@ func (c *AccountsClientsGetCall) Do(opts ...googleapi.CallOption) (*Client, erro } -// method id "adexchangebuyer2.accounts.clients.list": +// method id "adexchangebuyer2.accounts.creatives.get": -type AccountsClientsListCall struct { +type AccountsCreativesGetCall struct { s *Service - accountId int64 + accountId string + creativeId string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists all the clients for the current sponsor buyer. -func (r *AccountsClientsService) List(accountId int64) *AccountsClientsListCall { - c := &AccountsClientsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Gets a creative. +func (r *AccountsCreativesService) Get(accountId string, creativeId string) *AccountsCreativesGetCall { + c := &AccountsCreativesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - return c -} - -// PageSize sets the optional parameter "pageSize": Requested page size. -// The server may return fewer clients than requested. -// If unspecified, the server will pick an appropriate default. 
-func (c *AccountsClientsListCall) PageSize(pageSize int64) *AccountsClientsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": A token -// identifying a page of results the server should return. -// Typically, this is the value -// of -// ListClientsResponse.nextPageToken -// returned from the previous call to the -// accounts.clients.list method. -func (c *AccountsClientsListCall) PageToken(pageToken string) *AccountsClientsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.creativeId = creativeId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsListCall) Fields(s ...googleapi.Field) *AccountsClientsListCall { +func (c *AccountsCreativesGetCall) Fields(s ...googleapi.Field) *AccountsCreativesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -825,7 +3671,7 @@ func (c *AccountsClientsListCall) Fields(s ...googleapi.Field) *AccountsClientsL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AccountsClientsListCall) IfNoneMatch(entityTag string) *AccountsClientsListCall { +func (c *AccountsCreativesGetCall) IfNoneMatch(entityTag string) *AccountsCreativesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -833,49 +3679,51 @@ func (c *AccountsClientsListCall) IfNoneMatch(entityTag string) *AccountsClients // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsListCall) Context(ctx context.Context) *AccountsClientsListCall { +func (c *AccountsCreativesGetCall) Context(ctx context.Context) *AccountsCreativesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AccountsClientsListCall) Header() http.Header { +func (c *AccountsCreativesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsListCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives/{creativeId}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), + "accountId": c.accountId, + "creativeId": c.creativeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.list" call. -// Exactly one of *ListClientsResponse or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *ListClientsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AccountsClientsListCall) Do(opts ...googleapi.CallOption) (*ListClientsResponse, error) { +// Do executes the "adexchangebuyer2.accounts.creatives.get" call. +// Exactly one of *Creative or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Creative.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AccountsCreativesGetCall) Do(opts ...googleapi.CallOption) (*Creative, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -894,7 +3742,7 @@ func (c *AccountsClientsListCall) Do(opts ...googleapi.CallOption) (*ListClients if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListClientsResponse{ + ret := &Creative{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -906,36 +3754,31 @@ func (c *AccountsClientsListCall) Do(opts ...googleapi.CallOption) (*ListClients } return ret, nil // { - // "description": "Lists all the clients for the current sponsor buyer.", - // "flatPath": "v2beta1/accounts/{accountId}/clients", + // "description": "Gets a creative.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}", // "httpMethod": "GET", - // "id": "adexchangebuyer2.accounts.clients.list", + // "id": "adexchangebuyer2.accounts.creatives.get", // "parameterOrder": [ - // "accountId" + // "accountId", + // "creativeId" // ], // "parameters": { // "accountId": { - // "description": "Unique numerical account ID of the sponsor buyer to list the clients for.", - // "format": "int64", + // "description": "The account the creative belongs to.", // "location": "path", // "required": true, // "type": "string" // }, - // "pageSize": { - // "description": "Requested page size. The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientsResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.list method.", - // "location": "query", + // "creativeId": { + // "description": "The ID of the creative to retrieve.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients", + // "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}", // "response": { - // "$ref": "ListClientsResponse" + // "$ref": "Creative" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -944,105 +3787,137 @@ func (c *AccountsClientsListCall) Do(opts ...googleapi.CallOption) (*ListClients } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AccountsClientsListCall) Pages(ctx context.Context, f func(*ListClientsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "adexchangebuyer2.accounts.creatives.list": + +type AccountsCreativesListCall struct { + s *Service + accountId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "adexchangebuyer2.accounts.clients.update": +// List: Lists creatives. +func (r *AccountsCreativesService) List(accountId string) *AccountsCreativesListCall { + c := &AccountsCreativesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.accountId = accountId + return c +} -type AccountsClientsUpdateCall struct { - s *Service - accountId int64 - clientAccountId int64 - client *Client - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// PageSize sets the optional parameter "pageSize": Requested page size. +// The server may return fewer creatives than requested +// (due to timeout constraint) even if more are available via another +// call. +// If unspecified, server will pick an appropriate default. +// Acceptable values are 1 to 1000, inclusive. +func (c *AccountsCreativesListCall) PageSize(pageSize int64) *AccountsCreativesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c } -// Update: Updates an existing client buyer. -func (r *AccountsClientsService) Update(accountId int64, clientAccountId int64, client *Client) *AccountsClientsUpdateCall { - c := &AccountsClientsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.accountId = accountId - c.clientAccountId = clientAccountId - c.client = client +// PageToken sets the optional parameter "pageToken": A token +// identifying a page of results the server should return. +// Typically, this is the value +// of +// ListCreativesResponse.next_page_token +// returned from the previous call to 'ListCreatives' method. +func (c *AccountsCreativesListCall) PageToken(pageToken string) *AccountsCreativesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Query sets the optional parameter "query": An optional query string +// to filter creatives. If no filter is specified, +// all active creatives will be returned. +// Supported queries +// are: +//
+// <ul>
+// <li>accountId=<i>account_id_string</i>
+// <li>creativeId=<i>creative_id_string</i>
+// <li>dealsStatus: {approved, conditionally_approved, disapproved,
+// not_checked}
+// <li>openAuctionStatus: {approved, conditionally_approved, disapproved,
+// not_checked}
+// <li>attribute: {a numeric attribute from the list of attributes}
+// <li>disapprovalReason: {a reason from DisapprovalReason
+// </ul>
      +// Example: 'accountId=12345 AND (dealsStatus:disapproved AND +// disapprovalReason:unacceptable_content) OR attribute:47' +func (c *AccountsCreativesListCall) Query(query string) *AccountsCreativesListCall { + c.urlParams_.Set("query", query) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsUpdateCall) Fields(s ...googleapi.Field) *AccountsClientsUpdateCall { +func (c *AccountsCreativesListCall) Fields(s ...googleapi.Field) *AccountsCreativesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AccountsCreativesListCall) IfNoneMatch(entityTag string) *AccountsCreativesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsUpdateCall) Context(ctx context.Context) *AccountsClientsUpdateCall { +func (c *AccountsCreativesListCall) Context(ctx context.Context) *AccountsCreativesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AccountsClientsUpdateCall) Header() http.Header { +func (c *AccountsCreativesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.client) - if err != nil { - return nil, err + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + "accountId": c.accountId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.update" call. -// Exactly one of *Client or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Client.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *AccountsClientsUpdateCall) Do(opts ...googleapi.CallOption) (*Client, error) { +// Do executes the "adexchangebuyer2.accounts.creatives.list" call. +// Exactly one of *ListCreativesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListCreativesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AccountsCreativesListCall) Do(opts ...googleapi.CallOption) (*ListCreativesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1061,7 +3936,7 @@ func (c *AccountsClientsUpdateCall) Do(opts ...googleapi.CallOption) (*Client, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Client{ + ret := &ListCreativesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1070,39 +3945,43 @@ func (c *AccountsClientsUpdateCall) Do(opts ...googleapi.CallOption) (*Client, e target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err - } - return ret, nil - // { - // "description": "Updates an existing client buyer.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", - // "httpMethod": "PUT", - // "id": "adexchangebuyer2.accounts.clients.update", + } + return ret, nil + // { + // "description": "Lists creatives.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives", + // "httpMethod": "GET", + // "id": "adexchangebuyer2.accounts.creatives.list", // "parameterOrder": [ - // "accountId", - // "clientAccountId" + // "accountId" // ], // "parameters": { // "accountId": { - // "description": "Unique numerical account ID for the buyer of which the client buyer\nis a customer; the sponsor buyer to update a client for. (required)", - // "format": "int64", + // "description": "The account to list the creatives from.\nSpecify \"-\" to list all creatives the current user has access to.", // "location": "path", // "required": true, // "type": "string" // }, - // "clientAccountId": { - // "description": "Unique numerical account ID of the client to update. (required)", - // "format": "int64", - // "location": "path", - // "required": true, + // "pageSize": { + // "description": "Requested page size. The server may return fewer creatives than requested\n(due to timeout constraint) even if more are available via another call.\nIf unspecified, server will pick an appropriate default.\nAcceptable values are 1 to 1000, inclusive.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListCreativesResponse.next_page_token\nreturned from the previous call to 'ListCreatives' method.", + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "An optional query string to filter creatives. 
If no filter is specified,\nall active creatives will be returned.\nSupported queries are:\n\u003cul\u003e\n\u003cli\u003eaccountId=\u003ci\u003eaccount_id_string\u003c/i\u003e\n\u003cli\u003ecreativeId=\u003ci\u003ecreative_id_string\u003c/i\u003e\n\u003cli\u003edealsStatus: {approved, conditionally_approved, disapproved,\n not_checked}\n\u003cli\u003eopenAuctionStatus: {approved, conditionally_approved, disapproved,\n not_checked}\n\u003cli\u003eattribute: {a numeric attribute from the list of attributes}\n\u003cli\u003edisapprovalReason: {a reason from DisapprovalReason\n\u003c/ul\u003e\nExample: 'accountId=12345 AND (dealsStatus:disapproved AND disapprovalReason:unacceptable_content) OR attribute:47'", + // "location": "query", // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}", - // "request": { - // "$ref": "Client" - // }, + // "path": "v2beta1/accounts/{accountId}/creatives", // "response": { - // "$ref": "Client" + // "$ref": "ListCreativesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -1111,32 +3990,54 @@ func (c *AccountsClientsUpdateCall) Do(opts ...googleapi.CallOption) (*Client, e } -// method id "adexchangebuyer2.accounts.clients.invitations.create": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AccountsCreativesListCall) Pages(ctx context.Context, f func(*ListCreativesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type AccountsClientsInvitationsCreateCall struct { - s *Service - accountId int64 - clientAccountId int64 - clientuserinvitation *ClientUserInvitation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "adexchangebuyer2.accounts.creatives.stopWatching": + +type AccountsCreativesStopWatchingCall struct { + s *Service + accountId string + creativeId string + stopwatchingcreativerequest *StopWatchingCreativeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates and sends out an email invitation to access -// an Ad Exchange client buyer account. -func (r *AccountsClientsInvitationsService) Create(accountId int64, clientAccountId int64, clientuserinvitation *ClientUserInvitation) *AccountsClientsInvitationsCreateCall { - c := &AccountsClientsInvitationsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StopWatching: Stops watching a creative. Will stop push notifications +// being sent to the +// topics when the creative changes status. +func (r *AccountsCreativesService) StopWatching(accountId string, creativeId string, stopwatchingcreativerequest *StopWatchingCreativeRequest) *AccountsCreativesStopWatchingCall { + c := &AccountsCreativesStopWatchingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - c.clientAccountId = clientAccountId - c.clientuserinvitation = clientuserinvitation + c.creativeId = creativeId + c.stopwatchingcreativerequest = stopwatchingcreativerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *AccountsClientsInvitationsCreateCall) Fields(s ...googleapi.Field) *AccountsClientsInvitationsCreateCall { +func (c *AccountsCreativesStopWatchingCall) Fields(s ...googleapi.Field) *AccountsCreativesStopWatchingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -1144,52 +4045,53 @@ func (c *AccountsClientsInvitationsCreateCall) Fields(s ...googleapi.Field) *Acc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsInvitationsCreateCall) Context(ctx context.Context) *AccountsClientsInvitationsCreateCall { +func (c *AccountsCreativesStopWatchingCall) Context(ctx context.Context) *AccountsCreativesStopWatchingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AccountsClientsInvitationsCreateCall) Header() http.Header { +func (c *AccountsCreativesStopWatchingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsInvitationsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesStopWatchingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.clientuserinvitation) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.stopwatchingcreativerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives/{creativeId}:stopWatching") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), + "accountId": c.accountId, + "creativeId": c.creativeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.invitations.create" call. -// Exactly one of *ClientUserInvitation or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ClientUserInvitation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AccountsClientsInvitationsCreateCall) Do(opts ...googleapi.CallOption) (*ClientUserInvitation, error) { +// Do executes the "adexchangebuyer2.accounts.creatives.stopWatching" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *AccountsCreativesStopWatchingCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1208,7 +4110,7 @@ func (c *AccountsClientsInvitationsCreateCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ClientUserInvitation{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1220,36 +4122,34 @@ func (c *AccountsClientsInvitationsCreateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates and sends out an email invitation to access\nan Ad Exchange client buyer account.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + // "description": "Stops watching a creative. Will stop push notifications being sent to the\ntopics when the creative changes status.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}:stopWatching", // "httpMethod": "POST", - // "id": "adexchangebuyer2.accounts.clients.invitations.create", + // "id": "adexchangebuyer2.accounts.creatives.stopWatching", // "parameterOrder": [ // "accountId", - // "clientAccountId" + // "creativeId" // ], // "parameters": { // "accountId": { - // "description": "Numerical account ID of the client's sponsor buyer. (required)", - // "format": "int64", + // "description": "The account of the creative to stop notifications for.", // "location": "path", // "required": true, // "type": "string" // }, - // "clientAccountId": { - // "description": "Numerical account ID of the client buyer that the user\nshould be associated with. (required)", - // "format": "int64", + // "creativeId": { + // "description": "The creative ID of the creative to stop notifications for.\nSpecify \"-\" to specify stopping account level notifications.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + // "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}:stopWatching", // "request": { - // "$ref": "ClientUserInvitation" + // "$ref": "StopWatchingCreativeRequest" // }, // "response": { - // "$ref": "ClientUserInvitation" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -1258,94 +4158,85 @@ func (c *AccountsClientsInvitationsCreateCall) Do(opts ...googleapi.CallOption) } -// method id "adexchangebuyer2.accounts.clients.invitations.get": +// method id "adexchangebuyer2.accounts.creatives.update": -type AccountsClientsInvitationsGetCall struct { - s *Service - accountId int64 - clientAccountId int64 - invitationId int64 - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AccountsCreativesUpdateCall struct { + s *Service + accountId string + creativeId string + creative *Creative + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves an existing client user invitation. -func (r *AccountsClientsInvitationsService) Get(accountId int64, clientAccountId int64, invitationId int64) *AccountsClientsInvitationsGetCall { - c := &AccountsClientsInvitationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a creative. 
+func (r *AccountsCreativesService) Update(accountId string, creativeId string, creative *Creative) *AccountsCreativesUpdateCall { + c := &AccountsCreativesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - c.clientAccountId = clientAccountId - c.invitationId = invitationId + c.creativeId = creativeId + c.creative = creative return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsInvitationsGetCall) Fields(s ...googleapi.Field) *AccountsClientsInvitationsGetCall { +func (c *AccountsCreativesUpdateCall) Fields(s ...googleapi.Field) *AccountsCreativesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AccountsClientsInvitationsGetCall) IfNoneMatch(entityTag string) *AccountsClientsInvitationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsInvitationsGetCall) Context(ctx context.Context) *AccountsClientsInvitationsGetCall { +func (c *AccountsCreativesUpdateCall) Context(ctx context.Context) *AccountsCreativesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AccountsClientsInvitationsGetCall) Header() http.Header { +func (c *AccountsCreativesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsInvitationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives/{creativeId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), - "invitationId": strconv.FormatInt(c.invitationId, 10), + "accountId": c.accountId, + "creativeId": c.creativeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.invitations.get" call. -// Exactly one of *ClientUserInvitation or error will be non-nil. 
Any -// non-2xx status code is an error. Response headers are in either -// *ClientUserInvitation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AccountsClientsInvitationsGetCall) Do(opts ...googleapi.CallOption) (*ClientUserInvitation, error) { +// Do executes the "adexchangebuyer2.accounts.creatives.update" call. +// Exactly one of *Creative or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Creative.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AccountsCreativesUpdateCall) Do(opts ...googleapi.CallOption) (*Creative, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1364,7 +4255,7 @@ func (c *AccountsClientsInvitationsGetCall) Do(opts ...googleapi.CallOption) (*C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ClientUserInvitation{ + ret := &Creative{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1376,41 +4267,34 @@ func (c *AccountsClientsInvitationsGetCall) Do(opts ...googleapi.CallOption) (*C } return ret, nil // { - // "description": "Retrieves an existing client user invitation.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}", - // "httpMethod": "GET", - // "id": "adexchangebuyer2.accounts.clients.invitations.get", + // "description": "Updates a creative.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}", + // "httpMethod": "PUT", + // "id": "adexchangebuyer2.accounts.creatives.update", // "parameterOrder": [ // "accountId", - // "clientAccountId", - // "invitationId" + // "creativeId" // ], // "parameters": { // "accountId": { - // "description": "Numerical account ID of the client's sponsor buyer. (required)", - // "format": "int64", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "clientAccountId": { - // "description": "Numerical account ID of the client buyer that the user invitation\nto be retrieved is associated with. (required)", - // "format": "int64", + // "description": "The account that this creative belongs to.\nCan be used to filter the response of the\ncreatives.list\nmethod.", // "location": "path", // "required": true, // "type": "string" // }, - // "invitationId": { - // "description": "Numerical identifier of the user invitation to retrieve. 
(required)", - // "format": "int64", + // "creativeId": { + // "description": "The buyer-defined creative ID of this creative.\nCan be used to filter the response of the\ncreatives.list\nmethod.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations/{invitationId}", + // "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}", + // "request": { + // "$ref": "Creative" + // }, // "response": { - // "$ref": "ClientUserInvitation" + // "$ref": "Creative" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -1419,115 +4303,87 @@ func (c *AccountsClientsInvitationsGetCall) Do(opts ...googleapi.CallOption) (*C } -// method id "adexchangebuyer2.accounts.clients.invitations.list": +// method id "adexchangebuyer2.accounts.creatives.watch": -type AccountsClientsInvitationsListCall struct { - s *Service - accountId int64 - clientAccountId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AccountsCreativesWatchCall struct { + s *Service + accountId string + creativeId string + watchcreativerequest *WatchCreativeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Lists all the client users invitations for a client -// with a given account ID. -func (r *AccountsClientsInvitationsService) List(accountId int64, clientAccountId string) *AccountsClientsInvitationsListCall { - c := &AccountsClientsInvitationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Watch: Watches a creative. Will result in push notifications being +// sent to the +// topic when the creative changes status. +func (r *AccountsCreativesService) Watch(accountId string, creativeId string, watchcreativerequest *WatchCreativeRequest) *AccountsCreativesWatchCall { + c := &AccountsCreativesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - c.clientAccountId = clientAccountId - return c -} - -// PageSize sets the optional parameter "pageSize": Requested page size. -// Server may return fewer clients than requested. -// If unspecified, server will pick an appropriate default. -func (c *AccountsClientsInvitationsListCall) PageSize(pageSize int64) *AccountsClientsInvitationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": A token -// identifying a page of results the server should return. -// Typically, this is the value -// of -// ListClientUserInvitationsResponse.nextPageToken -// returned from the previous call to -// the -// clients.invitations.list -// method. -func (c *AccountsClientsInvitationsListCall) PageToken(pageToken string) *AccountsClientsInvitationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.creativeId = creativeId + c.watchcreativerequest = watchcreativerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsInvitationsListCall) Fields(s ...googleapi.Field) *AccountsClientsInvitationsListCall { +func (c *AccountsCreativesWatchCall) Fields(s ...googleapi.Field) *AccountsCreativesWatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AccountsClientsInvitationsListCall) IfNoneMatch(entityTag string) *AccountsClientsInvitationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsInvitationsListCall) Context(ctx context.Context) *AccountsClientsInvitationsListCall { +func (c *AccountsCreativesWatchCall) Context(ctx context.Context) *AccountsCreativesWatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AccountsClientsInvitationsListCall) Header() http.Header { +func (c *AccountsCreativesWatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsInvitationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.watchcreativerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives/{creativeId}:watch") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": c.clientAccountId, + "accountId": c.accountId, + "creativeId": c.creativeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "adexchangebuyer2.accounts.clients.invitations.list" call. -// Exactly one of *ListClientUserInvitationsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListClientUserInvitationsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *AccountsClientsInvitationsListCall) Do(opts ...googleapi.CallOption) (*ListClientUserInvitationsResponse, error) { +} + +// Do executes the "adexchangebuyer2.accounts.creatives.watch" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *AccountsCreativesWatchCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1546,7 +4402,7 @@ func (c *AccountsClientsInvitationsListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListClientUserInvitationsResponse{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1558,43 +4414,34 @@ func (c *AccountsClientsInvitationsListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Lists all the client users invitations for a client\nwith a given account ID.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", - // "httpMethod": "GET", - // "id": "adexchangebuyer2.accounts.clients.invitations.list", + // "description": "Watches a creative. Will result in push notifications being sent to the\ntopic when the creative changes status.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}:watch", + // "httpMethod": "POST", + // "id": "adexchangebuyer2.accounts.creatives.watch", // "parameterOrder": [ // "accountId", - // "clientAccountId" + // "creativeId" // ], // "parameters": { // "accountId": { - // "description": "Numerical account ID of the client's sponsor buyer. (required)", - // "format": "int64", + // "description": "The account of the creative to watch.", // "location": "path", // "required": true, // "type": "string" // }, - // "clientAccountId": { - // "description": "Numerical account ID of the client buyer to list invitations for.\n(required)\nYou must either specify a string representation of a\nnumerical account identifier or the `-` character\nto list all the invitations for all the clients\nof a given sponsor buyer.", + // "creativeId": { + // "description": "The creative ID to watch for status changes.\nSpecify \"-\" to watch all creatives under the above account.\nIf both creative-level and account-level notifications are\nsent, only a single notification will be sent to the\ncreative-level notification topic.", // "location": "path", // "required": true, // "type": "string" - // }, - // "pageSize": { - // "description": "Requested page size. Server may return fewer clients than requested.\nIf unspecified, server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUserInvitationsResponse.nextPageToken\nreturned from the previous call to the\nclients.invitations.list\nmethod.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/invitations", + // "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}:watch", + // "request": { + // "$ref": "WatchCreativeRequest" + // }, // "response": { - // "$ref": "ListClientUserInvitationsResponse" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -1603,115 +4450,85 @@ func (c *AccountsClientsInvitationsListCall) Do(opts ...googleapi.CallOption) (* } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AccountsClientsInvitationsListCall) Pages(ctx context.Context, f func(*ListClientUserInvitationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} +// method id "adexchangebuyer2.accounts.creatives.dealAssociations.add": -// method id "adexchangebuyer2.accounts.clients.users.get": - -type AccountsClientsUsersGetCall struct { - s *Service - accountId int64 - clientAccountId int64 - userId int64 - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AccountsCreativesDealAssociationsAddCall struct { + s *Service + accountId string + creativeId string + adddealassociationrequest *AddDealAssociationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves an existing client user. -func (r *AccountsClientsUsersService) Get(accountId int64, clientAccountId int64, userId int64) *AccountsClientsUsersGetCall { - c := &AccountsClientsUsersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Add: Associate an existing deal with a creative. +func (r *AccountsCreativesDealAssociationsService) Add(accountId string, creativeId string, adddealassociationrequest *AddDealAssociationRequest) *AccountsCreativesDealAssociationsAddCall { + c := &AccountsCreativesDealAssociationsAddCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - c.clientAccountId = clientAccountId - c.userId = userId + c.creativeId = creativeId + c.adddealassociationrequest = adddealassociationrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsUsersGetCall) Fields(s ...googleapi.Field) *AccountsClientsUsersGetCall { +func (c *AccountsCreativesDealAssociationsAddCall) Fields(s ...googleapi.Field) *AccountsCreativesDealAssociationsAddCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AccountsClientsUsersGetCall) IfNoneMatch(entityTag string) *AccountsClientsUsersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsUsersGetCall) Context(ctx context.Context) *AccountsClientsUsersGetCall { +func (c *AccountsCreativesDealAssociationsAddCall) Context(ctx context.Context) *AccountsCreativesDealAssociationsAddCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AccountsClientsUsersGetCall) Header() http.Header { +func (c *AccountsCreativesDealAssociationsAddCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsUsersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesDealAssociationsAddCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.adddealassociationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:add") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), - "userId": strconv.FormatInt(c.userId, 10), + "accountId": c.accountId, + "creativeId": c.creativeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.users.get" call. -// Exactly one of *ClientUser or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ClientUser.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AccountsClientsUsersGetCall) Do(opts ...googleapi.CallOption) (*ClientUser, error) { +// Do executes the "adexchangebuyer2.accounts.creatives.dealAssociations.add" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AccountsCreativesDealAssociationsAddCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1730,7 +4547,7 @@ func (c *AccountsClientsUsersGetCall) Do(opts ...googleapi.CallOption) (*ClientU if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ClientUser{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1742,41 +4559,34 @@ func (c *AccountsClientsUsersGetCall) Do(opts ...googleapi.CallOption) (*ClientU } return ret, nil // { - // "description": "Retrieves an existing client user.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", - // "httpMethod": "GET", - // "id": "adexchangebuyer2.accounts.clients.users.get", + // "description": "Associate an existing deal with a creative.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:add", + // "httpMethod": "POST", + // "id": "adexchangebuyer2.accounts.creatives.dealAssociations.add", // "parameterOrder": [ // "accountId", - // "clientAccountId", - // "userId" + // "creativeId" // ], // "parameters": { // "accountId": { - // "description": "Numerical account ID of the client's sponsor buyer. (required)", - // "format": "int64", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "clientAccountId": { - // "description": "Numerical account ID of the client buyer\nthat the user to be retrieved is associated with. (required)", - // "format": "int64", + // "description": "The account the creative belongs to.", // "location": "path", // "required": true, // "type": "string" // }, - // "userId": { - // "description": "Numerical identifier of the user to retrieve. (required)", - // "format": "int64", + // "creativeId": { + // "description": "The ID of the creative associated with the deal.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + // "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:add", + // "request": { + // "$ref": "AddDealAssociationRequest" + // }, // "response": { - // "$ref": "ClientUser" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -1785,31 +4595,30 @@ func (c *AccountsClientsUsersGetCall) Do(opts ...googleapi.CallOption) (*ClientU } -// method id "adexchangebuyer2.accounts.clients.users.list": +// method id "adexchangebuyer2.accounts.creatives.dealAssociations.list": -type AccountsClientsUsersListCall struct { - s *Service - accountId int64 - clientAccountId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AccountsCreativesDealAssociationsListCall struct { + s *Service + accountId string + creativeId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Lists all the known client users for a specified -// sponsor buyer account ID. -func (r *AccountsClientsUsersService) List(accountId int64, clientAccountId string) *AccountsClientsUsersListCall { - c := &AccountsClientsUsersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: List all creative-deal associations. 
+func (r *AccountsCreativesDealAssociationsService) List(accountId string, creativeId string) *AccountsCreativesDealAssociationsListCall { + c := &AccountsCreativesDealAssociationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - c.clientAccountId = clientAccountId + c.creativeId = creativeId return c } // PageSize sets the optional parameter "pageSize": Requested page size. -// The server may return fewer clients than requested. -// If unspecified, the server will pick an appropriate default. -func (c *AccountsClientsUsersListCall) PageSize(pageSize int64) *AccountsClientsUsersListCall { +// Server may return fewer associations than requested. +// If unspecified, server will pick an appropriate default. +func (c *AccountsCreativesDealAssociationsListCall) PageSize(pageSize int64) *AccountsCreativesDealAssociationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -1818,18 +4627,40 @@ func (c *AccountsClientsUsersListCall) PageSize(pageSize int64) *AccountsClients // identifying a page of results the server should return. // Typically, this is the value // of -// ListClientUsersResponse.nextPageToken -// returned from the previous call to the -// accounts.clients.users.list method. -func (c *AccountsClientsUsersListCall) PageToken(pageToken string) *AccountsClientsUsersListCall { +// ListDealAssociationsResponse.next_page_token +// returned from the previous call to 'ListDealAssociations' method. +func (c *AccountsCreativesDealAssociationsListCall) PageToken(pageToken string) *AccountsCreativesDealAssociationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } +// Query sets the optional parameter "query": An optional query string +// to filter deal associations. If no filter is +// specified, all associations will be returned. +// Supported queries +// are: +//
+// <ul>
+// <li>accountId=<i>account_id_string</i>
+// <li>creativeId=<i>creative_id_string</i>
+// <li>dealsId=<i>deals_id_string</i>
+// <li>dealsStatus:{approved, conditionally_approved, disapproved,
+// not_checked}
+// <li>openAuctionStatus:{approved, conditionally_approved, disapproved,
+// not_checked}
+// </ul>
      +// Example: 'dealsId=12345 AND dealsStatus:disapproved' +func (c *AccountsCreativesDealAssociationsListCall) Query(query string) *AccountsCreativesDealAssociationsListCall { + c.urlParams_.Set("query", query) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsUsersListCall) Fields(s ...googleapi.Field) *AccountsClientsUsersListCall { +func (c *AccountsCreativesDealAssociationsListCall) Fields(s ...googleapi.Field) *AccountsCreativesDealAssociationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -1839,7 +4670,7 @@ func (c *AccountsClientsUsersListCall) Fields(s ...googleapi.Field) *AccountsCli // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AccountsClientsUsersListCall) IfNoneMatch(entityTag string) *AccountsClientsUsersListCall { +func (c *AccountsCreativesDealAssociationsListCall) IfNoneMatch(entityTag string) *AccountsCreativesDealAssociationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -1847,50 +4678,51 @@ func (c *AccountsClientsUsersListCall) IfNoneMatch(entityTag string) *AccountsCl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsUsersListCall) Context(ctx context.Context) *AccountsClientsUsersListCall { +func (c *AccountsCreativesDealAssociationsListCall) Context(ctx context.Context) *AccountsCreativesDealAssociationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AccountsClientsUsersListCall) Header() http.Header { +func (c *AccountsCreativesDealAssociationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsUsersListCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesDealAssociationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": c.clientAccountId, + "accountId": c.accountId, + "creativeId": c.creativeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.users.list" call. -// Exactly one of *ListClientUsersResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListClientUsersResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use +// Do executes the "adexchangebuyer2.accounts.creatives.dealAssociations.list" call. +// Exactly one of *ListDealAssociationsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListDealAssociationsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AccountsClientsUsersListCall) Do(opts ...googleapi.CallOption) (*ListClientUsersResponse, error) { +func (c *AccountsCreativesDealAssociationsListCall) Do(opts ...googleapi.CallOption) (*ListDealAssociationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1909,7 +4741,7 @@ func (c *AccountsClientsUsersListCall) Do(opts ...googleapi.CallOption) (*ListCl if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListClientUsersResponse{ + ret := &ListDealAssociationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1921,43 +4753,47 @@ func (c *AccountsClientsUsersListCall) Do(opts ...googleapi.CallOption) (*ListCl } return ret, nil // { - // "description": "Lists all the known client users for a specified\nsponsor buyer account ID.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", + // "description": "List all creative-deal associations.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations", // "httpMethod": "GET", - // "id": "adexchangebuyer2.accounts.clients.users.list", + // "id": "adexchangebuyer2.accounts.creatives.dealAssociations.list", // "parameterOrder": [ // "accountId", - // "clientAccountId" + // "creativeId" // ], // "parameters": { // "accountId": { - // "description": "Numerical account ID of the sponsor buyer of the client to list users for.\n(required)", - // "format": "int64", + // "description": "The account to list the associations from.\nSpecify \"-\" to list all creatives the current user has access to.", // "location": "path", // "required": true, // "type": "string" // }, - // "clientAccountId": { - // "description": "The account ID of the client buyer to list users for. (required)\nYou must specify either a string representation of a\nnumerical account identifier or the `-` character\nto list all the client users for all the clients\nof a given sponsor buyer.", + // "creativeId": { + // "description": "The creative ID to list the associations from.\nSpecify \"-\" to list all creatives under the above account.", // "location": "path", // "required": true, // "type": "string" // }, // "pageSize": { - // "description": "Requested page size. The server may return fewer clients than requested.\nIf unspecified, the server will pick an appropriate default.", + // "description": "Requested page size. 
Server may return fewer associations than requested.\nIf unspecified, server will pick an appropriate default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListClientUsersResponse.nextPageToken\nreturned from the previous call to the\naccounts.clients.users.list method.", + // "description": "A token identifying a page of results the server should return.\nTypically, this is the value of\nListDealAssociationsResponse.next_page_token\nreturned from the previous call to 'ListDealAssociations' method.", + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "An optional query string to filter deal associations. If no filter is\nspecified, all associations will be returned.\nSupported queries are:\n\u003cul\u003e\n\u003cli\u003eaccountId=\u003ci\u003eaccount_id_string\u003c/i\u003e\n\u003cli\u003ecreativeId=\u003ci\u003ecreative_id_string\u003c/i\u003e\n\u003cli\u003edealsId=\u003ci\u003edeals_id_string\u003c/i\u003e\n\u003cli\u003edealsStatus:{approved, conditionally_approved, disapproved,\n not_checked}\n\u003cli\u003eopenAuctionStatus:{approved, conditionally_approved, disapproved,\n not_checked}\n\u003c/ul\u003e\nExample: 'dealsId=12345 AND dealsStatus:disapproved'", // "location": "query", // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users", + // "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations", // "response": { - // "$ref": "ListClientUsersResponse" + // "$ref": "ListDealAssociationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" @@ -1969,7 +4805,7 @@ func (c *AccountsClientsUsersListCall) Do(opts ...googleapi.CallOption) (*ListCl // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AccountsClientsUsersListCall) Pages(ctx context.Context, f func(*ListClientUsersResponse) error) error { +func (c *AccountsCreativesDealAssociationsListCall) Pages(ctx context.Context, f func(*ListDealAssociationsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -1987,34 +4823,31 @@ func (c *AccountsClientsUsersListCall) Pages(ctx context.Context, f func(*ListCl } } -// method id "adexchangebuyer2.accounts.clients.users.update": +// method id "adexchangebuyer2.accounts.creatives.dealAssociations.remove": -type AccountsClientsUsersUpdateCall struct { - s *Service - accountId int64 - clientAccountId int64 - userId int64 - clientuser *ClientUser - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AccountsCreativesDealAssociationsRemoveCall struct { + s *Service + accountId string + creativeId string + removedealassociationrequest *RemoveDealAssociationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an existing client user. -// Only the user status can be changed on update. -func (r *AccountsClientsUsersService) Update(accountId int64, clientAccountId int64, userId int64, clientuser *ClientUser) *AccountsClientsUsersUpdateCall { - c := &AccountsClientsUsersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Remove: Remove the association between a deal and a creative. 
+func (r *AccountsCreativesDealAssociationsService) Remove(accountId string, creativeId string, removedealassociationrequest *RemoveDealAssociationRequest) *AccountsCreativesDealAssociationsRemoveCall { + c := &AccountsCreativesDealAssociationsRemoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.accountId = accountId - c.clientAccountId = clientAccountId - c.userId = userId - c.clientuser = clientuser + c.creativeId = creativeId + c.removedealassociationrequest = removedealassociationrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AccountsClientsUsersUpdateCall) Fields(s ...googleapi.Field) *AccountsClientsUsersUpdateCall { +func (c *AccountsCreativesDealAssociationsRemoveCall) Fields(s ...googleapi.Field) *AccountsCreativesDealAssociationsRemoveCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2022,53 +4855,53 @@ func (c *AccountsClientsUsersUpdateCall) Fields(s ...googleapi.Field) *AccountsC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AccountsClientsUsersUpdateCall) Context(ctx context.Context) *AccountsClientsUsersUpdateCall { +func (c *AccountsCreativesDealAssociationsRemoveCall) Context(ctx context.Context) *AccountsCreativesDealAssociationsRemoveCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AccountsClientsUsersUpdateCall) Header() http.Header { +func (c *AccountsCreativesDealAssociationsRemoveCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AccountsClientsUsersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *AccountsCreativesDealAssociationsRemoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.clientuser) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.removedealassociationrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:remove") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "accountId": strconv.FormatInt(c.accountId, 10), - "clientAccountId": strconv.FormatInt(c.clientAccountId, 10), - "userId": strconv.FormatInt(c.userId, 10), + "accountId": c.accountId, + "creativeId": c.creativeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "adexchangebuyer2.accounts.clients.users.update" call. -// Exactly one of *ClientUser or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ClientUser.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AccountsClientsUsersUpdateCall) Do(opts ...googleapi.CallOption) (*ClientUser, error) { +// Do executes the "adexchangebuyer2.accounts.creatives.dealAssociations.remove" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AccountsCreativesDealAssociationsRemoveCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2087,7 +4920,7 @@ func (c *AccountsClientsUsersUpdateCall) Do(opts ...googleapi.CallOption) (*Clie if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ClientUser{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2099,44 +4932,34 @@ func (c *AccountsClientsUsersUpdateCall) Do(opts ...googleapi.CallOption) (*Clie } return ret, nil // { - // "description": "Updates an existing client user.\nOnly the user status can be changed on update.", - // "flatPath": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", - // "httpMethod": "PUT", - // "id": "adexchangebuyer2.accounts.clients.users.update", + // "description": "Remove the association between a deal and a creative.", + // "flatPath": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:remove", + // "httpMethod": "POST", + // "id": "adexchangebuyer2.accounts.creatives.dealAssociations.remove", // "parameterOrder": [ // "accountId", - // "clientAccountId", - // "userId" + // "creativeId" // ], // "parameters": { // "accountId": { - // "description": "Numerical account ID of the client's sponsor buyer. (required)", - // "format": "int64", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "clientAccountId": { - // "description": "Numerical account ID of the client buyer that the user to be retrieved\nis associated with. (required)", - // "format": "int64", + // "description": "The account the creative belongs to.", // "location": "path", // "required": true, // "type": "string" // }, - // "userId": { - // "description": "Numerical identifier of the user to retrieve. 
(required)", - // "format": "int64", + // "creativeId": { + // "description": "The ID of the creative associated with the deal.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "v2beta1/accounts/{accountId}/clients/{clientAccountId}/users/{userId}", + // "path": "v2beta1/accounts/{accountId}/creatives/{creativeId}/dealAssociations:remove", // "request": { - // "$ref": "ClientUser" + // "$ref": "RemoveDealAssociationRequest" // }, // "response": { - // "$ref": "ClientUser" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/adexchange.buyer" diff --git a/vendor/google.golang.org/api/adexchangeseller/v1.1/adexchangeseller-gen.go b/vendor/google.golang.org/api/adexchangeseller/v1.1/adexchangeseller-gen.go index a3b2aee11..abc0b9096 100644 --- a/vendor/google.golang.org/api/adexchangeseller/v1.1/adexchangeseller-gen.go +++ b/vendor/google.golang.org/api/adexchangeseller/v1.1/adexchangeseller-gen.go @@ -72,9 +72,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -102,6 +103,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} return rs @@ -1175,6 +1180,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1327,6 +1333,7 @@ func (c *AdclientsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1489,6 +1496,7 @@ func (c *AdunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1659,6 +1667,7 @@ func (c *AdunitsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1856,6 +1865,7 @@ func (c *AdunitsCustomchannelsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2043,6 +2053,7 @@ func (c *AlertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2176,6 
+2187,7 @@ func (c *CustomchannelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2340,6 +2352,7 @@ func (c *CustomchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2537,6 +2550,7 @@ func (c *CustomchannelsAdunitsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2721,6 +2735,7 @@ func (c *MetadataDimensionsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2844,6 +2859,7 @@ func (c *MetadataMetricsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2968,6 +2984,7 @@ func (c *PreferreddealsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3104,6 +3121,7 @@ func (c *PreferreddealsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3282,6 +3300,7 @@ func (c *ReportsGenerateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3516,6 +3535,7 @@ func (c *ReportsSavedGenerateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3690,6 +3710,7 @@ func (c *ReportsSavedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3867,6 +3888,7 @@ func (c *UrlchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/adexchangeseller/v1/adexchangeseller-gen.go b/vendor/google.golang.org/api/adexchangeseller/v1/adexchangeseller-gen.go index 10b943f76..e6827a91b 100644 --- 
a/vendor/google.golang.org/api/adexchangeseller/v1/adexchangeseller-gen.go +++ b/vendor/google.golang.org/api/adexchangeseller/v1/adexchangeseller-gen.go @@ -68,9 +68,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Adclients *AdclientsService @@ -90,6 +91,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAdclientsService(s *Service) *AdclientsService { rs := &AdclientsService{s: s} return rs @@ -809,6 +814,7 @@ func (c *AdclientsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -971,6 +977,7 @@ func (c *AdunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1141,6 +1148,7 @@ func (c *AdunitsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1338,6 +1346,7 @@ func (c *AdunitsCustomchannelsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1520,6 +1529,7 @@ func (c *CustomchannelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1684,6 +1694,7 @@ func (c *CustomchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1881,6 +1892,7 @@ func (c *CustomchannelsAdunitsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2120,6 +2132,7 @@ func (c *ReportsGenerateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2354,6 +2367,7 @@ func (c *ReportsSavedGenerateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2528,6 +2542,7 @@ func (c *ReportsSavedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2705,6 +2720,7 @@ func (c *UrlchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/adexchangeseller/v2.0/adexchangeseller-gen.go b/vendor/google.golang.org/api/adexchangeseller/v2.0/adexchangeseller-gen.go index 5b54c3abd..bc1cabf9f 100644 --- a/vendor/google.golang.org/api/adexchangeseller/v2.0/adexchangeseller-gen.go +++ b/vendor/google.golang.org/api/adexchangeseller/v2.0/adexchangeseller-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Adclients = NewAccountsAdclientsService(s) @@ -1085,6 +1090,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1237,6 +1243,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1413,6 +1420,7 @@ func (c *AccountsAdclientsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1594,6 +1602,7 @@ func (c *AccountsAlertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1741,6 +1750,7 @@ func (c *AccountsCustomchannelsGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1915,6 +1925,7 @@ func (c *AccountsCustomchannelsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2096,6 +2107,7 @@ func (c *AccountsMetadataDimensionsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2235,6 +2247,7 @@ func (c *AccountsMetadataMetricsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2375,6 +2388,7 @@ func (c *AccountsPreferreddealsGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2521,6 +2535,7 @@ func (c *AccountsPreferreddealsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2715,6 +2730,7 @@ func (c *AccountsReportsGenerateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2961,6 +2977,7 @@ func (c *AccountsReportsSavedGenerateCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3145,6 +3162,7 @@ func (c *AccountsReportsSavedListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3336,6 +3354,7 @@ func (c *AccountsUrlchannelsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/admin/datatransfer/v1/admin-gen.go b/vendor/google.golang.org/api/admin/datatransfer/v1/admin-gen.go index fc783ea17..9cfda8537 100644 --- a/vendor/google.golang.org/api/admin/datatransfer/v1/admin-gen.go +++ b/vendor/google.golang.org/api/admin/datatransfer/v1/admin-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Applications *ApplicationsService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", 
s.GoogleClientHeaderElement) +} + func NewApplicationsService(s *Service) *ApplicationsService { rs := &ApplicationsService{s: s} return rs @@ -423,6 +428,7 @@ func (c *ApplicationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -582,6 +588,7 @@ func (c *ApplicationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -747,6 +754,7 @@ func (c *TransfersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -874,6 +882,7 @@ func (c *TransfersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.datatransfer) if err != nil { @@ -1042,6 +1051,7 @@ func (c *TransfersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go b/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go index be71a182e..c25c96f90 100644 --- a/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go +++ b/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go @@ -157,9 +157,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Asps *AspsService @@ -207,6 +208,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAspsService(s *Service) *AspsService { rs := &AspsService{s: s} return rs @@ -3456,6 +3461,7 @@ func (c *AspsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/{userKey}/asps/{codeId}") @@ -3573,6 +3579,7 @@ func (c *AspsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3719,6 +3726,7 @@ func (c *AspsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3845,6 +3853,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -3942,6 +3951,7 @@ func (c *ChromeosdevicesActionCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.chromeosdeviceaction) if err != nil { @@ -4078,6 +4088,7 @@ func (c *ChromeosdevicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4300,6 +4311,7 @@ func (c *ChromeosdevicesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4530,6 +4542,7 @@ func (c *ChromeosdevicesPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.chromeosdevice) if err != nil { @@ -4698,6 +4711,7 @@ func (c *ChromeosdevicesUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.chromeosdevice) if err != nil { @@ -4861,6 +4875,7 @@ func (c *CustomersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4990,6 +5005,7 @@ func (c *CustomersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customer) if err != nil { @@ -5123,6 +5139,7 @@ func (c *CustomersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customer) if err != nil { @@ -5256,6 +5273,7 @@ func (c *DomainAliasesDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customer}/domainaliases/{domainAliasName}") @@ -5372,6 +5390,7 @@ func (c *DomainAliasesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5509,6 +5528,7 @@ func (c *DomainAliasesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.domainalias) if err != nil { @@ -5658,6 +5678,7 @@ func (c *DomainAliasesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5792,6 +5813,7 @@ func (c *DomainsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customer}/domains/{domainName}") @@ -5908,6 +5930,7 @@ func (c *DomainsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6045,6 +6068,7 @@ func (c *DomainsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.domains) if err != nil { @@ -6187,6 +6211,7 @@ func (c *DomainsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6314,6 +6339,7 @@ func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "groups/{groupKey}") @@ -6420,6 +6446,7 @@ func (c *GroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6547,6 +6574,7 @@ func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -6711,6 +6739,7 @@ func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6876,6 +6905,7 @@ func (c *GroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -7009,6 +7039,7 @@ func (c *GroupsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -7142,6 +7173,7 @@ func (c *GroupsAliasesDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "groups/{groupKey}/aliases/{alias}") @@ -7247,6 +7279,7 @@ func (c *GroupsAliasesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.alias) if err != nil { @@ -7389,6 +7422,7 @@ func (c *GroupsAliasesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7519,6 +7553,7 @@ func (c *MembersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "groups/{groupKey}/members/{memberKey}") @@ -7636,6 +7671,7 @@ func (c *MembersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7775,6 +7811,7 @@ func (c *MembersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.member) if err != nil { @@ -7939,6 +7976,7 @@ func (c *MembersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8111,6 +8149,7 @@ func (c *MembersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.member) if err != nil { @@ -8255,6 +8294,7 @@ func (c *MembersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.member) if err != nil { @@ -8399,6 +8439,7 @@ func (c *MobiledevicesActionCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.mobiledeviceaction) if err != nil { @@ -8513,6 +8554,7 @@ func (c *MobiledevicesDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customerId}/devices/mobile/{resourceId}") @@ -8641,6 +8683,7 @@ func (c *MobiledevicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8864,6 +8907,7 @@ func (c *MobiledevicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9083,6 +9127,7 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customer}/notifications/{notificationId}") @@ -9199,6 +9244,7 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9366,6 +9412,7 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9533,6 +9580,7 @@ func (c *NotificationsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) if err != nil { @@ -9676,6 +9724,7 @@ func (c *NotificationsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) if err != nil { @@ -9817,6 +9866,7 @@ func (c *OrgunitsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customerId}/orgunits{/orgUnitPath*}") @@ -9934,6 +9984,7 @@ func (c *OrgunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10072,6 +10123,7 
@@ func (c *OrgunitsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.orgunit) if err != nil { @@ -10232,6 +10284,7 @@ func (c *OrgunitsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10383,6 +10436,7 @@ func (c *OrgunitsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.orgunit) if err != nil { @@ -10527,6 +10581,7 @@ func (c *OrgunitsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.orgunit) if err != nil { @@ -10678,6 +10733,7 @@ func (c *PrivilegesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10807,6 +10863,7 @@ func (c *ResourcesCalendarsDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customer}/resources/calendars/{calendarResourceId}") @@ -10923,6 +10980,7 @@ func (c *ResourcesCalendarsGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11060,6 +11118,7 @@ func (c *ResourcesCalendarsInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarresource) if err != nil { @@ -11216,6 +11275,7 @@ func (c *ResourcesCalendarsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11382,6 +11442,7 @@ func (c *ResourcesCalendarsPatchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarresource) if err != nil { @@ -11525,6 +11586,7 @@ func (c *ResourcesCalendarsUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.calendarresource) if err != nil { @@ -11666,6 +11728,7 @@ func (c *RoleAssignmentsDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customer}/roleassignments/{roleAssignmentId}") @@ -11782,6 +11845,7 @@ func (c *RoleAssignmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11919,6 +11983,7 @@ func (c *RoleAssignmentsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.roleassignment) if err != nil { @@ -12091,6 +12156,7 @@ func (c *RoleAssignmentsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12264,6 +12330,7 @@ func (c *RolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customer}/roles/{roleId}") @@ -12380,6 +12447,7 @@ func (c *RolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12517,6 +12585,7 @@ func (c *RolesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.role) if err != nil { @@ -12673,6 +12742,7 @@ func (c *RolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12838,6 +12908,7 @@ func (c *RolesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.role) if err != nil { @@ -12981,6 +13052,7 @@ func (c *RolesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.role) if err != nil { @@ -13122,6 +13194,7 @@ func (c *SchemasDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customer/{customerId}/schemas/{schemaKey}") @@ -13238,6 +13311,7 @@ func (c *SchemasGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13375,6 +13449,7 @@ func (c *SchemasInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.schema) if err != nil { @@ -13517,6 +13592,7 @@ func (c *SchemasListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13648,6 +13724,7 @@ func (c *SchemasPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.schema) if err != nil { @@ -13791,6 +13868,7 @@ func (c *SchemasUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.schema) if err != nil { @@ -13932,6 +14010,7 @@ func (c *TokensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/{userKey}/tokens/{clientId}") @@ -14048,6 +14127,7 @@ func (c *TokensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14194,6 +14274,7 @@ func (c *TokensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14320,6 +14401,7 @@ func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/{userKey}") @@ -14458,6 +14540,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14620,6 +14703,7 @@ func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -14861,6 +14945,7 @@ func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15115,6 +15200,7 @@ func (c *UsersMakeAdminCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.usermakeadmin) if err != nil { @@ -15220,6 +15306,7 @@ func (c *UsersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -15353,6 +15440,7 @@ func (c *UsersUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userundelete) if err != nil { @@ -15458,6 +15546,7 @@ func (c *UsersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -15703,6 +15792,7 @@ func (c *UsersWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -15942,6 +16032,7 @@ func (c *UsersAliasesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/{userKey}/aliases/{alias}") @@ -16048,6 +16139,7 @@ func (c *UsersAliasesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.alias) if err != nil { @@ -16202,6 +16294,7 @@ func (c *UsersAliasesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16358,6 +16451,7 @@ func (c *UsersAliasesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -16507,6 +16601,7 @@ func (c *UsersPhotosDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/{userKey}/photos/thumbnail") @@ -16613,6 +16708,7 @@ func (c *UsersPhotosGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16743,6 +16839,7 @@ func (c *UsersPhotosPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userphoto) if err != nil { @@ -16876,6 +16973,7 @@ func (c *UsersPhotosUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userphoto) if err != nil { @@ -17007,6 +17105,7 @@ func (c *VerificationCodesGenerateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/{userKey}/verificationCodes/generate") @@ -17103,6 +17202,7 @@ func (c *VerificationCodesInvalidateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/{userKey}/verificationCodes/invalidate") @@ -17210,6 +17310,7 @@ func (c *VerificationCodesListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/admin/reports/v1/admin-api.json b/vendor/google.golang.org/api/admin/reports/v1/admin-api.json index 343d12841..a95af13a2 100644 --- a/vendor/google.golang.org/api/admin/reports/v1/admin-api.json +++ b/vendor/google.golang.org/api/admin/reports/v1/admin-api.json @@ -1,6 +1,6 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/QGy4Wv90L7XHcKxA0VDj0fNkDc0\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/DCxy1_Jea5MRziGoQUK4JAZOHiE\"", "discoveryVersion": "v1", "id": "admin:reports_v1", "name": "admin", @@ -72,10 +72,10 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/admin.reports.audit.readonly": { - "description": "View audit reports of Google Apps for your domain" + "description": "View audit reports for your G Suite domain" }, "https://www.googleapis.com/auth/admin.reports.usage.readonly": { - "description": "View usage reports of Google Apps for your domain" + "description": "View usage reports for your G Suite domain" } } } diff --git a/vendor/google.golang.org/api/admin/reports/v1/admin-gen.go b/vendor/google.golang.org/api/admin/reports/v1/admin-gen.go index 7a4e58495..e279c38e2 100644 --- a/vendor/google.golang.org/api/admin/reports/v1/admin-gen.go +++ 
b/vendor/google.golang.org/api/admin/reports/v1/admin-gen.go @@ -47,10 +47,10 @@ const basePath = "https://www.googleapis.com/admin/reports/v1/" // OAuth2 scopes used by this API. const ( - // View audit reports of Google Apps for your domain + // View audit reports for your G Suite domain AdminReportsAuditReadonlyScope = "https://www.googleapis.com/auth/admin.reports.audit.readonly" - // View usage reports of Google Apps for your domain + // View usage reports for your G Suite domain AdminReportsUsageReadonlyScope = "https://www.googleapis.com/auth/admin.reports.usage.readonly" ) @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Activities *ActivitiesService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewActivitiesService(s *Service) *ActivitiesService { rs := &ActivitiesService{s: s} return rs @@ -775,6 +780,7 @@ func (c *ActivitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1041,6 +1047,7 @@ func (c *ActivitiesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -1230,6 +1237,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -1354,6 +1362,7 @@ func (c *CustomerUsageReportsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1569,6 +1578,7 @@ func (c *UserUsageReportGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/adsense/v1.3/adsense-gen.go b/vendor/google.golang.org/api/adsense/v1.3/adsense-gen.go index 4ec72e147..6fab5619a 100644 --- a/vendor/google.golang.org/api/adsense/v1.3/adsense-gen.go +++ b/vendor/google.golang.org/api/adsense/v1.3/adsense-gen.go @@ -72,9 +72,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional 
additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -102,6 +103,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Adclients = NewAccountsAdclientsService(s) @@ -1644,6 +1649,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1801,6 +1807,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1977,6 +1984,7 @@ func (c *AccountsAdclientsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2154,6 +2162,7 @@ func (c *AccountsAdunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2312,6 +2321,7 @@ func (c *AccountsAdunitsGetAdCodeCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2492,6 +2502,7 @@ func (c *AccountsAdunitsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2699,6 +2710,7 @@ func (c *AccountsAdunitsCustomchannelsListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2896,6 +2908,7 @@ func (c *AccountsAlertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3044,6 +3057,7 @@ func (c *AccountsCustomchannelsGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3218,6 +3232,7 @@ func (c *AccountsCustomchannelsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3425,6 +3440,7 @@ func (c 
*AccountsCustomchannelsAdunitsListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3691,6 +3707,7 @@ func (c *AccountsReportsGenerateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3948,6 +3965,7 @@ func (c *AccountsReportsSavedGenerateCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4132,6 +4150,7 @@ func (c *AccountsReportsSavedListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4306,6 +4325,7 @@ func (c *AccountsSavedadstylesGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4469,6 +4489,7 @@ func (c *AccountsSavedadstylesListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4660,6 +4681,7 @@ func (c *AccountsUrlchannelsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4854,6 +4876,7 @@ func (c *AdclientsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5016,6 +5039,7 @@ func (c *AdunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5164,6 +5188,7 @@ func (c *AdunitsGetAdCodeCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5334,6 +5359,7 @@ func (c *AdunitsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5531,6 +5557,7 @@ func (c *AdunitsCustomchannelsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5718,6 +5745,7 @@ func (c *AlertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5851,6 +5879,7 @@ func (c *CustomchannelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6015,6 +6044,7 @@ func (c *CustomchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6212,6 +6242,7 @@ func (c *CustomchannelsAdunitsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6396,6 +6427,7 @@ func (c *MetadataDimensionsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6519,6 +6551,7 @@ func (c *MetadataMetricsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6721,6 +6754,7 @@ func (c *ReportsGenerateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6972,6 +7006,7 @@ func (c *ReportsSavedGenerateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7146,6 +7181,7 @@ func (c *ReportsSavedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7306,6 +7342,7 @@ func (c *SavedadstylesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7459,6 +7496,7 @@ func (c *SavedadstylesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7636,6 +7674,7 @@ func (c *UrlchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/adsense/v1.4/adsense-gen.go b/vendor/google.golang.org/api/adsense/v1.4/adsense-gen.go index e69196943..30b49b689 100644 --- a/vendor/google.golang.org/api/adsense/v1.4/adsense-gen.go +++ b/vendor/google.golang.org/api/adsense/v1.4/adsense-gen.go @@ -73,9 +73,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -105,6 +106,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Adclients = NewAccountsAdclientsService(s) @@ -1758,6 +1763,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1915,6 +1921,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2091,6 +2098,7 @@ func (c *AccountsAdclientsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2268,6 +2276,7 @@ func (c *AccountsAdunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2426,6 +2435,7 @@ func (c *AccountsAdunitsGetAdCodeCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2606,6 +2616,7 @@ func (c *AccountsAdunitsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2813,6 +2824,7 @@ func (c *AccountsAdunitsCustomchannelsListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2993,6 +3005,7 @@ func (c *AccountsAlertsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/alerts/{alertId}") @@ -3116,6 +3129,7 @@ func (c *AccountsAlertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3264,6 +3278,7 @@ func (c *AccountsCustomchannelsGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3438,6 +3453,7 @@ func (c *AccountsCustomchannelsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3645,6 +3661,7 @@ func (c *AccountsCustomchannelsAdunitsListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3838,6 +3855,7 @@ func (c *AccountsPaymentsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4049,6 +4067,7 @@ func (c *AccountsReportsGenerateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4306,6 +4325,7 @@ func (c *AccountsReportsSavedGenerateCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4490,6 +4510,7 @@ func (c *AccountsReportsSavedListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4664,6 +4685,7 @@ func (c *AccountsSavedadstylesGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4827,6 +4849,7 @@ func (c *AccountsSavedadstylesListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5018,6 +5041,7 @@ func (c *AccountsUrlchannelsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5212,6 +5236,7 @@ 
func (c *AdclientsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5374,6 +5399,7 @@ func (c *AdunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5522,6 +5548,7 @@ func (c *AdunitsGetAdCodeCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5692,6 +5719,7 @@ func (c *AdunitsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5889,6 +5917,7 @@ func (c *AdunitsCustomchannelsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6059,6 +6088,7 @@ func (c *AlertsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "alerts/{alertId}") @@ -6172,6 +6202,7 @@ func (c *AlertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6305,6 +6336,7 @@ func (c *CustomchannelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6469,6 +6501,7 @@ func (c *CustomchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6666,6 +6699,7 @@ func (c *CustomchannelsAdunitsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6850,6 +6884,7 @@ func (c *MetadataDimensionsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6973,6 +7008,7 @@ func (c *MetadataMetricsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if 
c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7095,6 +7131,7 @@ func (c *PaymentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7297,6 +7334,7 @@ func (c *ReportsGenerateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7548,6 +7586,7 @@ func (c *ReportsSavedGenerateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7722,6 +7761,7 @@ func (c *ReportsSavedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7882,6 +7922,7 @@ func (c *SavedadstylesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8035,6 +8076,7 @@ func (c *SavedadstylesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8212,6 +8254,7 @@ func (c *UrlchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/adsensehost/v4.1/adsensehost-gen.go b/vendor/google.golang.org/api/adsensehost/v4.1/adsensehost-gen.go index 647b5061f..94c21eed3 100644 --- a/vendor/google.golang.org/api/adsensehost/v4.1/adsensehost-gen.go +++ b/vendor/google.golang.org/api/adsensehost/v4.1/adsensehost-gen.go @@ -66,9 +66,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -90,6 +91,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Adclients = NewAccountsAdclientsService(s) @@ -1099,6 +1104,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if 
c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1236,6 +1242,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1374,6 +1381,7 @@ func (c *AccountsAdclientsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1535,6 +1543,7 @@ func (c *AccountsAdclientsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1700,6 +1709,7 @@ func (c *AccountsAdunitsDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/adclients/{adClientId}/adunits/{adUnitId}") @@ -1854,6 +1864,7 @@ func (c *AccountsAdunitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2019,6 +2030,7 @@ func (c *AccountsAdunitsGetAdCodeCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2172,6 +2184,7 @@ func (c *AccountsAdunitsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.adunit) if err != nil { @@ -2347,6 +2360,7 @@ func (c *AccountsAdunitsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2526,6 +2540,7 @@ func (c *AccountsAdunitsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.adunit) if err != nil { @@ -2677,6 +2692,7 @@ func (c *AccountsAdunitsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.adunit) if err != nil { @@ -2883,6 +2899,7 @@ func (c *AccountsReportsGenerateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3087,6 +3104,7 @@ func (c *AdclientsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3238,6 +3256,7 @@ func (c *AdclientsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3412,6 +3431,7 @@ func (c *AssociationsessionsStartCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3578,6 +3598,7 @@ func (c *AssociationsessionsVerifyCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3704,6 +3725,7 @@ func (c *CustomchannelsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "adclients/{adClientId}/customchannels/{customChannelId}") @@ -3848,6 +3870,7 @@ func (c *CustomchannelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3984,6 +4007,7 @@ func (c *CustomchannelsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customchannel) if err != nil { @@ -4143,6 +4167,7 @@ func (c *CustomchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4307,6 +4332,7 @@ func (c *CustomchannelsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customchannel) if err != nil { @@ -4447,6 +4473,7 @@ func (c *CustomchannelsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customchannel) if err != nil { @@ -4643,6 +4670,7 @@ func (c *ReportsGenerateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) 
} @@ -4827,6 +4855,7 @@ func (c *UrlchannelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "adclients/{adClientId}/urlchannels/{urlChannelId}") @@ -4960,6 +4989,7 @@ func (c *UrlchannelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlchannel) if err != nil { @@ -5118,6 +5148,7 @@ func (c *UrlchannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/analytics/v2.4/analytics-gen.go b/vendor/google.golang.org/api/analytics/v2.4/analytics-gen.go index 00d1d85de..3c870a756 100644 --- a/vendor/google.golang.org/api/analytics/v2.4/analytics-gen.go +++ b/vendor/google.golang.org/api/analytics/v2.4/analytics-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Data *DataService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDataService(s *Service) *DataService { rs := &DataService{s: s} return rs @@ -265,6 +270,7 @@ func (c *DataGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -446,6 +452,7 @@ func (c *ManagementAccountsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -576,6 +583,7 @@ func (c *ManagementGoalsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -732,6 +740,7 @@ func (c *ManagementProfilesListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -876,6 +885,7 @@ func (c *ManagementSegmentsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1002,6 +1012,7 @@ func (c *ManagementWebpropertiesListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/analytics/v3/analytics-gen.go b/vendor/google.golang.org/api/analytics/v3/analytics-gen.go index 732cc6527..4c83cbb75 100644 --- a/vendor/google.golang.org/api/analytics/v3/analytics-gen.go +++ b/vendor/google.golang.org/api/analytics/v3/analytics-gen.go @@ -80,9 +80,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Data *DataService @@ -100,6 +101,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDataService(s *Service) *DataService { rs := &DataService{s: s} rs.Ga = NewDataGaService(s) @@ -5668,6 +5673,7 @@ func (c *DataGaGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5952,6 +5958,7 @@ func (c *DataMcfGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6188,6 +6195,7 @@ func (c *DataRealtimeGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6371,6 +6379,7 @@ func (c *ManagementAccountSummariesListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6501,6 +6510,7 @@ func (c *ManagementAccountUserLinksDeleteCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/entityUserLinks/{linkId}") @@ -6606,6 +6616,7 @@ func (c *ManagementAccountUserLinksInsertCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityuserlink) if err != nil { @@ -6763,6 +6774,7 @@ func (c *ManagementAccountUserLinksListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6908,6 +6920,7 @@ func (c *ManagementAccountUserLinksUpdateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityuserlink) if err != nil { @@ -7071,6 +7084,7 @@ func (c *ManagementAccountsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7228,6 +7242,7 @@ func (c *ManagementCustomDataSourcesListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7395,6 +7410,7 @@ func (c *ManagementCustomDimensionsGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7542,6 +7558,7 @@ func (c *ManagementCustomDimensionsInsertCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customdimension) if err != nil { @@ -7709,6 +7726,7 @@ func (c *ManagementCustomDimensionsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7873,6 +7891,7 @@ func (c *ManagementCustomDimensionsPatchCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customdimension) if err != nil { @@ -8041,6 +8060,7 @@ func (c *ManagementCustomDimensionsUpdateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customdimension) if err != nil { @@ -8209,6 +8229,7 @@ func (c *ManagementCustomMetricsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8356,6 +8377,7 @@ func (c *ManagementCustomMetricsInsertCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.custommetric) if err != nil { @@ -8523,6 +8545,7 @@ func (c *ManagementCustomMetricsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8687,6 +8710,7 @@ func (c *ManagementCustomMetricsPatchCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.custommetric) if err != nil { @@ -8855,6 +8879,7 @@ func (c *ManagementCustomMetricsUpdateCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.custommetric) if err != nil { @@ -9014,6 +9039,7 @@ func (c *ManagementExperimentsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/profiles/{profileId}/experiments/{experimentId}") @@ -9151,6 +9177,7 @@ func (c *ManagementExperimentsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9309,6 +9336,7 @@ func (c *ManagementExperimentsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.experiment) if err != nil { @@ -9487,6 +9515,7 @@ func (c *ManagementExperimentsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9656,6 +9685,7 @@ func (c *ManagementExperimentsPatchCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.experiment) if err != nil { @@ -9820,6 +9850,7 @@ func (c *ManagementExperimentsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.experiment) if err != nil { @@ -9978,6 +10009,7 @@ func (c *ManagementFiltersDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/filters/{filterId}") @@ -10122,6 +10154,7 @@ func (c *ManagementFiltersGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10259,6 +10292,7 @@ func (c 
*ManagementFiltersInsertCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.filter) if err != nil { @@ -10416,6 +10450,7 @@ func (c *ManagementFiltersListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10562,6 +10597,7 @@ func (c *ManagementFiltersPatchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.filter) if err != nil { @@ -10705,6 +10741,7 @@ func (c *ManagementFiltersUpdateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.filter) if err != nil { @@ -10861,6 +10898,7 @@ func (c *ManagementGoalsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11018,6 +11056,7 @@ func (c *ManagementGoalsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.goal) if err != nil { @@ -11195,6 +11234,7 @@ func (c *ManagementGoalsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11361,6 +11401,7 @@ func (c *ManagementGoalsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.goal) if err != nil { @@ -11524,6 +11565,7 @@ func (c *ManagementGoalsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.goal) if err != nil { @@ -11685,6 +11727,7 @@ func (c *ManagementProfileFilterLinksDeleteCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/profiles/{profileId}/profileFilterLinks/{linkId}") @@ -11825,6 +11868,7 @@ func (c *ManagementProfileFilterLinksGetCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if 
c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11986,6 +12030,7 @@ func (c *ManagementProfileFilterLinksInsertCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.profilefilterlink) if err != nil { @@ -12166,6 +12211,7 @@ func (c *ManagementProfileFilterLinksListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12332,6 +12378,7 @@ func (c *ManagementProfileFilterLinksPatchCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.profilefilterlink) if err != nil { @@ -12499,6 +12546,7 @@ func (c *ManagementProfileFilterLinksUpdateCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.profilefilterlink) if err != nil { @@ -12664,6 +12712,7 @@ func (c *ManagementProfileUserLinksDeleteCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/profiles/{profileId}/entityUserLinks/{linkId}") @@ -12789,6 +12838,7 @@ func (c *ManagementProfileUserLinksInsertCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityuserlink) if err != nil { @@ -12966,6 +13016,7 @@ func (c *ManagementProfileUserLinksListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13131,6 +13182,7 @@ func (c *ManagementProfileUserLinksUpdateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityuserlink) if err != nil { @@ -13290,6 +13342,7 @@ func (c *ManagementProfilesDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/profiles/{profileId}") @@ -13416,6 +13469,7 @@ func (c *ManagementProfilesGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", 
c.ifNoneMatch_) } @@ -13566,6 +13620,7 @@ func (c *ManagementProfilesInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.profile) if err != nil { @@ -13733,6 +13788,7 @@ func (c *ManagementProfilesListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13889,6 +13945,7 @@ func (c *ManagementProfilesPatchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.profile) if err != nil { @@ -14042,6 +14099,7 @@ func (c *ManagementProfilesUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.profile) if err != nil { @@ -14204,6 +14262,7 @@ func (c *ManagementRemarketingAudienceGetCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14351,6 +14410,7 @@ func (c *ManagementRemarketingAudienceInsertCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketingaudience) if err != nil { @@ -14524,6 +14584,7 @@ func (c *ManagementRemarketingAudienceListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14684,6 +14745,7 @@ func (c *ManagementRemarketingAudiencePatchCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketingaudience) if err != nil { @@ -14837,6 +14899,7 @@ func (c *ManagementRemarketingAudienceUpdateCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketingaudience) if err != nil { @@ -15008,6 +15071,7 @@ func (c *ManagementSegmentsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15143,6 +15207,7 @@ func (c *ManagementUnsampledReportsDeleteCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil 
c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/profiles/{profileId}/unsampledReports/{unsampledReportId}") @@ -15279,6 +15344,7 @@ func (c *ManagementUnsampledReportsGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15437,6 +15503,7 @@ func (c *ManagementUnsampledReportsInsertCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.unsampledreport) if err != nil { @@ -15615,6 +15682,7 @@ func (c *ManagementUnsampledReportsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15778,6 +15846,7 @@ func (c *ManagementUploadsDeleteUploadDataCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyticsdataimportdeleteuploaddatarequest) if err != nil { @@ -15918,6 +15987,7 @@ func (c *ManagementUploadsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16104,6 +16174,7 @@ func (c *ManagementUploadsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16322,6 +16393,7 @@ func (c *ManagementUploadsUploadDataCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/customDataSources/{customDataSourceId}/uploads") @@ -16533,6 +16605,7 @@ func (c *ManagementWebPropertyAdWordsLinksDeleteCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/entityAdWordsLinks/{webPropertyAdWordsLinkId}") @@ -16660,6 +16733,7 @@ func (c *ManagementWebPropertyAdWordsLinksGetCall) doRequest(alt string) (*http. 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16807,6 +16881,7 @@ func (c *ManagementWebPropertyAdWordsLinksInsertCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityadwordslink) if err != nil { @@ -16974,6 +17049,7 @@ func (c *ManagementWebPropertyAdWordsLinksListCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17130,6 +17206,7 @@ func (c *ManagementWebPropertyAdWordsLinksPatchCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityadwordslink) if err != nil { @@ -17283,6 +17360,7 @@ func (c *ManagementWebPropertyAdWordsLinksUpdateCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityadwordslink) if err != nil { @@ -17443,6 +17521,7 @@ func (c *ManagementWebpropertiesGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17584,6 +17663,7 @@ func (c *ManagementWebpropertiesInsertCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.webproperty) if err != nil { @@ -17741,6 +17821,7 @@ func (c *ManagementWebpropertiesListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17887,6 +17968,7 @@ func (c *ManagementWebpropertiesPatchCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.webproperty) if err != nil { @@ -18030,6 +18112,7 @@ func (c *ManagementWebpropertiesUpdateCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.webproperty) if err != nil { @@ -18173,6 +18256,7 @@ func (c *ManagementWebpropertyUserLinksDeleteCall) doRequest(alt string) (*http. 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "management/accounts/{accountId}/webproperties/{webPropertyId}/entityUserLinks/{linkId}") @@ -18288,6 +18372,7 @@ func (c *ManagementWebpropertyUserLinksInsertCall) doRequest(alt string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityuserlink) if err != nil { @@ -18455,6 +18540,7 @@ func (c *ManagementWebpropertyUserLinksListCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18610,6 +18696,7 @@ func (c *ManagementWebpropertyUserLinksUpdateCall) doRequest(alt string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entityuserlink) if err != nil { @@ -18768,6 +18855,7 @@ func (c *MetadataColumnsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18897,6 +18985,7 @@ func (c *ProvisioningCreateAccountTicketCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountticket) if err != nil { diff --git a/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-api.json b/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-api.json index 3cfc26312..51f74608c 100644 --- a/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-api.json +++ b/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-api.json @@ -1,268 +1,345 @@ { "id": "analyticsreporting:v4", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/analytics.readonly": { - "description": "View your Google Analytics data" - }, - "https://www.googleapis.com/auth/analytics": { - "description": "View and manage your Google Analytics data" - } - } - } - }, - "description": "Accesses Analytics report data.", - "protocol": "rest", - "title": "Google Analytics Reporting API", - "resources": { - "reports": { - "methods": { - "batchGet": { - "id": "analyticsreporting.reports.batchGet", - "response": { - "$ref": "GetReportsResponse" - }, - "parameterOrder": [], - "description": "Returns the Analytics data.", - "request": { - "$ref": "GetReportsRequest" - }, - "flatPath": "v4/reports:batchGet", - "httpMethod": "POST", - "parameters": {}, - "path": "v4/reports:batchGet", - "scopes": [ - "https://www.googleapis.com/auth/analytics", - "https://www.googleapis.com/auth/analytics.readonly" - ] - } - } - } - }, + "documentationLink": "https://developers.google.com/analytics/devguides/reporting/core/v4/", + "revision": "20161129", + "discoveryVersion": "v1", + "version_module": "True", "schemas": { - "PivotHeader": { - "description": "The headers 
for each of the pivot sections defined in the request.", - "type": "object", - "properties": { - "totalPivotGroupsCount": { - "description": "The total number of groups for this pivot.", - "type": "integer", - "format": "int32" - }, - "pivotHeaderEntries": { - "description": "A single pivot section header.", - "type": "array", - "items": { - "$ref": "PivotHeaderEntry" - } - } - }, - "id": "PivotHeader" - }, - "Metric": { - "description": "[Metrics](https://support.google.com/analytics/answer/1033861)\nare the quantitative measurements. For example, the metric `ga:users`\nindicates the total number of users for the requested time period.", + "SegmentMetricFilter": { + "description": "Metric filter to be used in a segment filter clause.", "type": "object", "properties": { - "alias": { - "description": "An alias for the metric expression is an alternate name for the\nexpression. The alias can be used for filtering and sorting. This field\nis optional and is useful if the expression is not a single metric but\na complex expression which cannot be used in filtering and sorting.\nThe alias is also used in the response column header.", + "comparisonValue": { + "description": "The value to compare against. If the operator is `BETWEEN`, this value is\ntreated as minimum comparison value.", "type": "string" }, - "formattingType": { - "description": "Specifies how the metric expression should be formatted, for example\n`INTEGER`.", + "operator": { "enum": [ - "METRIC_TYPE_UNSPECIFIED", - "INTEGER", - "FLOAT", - "CURRENCY", - "PERCENT", - "TIME" + "UNSPECIFIED_OPERATOR", + "LESS_THAN", + "GREATER_THAN", + "EQUAL", + "BETWEEN" ], + "description": "Specifies is the operation to perform to compare the metric. The default\nis `EQUAL`.", + "type": "string", "enumDescriptions": [ - "Metric type is unspecified.", - "Integer metric.", - "Float metric.", - "Currency metric.", - "Percentage metric.", - "Time metric in `HH:MM:SS` format." + "Unspecified operator is treated as `LESS_THAN` operator.", + "Checks if the metric value is less than comparison value.", + "Checks if the metric value is greater than comparison value.", + "Equals operator.", + "For between operator, both the minimum and maximum are exclusive.\nWe will use `LT` and `GT` for comparison." + ] + }, + "metricName": { + "type": "string", + "description": "The metric that will be filtered on. A `metricFilter` must contain a\nmetric name." + }, + "scope": { + "enum": [ + "UNSPECIFIED_SCOPE", + "PRODUCT", + "HIT", + "SESSION", + "USER" ], - "type": "string" + "description": "Scope for a metric defines the level at which that metric is defined. The\nspecified metric scope must be equal to or greater than its primary scope\nas defined in the data model. The primary scope is defined by if the\nsegment is selecting users or sessions.", + "type": "string", + "enumDescriptions": [ + "If the scope is unspecified, it defaults to the condition scope,\n`USER` or `SESSION` depending on if the segment is trying to choose\nusers or sessions.", + "Product scope.", + "Hit scope.", + "Session scope.", + "User scope." + ] }, - "expression": { - "description": "A metric expression in the request. An expression is constructed from one\nor more metrics and numbers. Accepted operators include: Plus (+), Minus\n(-), Negation (Unary -), Divided by (/), Multiplied by (*), Parenthesis,\nPositive cardinal numbers (0-9), can include decimals and is limited to\n1024 characters. 
Example `ga:totalRefunds/ga:users`, in most cases the\nmetric expression is just a single metric name like `ga:users`.\nAdding mixed `MetricType` (E.g., `CURRENCY` + `PERCENTAGE`) metrics\nwill result in unexpected results.", - "type": "string" + "maxComparisonValue": { + "type": "string", + "description": "Max comparison value is only used for `BETWEEN` operator." } }, - "id": "Metric" + "id": "SegmentMetricFilter" }, - "ColumnHeader": { - "description": "Column headers.", + "DateRangeValues": { + "description": "Used to return a list of metrics for a single DateRange / dimension\ncombination", "type": "object", "properties": { - "dimensions": { - "description": "The dimension names in the response.", + "values": { + "description": "Each value corresponds to each Metric in the request.", "type": "array", "items": { "type": "string" } }, - "metricHeader": { - "description": "Metric headers for the metrics in the response.", - "$ref": "MetricHeader" + "pivotValueRegions": { + "description": "The values of each pivot region.", + "type": "array", + "items": { + "$ref": "PivotValueRegion" + } } }, - "id": "ColumnHeader" + "id": "DateRangeValues" }, - "DynamicSegment": { - "description": "Dynamic segment definition for defining the segment within the request.\nA segment can select users, sessions or both.", - "type": "object", + "CohortGroup": { "properties": { - "sessionSegment": { - "description": "Session Segment to select sessions to include in the segment.", - "$ref": "SegmentDefinition" - }, - "name": { - "description": "The name of the dynamic segment.", - "type": "string" + "lifetimeValue": { + "description": "Enable Life Time Value (LTV). LTV measures lifetime value for users\nacquired through different channels.\nPlease see:\n[Cohort Analysis](https://support.google.com/analytics/answer/6074676) and\n[Lifetime Value](https://support.google.com/analytics/answer/6182550)\nIf the value of lifetimeValue is false:\n\n- The metric values are similar to the values in the web interface cohort\n report.\n- The cohort definition date ranges must be aligned to the calendar week\n and month. i.e. 
while requesting `ga:cohortNthWeek` the `startDate` in\n the cohort definition should be a Sunday and the `endDate` should be the\n following Saturday, and for `ga:cohortNthMonth`, the `startDate`\n should be the 1st of the month and `endDate` should be the last day\n of the month.\n\nWhen the lifetimeValue is true:\n\n- The metric values will correspond to the values in the web interface\n LifeTime value report.\n- The Lifetime Value report shows you how user value (Revenue) and\n engagement (Appviews, Goal Completions, Sessions, and Session Duration)\n grow during the 90 days after a user is acquired.\n- The metrics are calculated as a cumulative average per user per the time\n increment.\n- The cohort definition date ranges need not be aligned to the calendar\n week and month boundaries.\n- The `viewId` must be an\n [app view ID](https://support.google.com/analytics/answer/2649553#WebVersusAppViews)", + "type": "boolean" }, - "userSegment": { - "description": "User Segment to select users to include in the segment.", - "$ref": "SegmentDefinition" + "cohorts": { + "description": "The definition for the cohort.", + "type": "array", + "items": { + "$ref": "Cohort" + } } }, - "id": "DynamicSegment" + "id": "CohortGroup", + "description": "Defines a cohort group.\nFor example:\n\n \"cohortGroup\": {\n \"cohorts\": [{\n \"name\": \"cohort 1\",\n \"type\": \"FIRST_VISIT_DATE\",\n \"dateRange\": { \"startDate\": \"2015-08-01\", \"endDate\": \"2015-08-01\" }\n },{\n \"name\": \"cohort 2\"\n \"type\": \"FIRST_VISIT_DATE\"\n \"dateRange\": { \"startDate\": \"2015-07-01\", \"endDate\": \"2015-07-01\" }\n }]\n }", + "type": "object" }, - "MetricHeader": { - "description": "The headers for the metrics.", + "GetReportsRequest": { + "properties": { + "reportRequests": { + "description": "Requests, each request will have a separate response.\nThere can be a maximum of 5 requests. All requests should have the same\n`dateRanges`, `viewId`, `segments`, `samplingLevel`, and `cohortGroup`.", + "type": "array", + "items": { + "$ref": "ReportRequest" + } + } + }, + "id": "GetReportsRequest", + "description": "The batch request containing multiple report request.", + "type": "object" + }, + "Pivot": { + "description": "The Pivot describes the pivot section in the request.\nThe Pivot helps rearrange the information in the table for certain reports\nby pivoting your data on a second dimension.", "type": "object", "properties": { - "metricHeaderEntries": { - "description": "Headers for the metrics in the response.", + "maxGroupCount": { + "description": "Specifies the maximum number of groups to return.\nThe default value is 10, also the maximum value is 1,000.", + "format": "int32", + "type": "integer" + }, + "startGroup": { + "description": "If k metrics were requested, then the response will contain some\ndata-dependent multiple of k columns in the report. E.g., if you pivoted\non the dimension `ga:browser` then you'd get k columns for \"Firefox\", k\ncolumns for \"IE\", k columns for \"Chrome\", etc. The ordering of the groups\nof columns is determined by descending order of \"total\" for the first of\nthe k values. Ties are broken by lexicographic ordering of the first\npivot dimension, then lexicographic ordering of the second pivot\ndimension, and so on. 
E.g., if the totals for the first value for\nFirefox, IE, and Chrome were 8, 2, 8, respectively, the order of columns\nwould be Chrome, Firefox, IE.\n\nThe following let you choose which of the groups of k columns are\nincluded in the response.", + "format": "int32", + "type": "integer" + }, + "metrics": { + "description": "The pivot metrics. Pivot metrics are part of the\nrestriction on total number of metrics allowed in the request.", "type": "array", "items": { - "$ref": "MetricHeaderEntry" + "$ref": "Metric" } }, - "pivotHeaders": { - "description": "Headers for the pivots in the response.", + "dimensions": { "type": "array", "items": { - "$ref": "PivotHeader" + "$ref": "Dimension" + }, + "description": "A list of dimensions to show as pivot columns. A Pivot can have a maximum\nof 4 dimensions. Pivot dimensions are part of the restriction on the\ntotal number of dimensions allowed in the request." + }, + "dimensionFilterClauses": { + "description": "DimensionFilterClauses are logically combined with an `AND` operator: only\ndata that is included by all these DimensionFilterClauses contributes to\nthe values in this pivot region. Dimension filters can be used to restrict\nthe columns shown in the pivot region. For example if you have\n`ga:browser` as the requested dimension in the pivot region, and you\nspecify key filters to restrict `ga:browser` to only \"IE\" or \"Firefox\",\nthen only those two browsers would show up as columns.", + "type": "array", + "items": { + "$ref": "DimensionFilterClause" } } }, - "id": "MetricHeader" + "id": "Pivot" }, - "Report": { - "description": "The data response corresponding to the request.", + "PivotHeaderEntry": { + "description": "The headers for the each of the metric column corresponding to the metrics\nrequested in the pivots section of the response.", "type": "object", "properties": { - "columnHeader": { - "description": "The column headers.", - "$ref": "ColumnHeader" + "dimensionValues": { + "description": "The values for the dimensions in the pivot.", + "type": "array", + "items": { + "type": "string" + } }, - "data": { - "description": "Response data.", - "$ref": "ReportData" + "dimensionNames": { + "description": "The name of the dimensions in the pivot response.", + "type": "array", + "items": { + "type": "string" + } }, - "nextPageToken": { - "description": "Page token to retrieve the next page of results in the list.", - "type": "string" + "metric": { + "$ref": "MetricHeaderEntry", + "description": "The metric header for the metric in the pivot." } }, - "id": "Report" + "id": "PivotHeaderEntry" }, - "SegmentFilterClause": { - "description": "Filter Clause to be used in a segment definition, can be wither a metric or\na dimension filter.", + "SegmentFilter": { + "description": "SegmentFilter defines the segment to be either a simple or a sequence\nsegment. A simple segment condition contains dimension and metric conditions\nto select the sessions or users. A sequence segment condition can be used to\nselect users or sessions based on sequential conditions.", "type": "object", "properties": { - "dimensionFilter": { - "description": "Dimension Filter for the segment definition.", - "$ref": "SegmentDimensionFilter" - }, - "metricFilter": { - "description": "Metric Filter for the segment definition.", - "$ref": "SegmentMetricFilter" + "sequenceSegment": { + "description": "Sequence conditions consist of one or more steps, where each step is\ndefined by one or more dimension/metric conditions. 
Multiple steps can\nbe combined with special sequence operators.", + "$ref": "SequenceSegment" }, "not": { - "description": "Matches the complement (`!`) of the filter.", + "description": "If true, match the complement of simple or sequence segment.\nFor example, to match all visits not from \"New York\", we can define the\nsegment as follows:\n\n \"sessionSegment\": {\n \"segmentFilters\": [{\n \"simpleSegment\" :{\n \"orFiltersForSegment\": [{\n \"segmentFilterClauses\":[{\n \"dimensionFilter\": {\n \"dimensionName\": \"ga:city\",\n \"expressions\": [\"New York\"]\n }\n }]\n }]\n },\n \"not\": \"True\"\n }]\n },", "type": "boolean" + }, + "simpleSegment": { + "$ref": "SimpleSegment", + "description": "A Simple segment conditions consist of one or more dimension/metric\nconditions that can be combined" } }, - "id": "SegmentFilterClause" + "id": "SegmentFilter" }, - "DimensionFilter": { - "description": "Dimension filter specifies the filtering options on a dimension.", - "type": "object", + "SegmentDefinition": { "properties": { - "dimensionName": { - "description": "The dimension to filter on. A DimensionFilter must contain a dimension.", - "type": "string" - }, - "operator": { - "description": "How to match the dimension to the expression. The default is REGEXP.", - "enum": [ - "OPERATOR_UNSPECIFIED", - "REGEXP", - "BEGINS_WITH", - "ENDS_WITH", - "PARTIAL", - "EXACT", - "NUMERIC_EQUAL", - "NUMERIC_GREATER_THAN", - "NUMERIC_LESS_THAN", - "IN_LIST" - ], + "segmentFilters": { + "description": "A segment is defined by a set of segment filters which are combined\ntogether with a logical `AND` operation.", + "type": "array", + "items": { + "$ref": "SegmentFilter" + } + } + }, + "id": "SegmentDefinition", + "description": "SegmentDefinition defines the segment to be a set of SegmentFilters which\nare combined together with a logical `AND` operation.", + "type": "object" + }, + "MetricHeaderEntry": { + "description": "Header for the metrics.", + "type": "object", + "properties": { + "type": { "enumDescriptions": [ - "If the match type is unspecified, it is treated as a `REGEXP`.", - "The match expression is treated as a regular expression. All match types\nare not treated as regular expressions.", - "Matches the value which begin with the match expression provided.", - "Matches the values which end with the match expression provided.", - "Substring match.", - "The value should match the match expression entirely.", - "Integer comparison filters.\ncase sensitivity is ignored for these and the expression\nis assumed to be a string representing an integer.\nFailure conditions:\n\n- If expression is not a valid int64, the client should expect\n an error.\n- Input dimensions that are not valid int64 values will never match the\n filter.", - "Checks if the dimension is numerically greater than the match\nexpression. Read the description for `NUMERIC_EQUALS` for restrictions.", - "Checks if the dimension is numerically less than the match expression.\nRead the description for `NUMERIC_EQUALS` for restrictions.", - "This option is used to specify a dimension filter whose expression can\ntake any value from a selected list of values. This helps avoiding\nevaluating multiple exact match dimension filters which are OR'ed for\nevery single response row. For example:\n\n expressions: [\"A\", \"B\", \"C\"]\n\nAny response row whose dimension has it is value as A, B or C, matches\nthis DimensionFilter." 
+ "Metric type is unspecified.", + "Integer metric.", + "Float metric.", + "Currency metric.", + "Percentage metric.", + "Time metric in `HH:MM:SS` format." + ], + "enum": [ + "METRIC_TYPE_UNSPECIFIED", + "INTEGER", + "FLOAT", + "CURRENCY", + "PERCENT", + "TIME" ], + "description": "The type of the metric, for example `INTEGER`.", "type": "string" }, - "caseSensitive": { - "description": "Should the match be case sensitive? Default is false.", - "type": "boolean" + "name": { + "type": "string", + "description": "The name of the header." + } + }, + "id": "MetricHeaderEntry" + }, + "ReportData": { + "description": "The data part of the report.", + "type": "object", + "properties": { + "minimums": { + "type": "array", + "items": { + "$ref": "DateRangeValues" + }, + "description": "Minimum and maximum values seen over all matching rows. These are both\nempty when `hideValueRanges` in the request is false, or when\nrowCount is zero." }, - "expressions": { - "description": "Strings or regular expression to match against. Only the first value of\nthe list is used for comparison unless the operator is `IN_LIST`.\nIf `IN_LIST` operator, then the entire list is used to filter the\ndimensions as explained in the description of the `IN_LIST` operator.", + "samplingSpaceSizes": { + "description": "If the results are\n[sampled](https://support.google.com/analytics/answer/2637192),\nthis returns the total number of\nsamples present, one entry per date range. If the results are not sampled\nthis field will not be defined. See\n[developer guide](/analytics/devguides/reporting/core/v4/basics#sampling)\nfor details.", + "type": "array", + "items": { + "type": "string", + "format": "int64" + } + }, + "totals": { + "description": "For each requested date range, for the set of all rows that match\nthe query, every requested value format gets a total. The total\nfor a value format is computed by first totaling the metrics\nmentioned in the value format and then evaluating the value\nformat as a scalar expression. E.g., The \"totals\" for\n`3 / (ga:sessions + 2)` we compute\n`3 / ((sum of all relevant ga:sessions) + 2)`.\nTotals are computed before pagination.", + "type": "array", + "items": { + "$ref": "DateRangeValues" + } + }, + "samplesReadCounts": { + "description": "If the results are\n[sampled](https://support.google.com/analytics/answer/2637192),\nthis returns the total number of samples read, one entry per date range.\nIf the results are not sampled this field will not be defined. See\n[developer guide](/analytics/devguides/reporting/core/v4/basics#sampling)\nfor details.", "type": "array", "items": { + "format": "int64", "type": "string" } }, - "not": { - "description": "Logical `NOT` operator. If this boolean is set to true, then the matching\ndimension values will be excluded in the report. The default is false.", + "isDataGolden": { + "description": "Indicates if response to this request is golden or not. Data is\ngolden when the exact same request will not produce any new results if\nasked at a later point in time.", "type": "boolean" + }, + "rows": { + "type": "array", + "items": { + "$ref": "ReportRow" + }, + "description": "There's one ReportRow for every unique combination of dimensions." + }, + "rowCount": { + "description": "Total number of matching rows for this query.", + "format": "int32", + "type": "integer" + }, + "dataLastRefreshed": { + "type": "string", + "description": "The last time the data in the report was refreshed. 
All the hits received\nbefore this timestamp are included in the calculation of the report.", + "format": "google-datetime" + }, + "maximums": { + "description": "Minimum and maximum values seen over all matching rows. These are both\nempty when `hideValueRanges` in the request is false, or when\nrowCount is zero.", + "type": "array", + "items": { + "$ref": "DateRangeValues" + } } }, - "id": "DimensionFilter" + "id": "ReportData" }, - "SegmentDimensionFilter": { - "description": "Dimension filter specifies the filtering options on a dimension.", + "DimensionFilter": { "type": "object", "properties": { - "maxComparisonValue": { - "description": "Maximum comparison values for `BETWEEN` match type.", - "type": "string" + "not": { + "description": "Logical `NOT` operator. If this boolean is set to true, then the matching\ndimension values will be excluded in the report. The default is false.", + "type": "boolean" }, - "dimensionName": { - "description": "Name of the dimension for which the filter is being applied.", - "type": "string" + "expressions": { + "description": "Strings or regular expression to match against. Only the first value of\nthe list is used for comparison unless the operator is `IN_LIST`.\nIf `IN_LIST` operator, then the entire list is used to filter the\ndimensions as explained in the description of the `IN_LIST` operator.", + "type": "array", + "items": { + "type": "string" + } }, "caseSensitive": { - "description": "Should the match be case sensitive, ignored for `IN_LIST` operator.", + "description": "Should the match be case sensitive? Default is false.", "type": "boolean" }, + "dimensionName": { + "description": "The dimension to filter on. A DimensionFilter must contain a dimension.", + "type": "string" + }, "operator": { - "description": "The operator to use to match the dimension with the expressions.", + "enumDescriptions": [ + "If the match type is unspecified, it is treated as a `REGEXP`.", + "The match expression is treated as a regular expression. All match types\nare not treated as regular expressions.", + "Matches the value which begin with the match expression provided.", + "Matches the values which end with the match expression provided.", + "Substring match.", + "The value should match the match expression entirely.", + "Integer comparison filters.\ncase sensitivity is ignored for these and the expression\nis assumed to be a string representing an integer.\nFailure conditions:\n\n- If expression is not a valid int64, the client should expect\n an error.\n- Input dimensions that are not valid int64 values will never match the\n filter.", + "Checks if the dimension is numerically greater than the match\nexpression. Read the description for `NUMERIC_EQUALS` for restrictions.", + "Checks if the dimension is numerically less than the match expression.\nRead the description for `NUMERIC_EQUALS` for restrictions.", + "This option is used to specify a dimension filter whose expression can\ntake any value from a selected list of values. This helps avoiding\nevaluating multiple exact match dimension filters which are OR'ed for\nevery single response row. For example:\n\n expressions: [\"A\", \"B\", \"C\"]\n\nAny response row whose dimension has it is value as A, B or C, matches\nthis DimensionFilter." 
+ ], "enum": [ "OPERATOR_UNSPECIFIED", "REGEXP", @@ -270,11 +347,38 @@ "ENDS_WITH", "PARTIAL", "EXACT", - "IN_LIST", - "NUMERIC_LESS_THAN", + "NUMERIC_EQUAL", "NUMERIC_GREATER_THAN", - "NUMERIC_BETWEEN" + "NUMERIC_LESS_THAN", + "IN_LIST" ], + "description": "How to match the dimension to the expression. The default is REGEXP.", + "type": "string" + } + }, + "id": "DimensionFilter", + "description": "Dimension filter specifies the filtering options on a dimension." + }, + "SegmentDimensionFilter": { + "type": "object", + "properties": { + "caseSensitive": { + "description": "Should the match be case sensitive, ignored for `IN_LIST` operator.", + "type": "boolean" + }, + "minComparisonValue": { + "description": "Minimum comparison values for `BETWEEN` match type.", + "type": "string" + }, + "maxComparisonValue": { + "description": "Maximum comparison values for `BETWEEN` match type.", + "type": "string" + }, + "dimensionName": { + "description": "Name of the dimension for which the filter is being applied.", + "type": "string" + }, + "operator": { "enumDescriptions": [ "If the match type is unspecified, it is treated as a REGEXP.", "The match expression is treated as a regular expression. All other match\ntypes are not treated as regular expressions.", @@ -287,6 +391,19 @@ "Checks if the dimension is numerically greater than the match\nexpression.", "Checks if the dimension is numerically between the minimum and maximum\nof the match expression, boundaries excluded." ], + "enum": [ + "OPERATOR_UNSPECIFIED", + "REGEXP", + "BEGINS_WITH", + "ENDS_WITH", + "PARTIAL", + "EXACT", + "IN_LIST", + "NUMERIC_LESS_THAN", + "NUMERIC_GREATER_THAN", + "NUMERIC_BETWEEN" + ], + "description": "The operator to use to match the dimension with the expressions.", "type": "string" }, "expressions": { @@ -295,612 +412,495 @@ "items": { "type": "string" } - }, - "minComparisonValue": { - "description": "Minimum comparison values for `BETWEEN` match type.", - "type": "string" } }, - "id": "SegmentDimensionFilter" + "id": "SegmentDimensionFilter", + "description": "Dimension filter specifies the filtering options on a dimension." }, - "ReportRequest": { - "description": "The main request class which specifies the Reporting API request.", + "OrderBy": { + "description": "Specifies the sorting options.", "type": "object", "properties": { - "cohortGroup": { - "description": "Cohort group associated with this request. If there is a cohort group\nin the request the `ga:cohort` dimension must be present.\nEvery [ReportRequest](#ReportRequest) within a `batchGet` method must\ncontain the same `cohortGroup` definition.", - "$ref": "CohortGroup" - }, - "dimensions": { - "description": "The dimensions requested.\nRequests can have a total of 7 dimensions.", - "type": "array", - "items": { - "$ref": "Dimension" - } - }, - "metricFilterClauses": { - "description": "The metric filter clauses. They are logically combined with the `AND`\noperator. Metric filters look at only the first date range and not the\ncomparing date range. Note that filtering on metrics occurs after the\nmetrics are aggregated.", - "type": "array", - "items": { - "$ref": "MetricFilterClause" - } - }, - "hideTotals": { - "description": "If set to true, hides the total of all metrics for all the matching rows,\nfor every date range. The default false and will return the totals.", - "type": "boolean" + "fieldName": { + "description": "The field which to sort by. The default sort order is ascending. 
Example:\n`ga:browser`.\nNote, that you can only specify one field for sort here. For example,\n`ga:browser, ga:city` is not valid.", + "type": "string" }, - "includeEmptyRows": { - "description": "If set to false, the response does not include rows if all the retrieved\nmetrics are equal to zero. The default is false which will exclude these\nrows.", - "type": "boolean" + "orderType": { + "enum": [ + "ORDER_TYPE_UNSPECIFIED", + "VALUE", + "DELTA", + "SMART", + "HISTOGRAM_BUCKET", + "DIMENSION_AS_INTEGER" + ], + "description": "The order type. The default orderType is `VALUE`.", + "type": "string", + "enumDescriptions": [ + "Unspecified order type will be treated as sort based on value.", + "The sort order is based on the value of the chosen column; looks only at\nthe first date range.", + "The sort order is based on the difference of the values of the chosen\ncolumn between the first two date ranges. Usable only if there are\nexactly two date ranges.", + "The sort order is based on weighted value of the chosen column. If\ncolumn has n/d format, then weighted value of this ratio will\nbe `(n + totals.n)/(d + totals.d)` Usable only for metrics that\nrepresent ratios.", + "Histogram order type is applicable only to dimension columns with\nnon-empty histogram-buckets.", + "If the dimensions are fixed length numbers, ordinary sort would just\nwork fine. `DIMENSION_AS_INTEGER` can be used if the dimensions are\nvariable length numbers." + ] }, - "dimensionFilterClauses": { - "description": "The dimension filter clauses for filtering Dimension Values. They are\nlogically combined with the `AND` operator. Note that filtering occurs\nbefore any dimensions are aggregated, so that the returned metrics\nrepresent the total for only the relevant dimensions.", - "type": "array", - "items": { - "$ref": "DimensionFilterClause" - } - }, - "pivots": { - "description": "The pivot definitions. Requests can have a maximum of 2 pivots.", - "type": "array", - "items": { - "$ref": "Pivot" - } - }, - "dateRanges": { - "description": "Date ranges in the request. The request can have a maximum of 2 date\nranges. The response will contain a set of metric values for each\ncombination of the dimensions for each date range in the request. So, if\nthere are two date ranges, there will be two set of metric values, one for\nthe original date range and one for the second date range.\nThe `reportRequest.dateRanges` field should not be specified for cohorts\nor Lifetime value requests.\nIf a date range is not provided, the default date range is (startDate:\ncurrent date - 7 days, endDate: current date - 1 day). Every\n[ReportRequest](#ReportRequest) within a `batchGet` method must\ncontain the same `dateRanges` definition.", - "type": "array", - "items": { - "$ref": "DateRange" - } + "sortOrder": { + "description": "The sorting order for the field.", + "type": "string", + "enumDescriptions": [ + "If the sort order is unspecified, the default is ascending.", + "Ascending sort. The field will be sorted in an ascending manner.", + "Descending sort. The field will be sorted in a descending manner." + ], + "enum": [ + "SORT_ORDER_UNSPECIFIED", + "ASCENDING", + "DESCENDING" + ] + } + }, + "id": "OrderBy" + }, + "Segment": { + "description": "The segment definition, if the report needs to be segmented.\nA Segment is a subset of the Analytics data. 
For example, of the entire\nset of users, one Segment might be users from a particular country or city.", + "type": "object", + "properties": { + "dynamicSegment": { + "description": "A dynamic segment definition in the request.", + "$ref": "DynamicSegment" }, - "segments": { - "description": "Segment the data returned for the request. A segment definition helps look\nat a subset of the segment request. A request can contain up to four\nsegments. Every [ReportRequest](#ReportRequest) within a\n`batchGet` method must contain the same `segments` definition. Requests\nwith segments must have the `ga:segment` dimension.", + "segmentId": { + "description": "The segment ID of a built-in or custom segment, for example `gaid::-3`.", + "type": "string" + } + }, + "id": "Segment" + }, + "SegmentSequenceStep": { + "description": "A segment sequence definition.", + "type": "object", + "properties": { + "orFiltersForSegment": { + "description": "A sequence is specified with a list of Or grouped filters which are\ncombined with `AND` operator.", "type": "array", "items": { - "$ref": "Segment" + "$ref": "OrFiltersForSegment" } }, - "samplingLevel": { - "description": "The desired report\n[sample](https://support.google.com/analytics/answer/2637192) size.\nIf the the `samplingLevel` field is unspecified the `DEFAULT` sampling\nlevel is used. Every [ReportRequest](#ReportRequest) within a\n`batchGet` method must contain the same `samplingLevel` definition. See\n[developer guide](/analytics/devguides/reporting/core/v4/basics#sampling)\n for details.", - "enum": [ - "SAMPLING_UNSPECIFIED", - "DEFAULT", - "SMALL", - "LARGE" - ], + "matchType": { + "description": "Specifies if the step immediately precedes or can be any time before the\nnext step.", + "type": "string", "enumDescriptions": [ - "If the `samplingLevel` field is unspecified the `DEFAULT` sampling level\nis used.", - "Returns response with a sample size that balances speed and\naccuracy.", - "It returns a fast response with a smaller sampling size.", - "Returns a more accurate response using a large sampling size. But this\nmay result in response being slower." + "Unspecified match type is treated as precedes.", + "Operator indicates that the previous step precedes the next step.", + "Operator indicates that the previous step immediately precedes the next\nstep." ], + "enum": [ + "UNSPECIFIED_MATCH_TYPE", + "PRECEDES", + "IMMEDIATELY_PRECEDES" + ] + } + }, + "id": "SegmentSequenceStep" + }, + "Metric": { + "type": "object", + "properties": { + "expression": { + "description": "A metric expression in the request. An expression is constructed from one\nor more metrics and numbers. Accepted operators include: Plus (+), Minus\n(-), Negation (Unary -), Divided by (/), Multiplied by (*), Parenthesis,\nPositive cardinal numbers (0-9), can include decimals and is limited to\n1024 characters. Example `ga:totalRefunds/ga:users`, in most cases the\nmetric expression is just a single metric name like `ga:users`.\nAdding mixed `MetricType` (E.g., `CURRENCY` + `PERCENTAGE`) metrics\nwill result in unexpected results.", "type": "string" }, - "metrics": { - "description": "The metrics requested.\nRequests must specify at least one metric. Requests can have a\ntotal of 10 metrics.", - "type": "array", - "items": { - "$ref": "Metric" - } - }, - "pageSize": { - "description": "Page size is for paging and specifies the maximum number of returned rows.\nPage size should be \u003e= 0. 
A query returns the default of 1,000 rows.\nThe Analytics Core Reporting API returns a maximum of 10,000 rows per\nrequest, no matter how many you ask for. It can also return fewer rows\nthan requested, if there aren't as many dimension segments as you expect.\nFor instance, there are fewer than 300 possible values for `ga:country`,\nso when segmenting only by country, you can't get more than 300 rows,\neven if you set `pageSize` to a higher value.", - "type": "integer", - "format": "int32" + "formattingType": { + "description": "Specifies how the metric expression should be formatted, for example\n`INTEGER`.", + "type": "string", + "enumDescriptions": [ + "Metric type is unspecified.", + "Integer metric.", + "Float metric.", + "Currency metric.", + "Percentage metric.", + "Time metric in `HH:MM:SS` format." + ], + "enum": [ + "METRIC_TYPE_UNSPECIFIED", + "INTEGER", + "FLOAT", + "CURRENCY", + "PERCENT", + "TIME" + ] }, - "orderBys": { - "description": "Sort order on output rows. To compare two rows, the elements of the\nfollowing are applied in order until a difference is found. All date\nranges in the output get the same row order.", + "alias": { + "description": "An alias for the metric expression is an alternate name for the\nexpression. The alias can be used for filtering and sorting. This field\nis optional and is useful if the expression is not a single metric but\na complex expression which cannot be used in filtering and sorting.\nThe alias is also used in the response column header.", + "type": "string" + } + }, + "id": "Metric", + "description": "[Metrics](https://support.google.com/analytics/answer/1033861)\nare the quantitative measurements. For example, the metric `ga:users`\nindicates the total number of users for the requested time period." + }, + "PivotValueRegion": { + "id": "PivotValueRegion", + "description": "The metric values in the pivot region.", + "type": "object", + "properties": { + "values": { + "description": "The values of the metrics in each of the pivot regions.", "type": "array", "items": { - "$ref": "OrderBy" + "type": "string" } + } + } + }, + "Report": { + "description": "The data response corresponding to the request.", + "type": "object", + "properties": { + "data": { + "description": "Response data.", + "$ref": "ReportData" }, - "filtersExpression": { - "description": "Dimension or metric filters that restrict the data returned for your\nrequest. To use the `filtersExpression`, supply a dimension or metric on\nwhich to filter, followed by the filter expression. For example, the\nfollowing expression selects `ga:browser` dimension which starts with\nFirefox; `ga:browser=~^Firefox`. For more information on dimensions\nand metric filters, see\n[Filters reference](https://developers.google.com/analytics/devguides/reporting/core/v3/reference#filters).", - "type": "string" - }, - "hideValueRanges": { - "description": "If set to true, hides the minimum and maximum across all matching rows.\nThe default is false and the value ranges are returned.", - "type": "boolean" - }, - "viewId": { - "description": "The Analytics\n[view ID](https://support.google.com/analytics/answer/1009618)\nfrom which to retrieve data. Every [ReportRequest](#ReportRequest)\nwithin a `batchGet` method must contain the same `viewId`.", + "nextPageToken": { + "description": "Page token to retrieve the next page of results in the list.", "type": "string" }, - "pageToken": { - "description": "A continuation token to get the next page of the results. 
Adding this to\nthe request will return the rows after the pageToken. The pageToken should\nbe the value returned in the nextPageToken parameter in the response to\nthe GetReports request.", - "type": "string" + "columnHeader": { + "description": "The column headers.", + "$ref": "ColumnHeader" } }, - "id": "ReportRequest" + "id": "Report" }, - "SimpleSegment": { - "description": "A Simple segment conditions consist of one or more dimension/metric\nconditions that can be combined.", + "PivotHeader": { + "description": "The headers for each of the pivot sections defined in the request.", "type": "object", "properties": { - "orFiltersForSegment": { - "description": "A list of segment filters groups which are combined with logical `AND`\noperator.", + "totalPivotGroupsCount": { + "description": "The total number of groups for this pivot.", + "format": "int32", + "type": "integer" + }, + "pivotHeaderEntries": { + "description": "A single pivot section header.", "type": "array", "items": { - "$ref": "OrFiltersForSegment" + "$ref": "PivotHeaderEntry" } } }, - "id": "SimpleSegment" + "id": "PivotHeader" }, - "SegmentDefinition": { - "description": "SegmentDefinition defines the segment to be a set of SegmentFilters which\nare combined together with a logical `AND` operation.", - "type": "object", + "DateRange": { "properties": { - "segmentFilters": { - "description": "A segment is defined by a set of segment filters which are combined\ntogether with a logical `AND` operation.", - "type": "array", - "items": { - "$ref": "SegmentFilter" - } + "endDate": { + "description": "The end date for the query in the format `YYYY-MM-DD`.", + "type": "string" + }, + "startDate": { + "description": "The start date for the query in the format `YYYY-MM-DD`.", + "type": "string" } }, - "id": "SegmentDefinition" + "id": "DateRange", + "description": "A contiguous set of days: startDate, startDate + 1 day, ..., endDate.\nThe start and end dates are specified in\n[ISO8601](https://en.wikipedia.org/wiki/ISO_8601) date format `YYYY-MM-DD`.", + "type": "object" }, - "SegmentMetricFilter": { - "description": "Metric filter to be used in a segment filter clause.", + "MetricFilter": { + "id": "MetricFilter", + "description": "MetricFilter specifies the filter on a metric.", "type": "object", "properties": { + "not": { + "type": "boolean", + "description": "Logical `NOT` operator. If this boolean is set to true, then the matching\nmetric values will be excluded in the report. The default is false." + }, "metricName": { - "description": "The metric that will be filtered on. A `metricFilter` must contain a\nmetric name.", + "description": "The metric that will be filtered on. A metricFilter must contain a metric\nname. A metric name can be an alias earlier defined as a metric or it can\nalso be a metric expression.", "type": "string" }, + "comparisonValue": { + "type": "string", + "description": "The value to compare against." + }, "operator": { - "description": "Specifies is the operation to perform to compare the metric. The default\nis `EQUAL`.", - "enum": [ - "UNSPECIFIED_OPERATOR", - "LESS_THAN", - "GREATER_THAN", - "EQUAL", - "BETWEEN" - ], "enumDescriptions": [ - "Unspecified operator is treated as `LESS_THAN` operator.", - "Checks if the metric value is less than comparison value.", - "Checks if the metric value is greater than comparison value.", - "Equals operator.", - "For between operator, both the minimum and maximum are exclusive.\nWe will use `LT` and `GT` for comparison." 
+ "If the operator is not specified, it is treated as `EQUAL`.", + "Should the value of the metric be exactly equal to the comparison value.", + "Should the value of the metric be less than to the comparison value.", + "Should the value of the metric be greater than to the comparison value.", + "Validates if the metric is missing.\nDoesn't take comparisonValue into account." ], - "type": "string" - }, - "comparisonValue": { - "description": "The value to compare against. If the operator is `BETWEEN`, this value is\ntreated as minimum comparison value.", - "type": "string" - }, - "scope": { - "description": "Scope for a metric defines the level at which that metric is defined. The\nspecified metric scope must be equal to or greater than its primary scope\nas defined in the data model. The primary scope is defined by if the\nsegment is selecting users or sessions.", "enum": [ - "UNSPECIFIED_SCOPE", - "PRODUCT", - "HIT", - "SESSION", - "USER" - ], - "enumDescriptions": [ - "If the scope is unspecified, it defaults to the condition scope,\n`USER` or `SESSION` depending on if the segment is trying to choose\nusers or sessions.", - "Product scope.", - "Hit scope.", - "Session scope.", - "User scope." + "OPERATOR_UNSPECIFIED", + "EQUAL", + "LESS_THAN", + "GREATER_THAN", + "IS_MISSING" ], - "type": "string" - }, - "maxComparisonValue": { - "description": "Max comparison value is only used for `BETWEEN` operator.", + "description": "Is the metric `EQUAL`, `LESS_THAN` or `GREATER_THAN` the\ncomparisonValue, the default is `EQUAL`. If the operator is\n`IS_MISSING`, checks if the metric is missing and would ignore the\ncomparisonValue.", "type": "string" } - }, - "id": "SegmentMetricFilter" + } }, - "ReportData": { - "description": "The data part of the report.", + "ReportRequest": { + "description": "The main request class which specifies the Reporting API request.", "type": "object", "properties": { - "rowCount": { - "description": "Total number of matching rows for this query.", - "type": "integer", - "format": "int32" - }, - "samplingSpaceSizes": { - "description": "If the results are\n[sampled](https://support.google.com/analytics/answer/2637192),\nthis returns the total number of\nsamples present, one entry per date range. If the results are not sampled\nthis field will not be defined. See\n[developer guide](/analytics/devguides/reporting/core/v4/basics#sampling)\nfor details.", - "type": "array", - "items": { - "type": "string", - "format": "int64" - } - }, - "maximums": { - "description": "Minimum and maximum values seen over all matching rows. These are both\nempty when `hideValueRanges` in the request is false, or when\nrowCount is zero.", + "dimensions": { + "description": "The dimensions requested.\nRequests can have a total of 7 dimensions.", "type": "array", "items": { - "$ref": "DateRangeValues" + "$ref": "Dimension" } }, - "samplesReadCounts": { - "description": "If the results are\n[sampled](https://support.google.com/analytics/answer/2637192),\nthis returns the total number of samples read, one entry per date range.\nIf the results are not sampled this field will not be defined. See\n[developer guide](/analytics/devguides/reporting/core/v4/basics#sampling)\nfor details.", + "dateRanges": { + "description": "Date ranges in the request. The request can have a maximum of 2 date\nranges. The response will contain a set of metric values for each\ncombination of the dimensions for each date range in the request. 
So, if\nthere are two date ranges, there will be two set of metric values, one for\nthe original date range and one for the second date range.\nThe `reportRequest.dateRanges` field should not be specified for cohorts\nor Lifetime value requests.\nIf a date range is not provided, the default date range is (startDate:\ncurrent date - 7 days, endDate: current date - 1 day). Every\n[ReportRequest](#ReportRequest) within a `batchGet` method must\ncontain the same `dateRanges` definition.", "type": "array", "items": { - "type": "string", - "format": "int64" + "$ref": "DateRange" } }, - "minimums": { - "description": "Minimum and maximum values seen over all matching rows. These are both\nempty when `hideValueRanges` in the request is false, or when\nrowCount is zero.", - "type": "array", - "items": { - "$ref": "DateRangeValues" - } + "pageToken": { + "description": "A continuation token to get the next page of the results. Adding this to\nthe request will return the rows after the pageToken. The pageToken should\nbe the value returned in the nextPageToken parameter in the response to\nthe GetReports request.", + "type": "string" }, - "rows": { - "description": "There's one ReportRow for every unique combination of dimensions.", + "pivots": { + "description": "The pivot definitions. Requests can have a maximum of 2 pivots.", "type": "array", "items": { - "$ref": "ReportRow" + "$ref": "Pivot" } }, - "dataLastRefreshed": { - "description": "The last time the data in the report was refreshed. All the hits received\nbefore this timestamp are included in the calculation of the report.", - "type": "string", - "format": "google-datetime" + "includeEmptyRows": { + "description": "If set to false, the response does not include rows if all the retrieved\nmetrics are equal to zero. The default is false which will exclude these\nrows.", + "type": "boolean" }, - "totals": { - "description": "For each requested date range, for the set of all rows that match\nthe query, every requested value format gets a total. The total\nfor a value format is computed by first totaling the metrics\nmentioned in the value format and then evaluating the value\nformat as a scalar expression. E.g., The \"totals\" for\n`3 / (ga:sessions + 2)` we compute\n`3 / ((sum of all relevant ga:sessions) + 2)`.\nTotals are computed before pagination.", + "metricFilterClauses": { + "description": "The metric filter clauses. They are logically combined with the `AND`\noperator. Metric filters look at only the first date range and not the\ncomparing date range. Note that filtering on metrics occurs after the\nmetrics are aggregated.", "type": "array", "items": { - "$ref": "DateRangeValues" + "$ref": "MetricFilterClause" } }, - "isDataGolden": { - "description": "Indicates if response to this request is golden or not. Data is\ngolden when the exact same request will not produce any new results if\nasked at a later point in time.", + "pageSize": { + "description": "Page size is for paging and specifies the maximum number of returned rows.\nPage size should be \u003e= 0. A query returns the default of 1,000 rows.\nThe Analytics Core Reporting API returns a maximum of 10,000 rows per\nrequest, no matter how many you ask for. 
It can also return fewer rows\nthan requested, if there aren't as many dimension segments as you expect.\nFor instance, there are fewer than 300 possible values for `ga:country`,\nso when segmenting only by country, you can't get more than 300 rows,\neven if you set `pageSize` to a higher value.", + "format": "int32", + "type": "integer" + }, + "hideTotals": { + "description": "If set to true, hides the total of all metrics for all the matching rows,\nfor every date range. The default false and will return the totals.", "type": "boolean" - } - }, - "id": "ReportData" - }, - "GetReportsRequest": { - "description": "The batch request containing multiple report request.", - "type": "object", - "properties": { - "reportRequests": { - "description": "Requests, each request will have a separate response.\nThere can be a maximum of 5 requests. All requests should have the same\n`dateRanges`, `viewId`, `segments`, `samplingLevel`, and `cohortGroup`.", - "type": "array", - "items": { - "$ref": "ReportRequest" - } - } - }, - "id": "GetReportsRequest" - }, - "OrderBy": { - "description": "Specifies the sorting options.", - "type": "object", - "properties": { - "sortOrder": { - "description": "The sorting order for the field.", - "enum": [ - "SORT_ORDER_UNSPECIFIED", - "ASCENDING", - "DESCENDING" - ], - "enumDescriptions": [ - "If the sort order is unspecified, the default is ascending.", - "Ascending sort. The field will be sorted in an ascending manner.", - "Descending sort. The field will be sorted in a descending manner." - ], - "type": "string" }, - "orderType": { - "description": "The order type. The default orderType is `VALUE`.", - "enum": [ - "ORDER_TYPE_UNSPECIFIED", - "VALUE", - "DELTA", - "SMART", - "HISTOGRAM_BUCKET", - "DIMENSION_AS_INTEGER" - ], - "enumDescriptions": [ - "Unspecified order type will be treated as sort based on value.", - "The sort order is based on the value of the chosen column; looks only at\nthe first date range.", - "The sort order is based on the difference of the values of the chosen\ncolumn between the first two date ranges. Usable only if there are\nexactly two date ranges.", - "The sort order is based on weighted value of the chosen column. If\ncolumn has n/d format, then weighted value of this ratio will\nbe `(n + totals.n)/(d + totals.d)` Usable only for metrics that\nrepresent ratios.", - "Histogram order type is applicable only to dimension columns with\nnon-empty histogram-buckets.", - "If the dimensions are fixed length numbers, ordinary sort would just\nwork fine. `DIMENSION_AS_INTEGER` can be used if the dimensions are\nvariable length numbers." - ], - "type": "string" + "hideValueRanges": { + "description": "If set to true, hides the minimum and maximum across all matching rows.\nThe default is false and the value ranges are returned.", + "type": "boolean" }, - "fieldName": { - "description": "The field which to sort by. The default sort order is ascending. Example:\n`ga:browser`.\nNote, that you can only specify one field for sort here. For example,\n`ga:browser, ga:city` is not valid.", - "type": "string" - } - }, - "id": "OrderBy" - }, - "Cohort": { - "description": "Defines a cohort. A cohort is a group of users who share a common\ncharacteristic. For example, all users with the same acquisition date\nbelong to the same cohort.", - "type": "object", - "properties": { - "type": { - "description": "Type of the cohort. The only supported type as of now is\n`FIRST_VISIT_DATE`. 
If this field is unspecified the cohort is treated\nas `FIRST_VISIT_DATE` type cohort.", - "enum": [ - "UNSPECIFIED_COHORT_TYPE", - "FIRST_VISIT_DATE" - ], - "enumDescriptions": [ - "If unspecified it's treated as `FIRST_VISIT_DATE`.", - "Cohorts that are selected based on first visit date." - ], + "filtersExpression": { + "description": "Dimension or metric filters that restrict the data returned for your\nrequest. To use the `filtersExpression`, supply a dimension or metric on\nwhich to filter, followed by the filter expression. For example, the\nfollowing expression selects `ga:browser` dimension which starts with\nFirefox; `ga:browser=~^Firefox`. For more information on dimensions\nand metric filters, see\n[Filters reference](https://developers.google.com/analytics/devguides/reporting/core/v3/reference#filters).", "type": "string" }, - "dateRange": { - "description": "This is used for `FIRST_VISIT_DATE` cohort, the cohort selects users\nwhose first visit date is between start date and end date defined in the\nDateRange. The date ranges should be aligned for cohort requests. If the\nrequest contains `ga:cohortNthDay` it should be exactly one day long,\nif `ga:cohortNthWeek` it should be aligned to the week boundary (starting\nat Sunday and ending Saturday), and for `ga:cohortNthMonth` the date range\nshould be aligned to the month (starting at the first and ending on the\nlast day of the month).\nFor LTV requests there are no such restrictions.\nYou do not need to supply a date range for the\n`reportsRequest.dateRanges` field.", - "$ref": "DateRange" + "cohortGroup": { + "description": "Cohort group associated with this request. If there is a cohort group\nin the request the `ga:cohort` dimension must be present.\nEvery [ReportRequest](#ReportRequest) within a `batchGet` method must\ncontain the same `cohortGroup` definition.", + "$ref": "CohortGroup" }, - "name": { - "description": "A unique name for the cohort. If not defined name will be auto-generated\nwith values cohort_[1234...].", + "viewId": { + "description": "The Analytics\n[view ID](https://support.google.com/analytics/answer/1009618)\nfrom which to retrieve data. Every [ReportRequest](#ReportRequest)\nwithin a `batchGet` method must contain the same `viewId`.", "type": "string" - } - }, - "id": "Cohort" - }, - "OrFiltersForSegment": { - "description": "A list of segment filters in the `OR` group are combined with the logical OR\noperator.", - "type": "object", - "properties": { - "segmentFilterClauses": { - "description": "List of segment filters to be combined with a `OR` operator.", + }, + "metrics": { + "description": "The metrics requested.\nRequests must specify at least one metric. Requests can have a\ntotal of 10 metrics.", "type": "array", "items": { - "$ref": "SegmentFilterClause" + "$ref": "Metric" } - } - }, - "id": "OrFiltersForSegment" - }, - "SequenceSegment": { - "description": "Sequence conditions consist of one or more steps, where each step is defined\nby one or more dimension/metric conditions. Multiple steps can be combined\nwith special sequence operators.", - "type": "object", - "properties": { - "firstStepShouldMatchFirstHit": { - "description": "If set, first step condition must match the first hit of the visitor (in\nthe date range).", - "type": "boolean" }, - "segmentSequenceSteps": { - "description": "The list of steps in the sequence.", + "dimensionFilterClauses": { + "description": "The dimension filter clauses for filtering Dimension Values. They are\nlogically combined with the `AND` operator. 
Note that filtering occurs\nbefore any dimensions are aggregated, so that the returned metrics\nrepresent the total for only the relevant dimensions.", "type": "array", "items": { - "$ref": "SegmentSequenceStep" + "$ref": "DimensionFilterClause" } - } - }, - "id": "SequenceSegment" - }, - "SegmentFilter": { - "description": "SegmentFilter defines the segment to be either a simple or a sequence\nsegment. A simple segment condition contains dimension and metric conditions\nto select the sessions or users. A sequence segment condition can be used to\nselect users or sessions based on sequential conditions.", - "type": "object", - "properties": { - "sequenceSegment": { - "description": "Sequence conditions consist of one or more steps, where each step is\ndefined by one or more dimension/metric conditions. Multiple steps can\nbe combined with special sequence operators.", - "$ref": "SequenceSegment" }, - "not": { - "description": "If true, match the complement of simple or sequence segment.\nFor example, to match all visits not from \"New York\", we can define the\nsegment as follows:\n\n \"sessionSegment\": {\n \"segmentFilters\": [{\n \"simpleSegment\" :{\n \"orFiltersForSegment\": [{\n \"segmentFilterClauses\":[{\n \"dimensionFilter\": {\n \"dimensionName\": \"ga:city\",\n \"expressions\": [\"New York\"]\n }\n }]\n }]\n },\n \"not\": \"True\"\n }]\n },", - "type": "boolean" + "orderBys": { + "type": "array", + "items": { + "$ref": "OrderBy" + }, + "description": "Sort order on output rows. To compare two rows, the elements of the\nfollowing are applied in order until a difference is found. All date\nranges in the output get the same row order." }, - "simpleSegment": { - "description": "A Simple segment conditions consist of one or more dimension/metric\nconditions that can be combined", - "$ref": "SimpleSegment" + "segments": { + "description": "Segment the data returned for the request. A segment definition helps look\nat a subset of the segment request. A request can contain up to four\nsegments. Every [ReportRequest](#ReportRequest) within a\n`batchGet` method must contain the same `segments` definition. Requests\nwith segments must have the `ga:segment` dimension.", + "type": "array", + "items": { + "$ref": "Segment" + } + }, + "samplingLevel": { + "enumDescriptions": [ + "If the `samplingLevel` field is unspecified the `DEFAULT` sampling level\nis used.", + "Returns response with a sample size that balances speed and\naccuracy.", + "It returns a fast response with a smaller sampling size.", + "Returns a more accurate response using a large sampling size. But this\nmay result in response being slower." + ], + "enum": [ + "SAMPLING_UNSPECIFIED", + "DEFAULT", + "SMALL", + "LARGE" + ], + "description": "The desired report\n[sample](https://support.google.com/analytics/answer/2637192) size.\nIf the the `samplingLevel` field is unspecified the `DEFAULT` sampling\nlevel is used. Every [ReportRequest](#ReportRequest) within a\n`batchGet` method must contain the same `samplingLevel` definition. See\n[developer guide](/analytics/devguides/reporting/core/v4/basics#sampling)\n for details.", + "type": "string" } }, - "id": "SegmentFilter" + "id": "ReportRequest" }, - "PivotHeaderEntry": { - "description": "The headers for the each of the metric column corresponding to the metrics\nrequested in the pivots section of the response.", + "Dimension": { + "description": "[Dimensions](https://support.google.com/analytics/answer/1033861)\nare attributes of your data. 
For example, the dimension `ga:city`\nindicates the city, for example, \"Paris\" or \"New York\", from which\na session originates.", "type": "object", "properties": { - "dimensionNames": { - "description": "The name of the dimensions in the pivot response.", - "type": "array", - "items": { - "type": "string" - } - }, - "dimensionValues": { - "description": "The values for the dimensions in the pivot.", + "histogramBuckets": { + "description": "If non-empty, we place dimension values into buckets after string to\nint64. Dimension values that are not the string representation of an\nintegral value will be converted to zero. The bucket values have to be in\nincreasing order. Each bucket is closed on the lower end, and open on the\nupper end. The \"first\" bucket includes all values less than the first\nboundary, the \"last\" bucket includes all values up to infinity. Dimension\nvalues that fall in a bucket get transformed to a new dimension value. For\nexample, if one gives a list of \"0, 1, 3, 4, 7\", then we return the\nfollowing buckets:\n\n- bucket #1: values \u003c 0, dimension value \"\u003c0\"\n- bucket #2: values in [0,1), dimension value \"0\"\n- bucket #3: values in [1,3), dimension value \"1-2\"\n- bucket #4: values in [3,4), dimension value \"3\"\n- bucket #5: values in [4,7), dimension value \"4-6\"\n- bucket #6: values \u003e= 7, dimension value \"7+\"\n\nNOTE: If you are applying histogram mutation on any dimension, and using\nthat dimension in sort, you will want to use the sort type\n`HISTOGRAM_BUCKET` for that purpose. Without that the dimension values\nwill be sorted according to dictionary\n(lexicographic) order. For example the ascending dictionary order is:\n\n \"\u003c50\", \"1001+\", \"121-1000\", \"50-120\"\n\nAnd the ascending `HISTOGRAM_BUCKET` order is:\n\n \"\u003c50\", \"50-120\", \"121-1000\", \"1001+\"\n\nThe client has to explicitly request `\"orderType\": \"HISTOGRAM_BUCKET\"`\nfor a histogram-mutated dimension.", "type": "array", "items": { + "format": "int64", "type": "string" } }, - "metric": { - "description": "The metric header for the metric in the pivot.", - "$ref": "MetricHeaderEntry" + "name": { + "description": "Name of the dimension to fetch, for example `ga:browser`.", + "type": "string" } }, - "id": "PivotHeaderEntry" + "id": "Dimension" }, - "DimensionFilterClause": { - "description": "A group of dimension filters. Set the operator value to specify how\nthe filters are logically combined.", + "DynamicSegment": { + "description": "Dynamic segment definition for defining the segment within the request.\nA segment can select users, sessions or both.", "type": "object", "properties": { - "operator": { - "description": "The operator for combining multiple dimension filters. If unspecified, it\nis treated as an `OR`.", - "enum": [ - "OPERATOR_UNSPECIFIED", - "OR", - "AND" - ], - "enumDescriptions": [ - "Unspecified operator. It is treated as an `OR`.", - "The logical `OR` operator.", - "The logical `AND` operator." - ], + "sessionSegment": { + "$ref": "SegmentDefinition", + "description": "Session Segment to select sessions to include in the segment." + }, + "name": { + "description": "The name of the dynamic segment.", "type": "string" }, - "filters": { - "description": "The repeated set of filters. They are logically combined based on the\noperator specified.", - "type": "array", - "items": { - "$ref": "DimensionFilter" - } + "userSegment": { + "$ref": "SegmentDefinition", + "description": "User Segment to select users to include in the segment." 
} }, - "id": "DimensionFilterClause" + "id": "DynamicSegment" }, - "SegmentSequenceStep": { - "description": "A segment sequence definition.", + "SimpleSegment": { + "description": "A Simple segment conditions consist of one or more dimension/metric\nconditions that can be combined.", "type": "object", "properties": { - "matchType": { - "description": "Specifies if the step immediately precedes or can be any time before the\nnext step.", - "enum": [ - "UNSPECIFIED_MATCH_TYPE", - "PRECEDES", - "IMMEDIATELY_PRECEDES" - ], - "enumDescriptions": [ - "Unspecified match type is treated as precedes.", - "Operator indicates that the previous step precedes the next step.", - "Operator indicates that the previous step immediately precedes the next\nstep." - ], - "type": "string" - }, "orFiltersForSegment": { - "description": "A sequence is specified with a list of Or grouped filters which are\ncombined with `AND` operator.", + "description": "A list of segment filters groups which are combined with logical `AND`\noperator.", "type": "array", "items": { "$ref": "OrFiltersForSegment" } } }, - "id": "SegmentSequenceStep" + "id": "SimpleSegment" }, - "Pivot": { - "description": "The Pivot describes the pivot section in the request.\nThe Pivot helps rearrange the information in the table for certain reports\nby pivoting your data on a second dimension.", + "ColumnHeader": { + "description": "Column headers.", "type": "object", "properties": { - "dimensions": { - "description": "A list of dimensions to show as pivot columns. A Pivot can have a maximum\nof 4 dimensions. Pivot dimensions are part of the restriction on the\ntotal number of dimensions allowed in the request.", - "type": "array", - "items": { - "$ref": "Dimension" - } - }, - "metrics": { - "description": "The pivot metrics. Pivot metrics are part of the\nrestriction on total number of metrics allowed in the request.", - "type": "array", - "items": { - "$ref": "Metric" - } - }, - "maxGroupCount": { - "description": "Specifies the maximum number of groups to return.\nThe default value is 10, also the maximum value is 1,000.", - "type": "integer", - "format": "int32" + "metricHeader": { + "description": "Metric headers for the metrics in the response.", + "$ref": "MetricHeader" }, - "dimensionFilterClauses": { - "description": "DimensionFilterClauses are logically combined with an `AND` operator: only\ndata that is included by all these DimensionFilterClauses contributes to\nthe values in this pivot region. Dimension filters can be used to restrict\nthe columns shown in the pivot region. For example if you have\n`ga:browser` as the requested dimension in the pivot region, and you\nspecify key filters to restrict `ga:browser` to only \"IE\" or \"Firefox\",\nthen only those two browsers would show up as columns.", + "dimensions": { + "description": "The dimension names in the response.", "type": "array", "items": { - "$ref": "DimensionFilterClause" + "type": "string" } - }, - "startGroup": { - "description": "If k metrics were requested, then the response will contain some\ndata-dependent multiple of k columns in the report. E.g., if you pivoted\non the dimension `ga:browser` then you'd get k columns for \"Firefox\", k\ncolumns for \"IE\", k columns for \"Chrome\", etc. The ordering of the groups\nof columns is determined by descending order of \"total\" for the first of\nthe k values. Ties are broken by lexicographic ordering of the first\npivot dimension, then lexicographic ordering of the second pivot\ndimension, and so on. 
E.g., if the totals for the first value for\nFirefox, IE, and Chrome were 8, 2, 8, respectively, the order of columns\nwould be Chrome, Firefox, IE.\n\nThe following let you choose which of the groups of k columns are\nincluded in the response.", - "type": "integer", - "format": "int32" } }, - "id": "Pivot" + "id": "ColumnHeader" }, - "DateRangeValues": { - "description": "Used to return a list of metrics for a single DateRange / dimension\ncombination", + "SegmentFilterClause": { + "description": "Filter Clause to be used in a segment definition, can be wither a metric or\na dimension filter.", "type": "object", "properties": { - "values": { - "description": "Each value corresponds to each Metric in the request.", - "type": "array", - "items": { - "type": "string" - } + "not": { + "type": "boolean", + "description": "Matches the complement (`!`) of the filter." }, - "pivotValueRegions": { - "description": "The values of each pivot region.", - "type": "array", - "items": { - "$ref": "PivotValueRegion" - } + "dimensionFilter": { + "description": "Dimension Filter for the segment definition.", + "$ref": "SegmentDimensionFilter" + }, + "metricFilter": { + "$ref": "SegmentMetricFilter", + "description": "Metric Filter for the segment definition." } }, - "id": "DateRangeValues" + "id": "SegmentFilterClause" }, "MetricFilterClause": { + "id": "MetricFilterClause", "description": "Represents a group of metric filters.\nSet the operator value to specify how the filters are logically combined.", "type": "object", "properties": { "operator": { "description": "The operator for combining multiple metric filters. If unspecified, it is\ntreated as an `OR`.", - "enum": [ - "OPERATOR_UNSPECIFIED", - "OR", - "AND" - ], + "type": "string", "enumDescriptions": [ "Unspecified operator. It is treated as an `OR`.", "The logical `OR` operator.", "The logical `AND` operator." ], - "type": "string" + "enum": [ + "OPERATOR_UNSPECIFIED", + "OR", + "AND" + ] }, "filters": { - "description": "The repeated set of filters. They are logically combined based on the\noperator specified.", "type": "array", "items": { "$ref": "MetricFilter" - } - } - }, - "id": "MetricFilterClause" - }, - "Segment": { - "description": "The segment definition, if the report needs to be segmented.\nA Segment is a subset of the Analytics data. For example, of the entire\nset of users, one Segment might be users from a particular country or city.", - "type": "object", - "properties": { - "dynamicSegment": { - "description": "A dynamic segment definition in the request.", - "$ref": "DynamicSegment" - }, - "segmentId": { - "description": "The segment ID of a built-in or custom segment, for example `gaid::-3`.", - "type": "string" + }, + "description": "The repeated set of filters. They are logically combined based on the\noperator specified." } - }, - "id": "Segment" + } }, - "DateRange": { - "description": "A contiguous set of days: startDate, startDate + 1 day, ..., endDate.\nThe start and end dates are specified in\n[ISO8601](https://en.wikipedia.org/wiki/ISO_8601) date format `YYYY-MM-DD`.", + "Cohort": { "type": "object", "properties": { - "startDate": { - "description": "The start date for the query in the format `YYYY-MM-DD`.", + "name": { + "description": "A unique name for the cohort. 
If not defined name will be auto-generated\nwith values cohort_[1234...].", "type": "string" }, - "endDate": { - "description": "The end date for the query in the format `YYYY-MM-DD`.", + "dateRange": { + "$ref": "DateRange", + "description": "This is used for `FIRST_VISIT_DATE` cohort, the cohort selects users\nwhose first visit date is between start date and end date defined in the\nDateRange. The date ranges should be aligned for cohort requests. If the\nrequest contains `ga:cohortNthDay` it should be exactly one day long,\nif `ga:cohortNthWeek` it should be aligned to the week boundary (starting\nat Sunday and ending Saturday), and for `ga:cohortNthMonth` the date range\nshould be aligned to the month (starting at the first and ending on the\nlast day of the month).\nFor LTV requests there are no such restrictions.\nYou do not need to supply a date range for the\n`reportsRequest.dateRanges` field." + }, + "type": { + "enumDescriptions": [ + "If unspecified it's treated as `FIRST_VISIT_DATE`.", + "Cohorts that are selected based on first visit date." + ], + "enum": [ + "UNSPECIFIED_COHORT_TYPE", + "FIRST_VISIT_DATE" + ], + "description": "Type of the cohort. The only supported type as of now is\n`FIRST_VISIT_DATE`. If this field is unspecified the cohort is treated\nas `FIRST_VISIT_DATE` type cohort.", "type": "string" } }, - "id": "DateRange" + "id": "Cohort", + "description": "Defines a cohort. A cohort is a group of users who share a common\ncharacteristic. For example, all users with the same acquisition date\nbelong to the same cohort." }, "ReportRow": { - "description": "A row in the report.", - "type": "object", "properties": { "dimensions": { "description": "List of requested dimensions.", @@ -917,228 +917,230 @@ } } }, - "id": "ReportRow" + "id": "ReportRow", + "description": "A row in the report.", + "type": "object" }, - "CohortGroup": { - "description": "Defines a cohort group.\nFor example:\n\n \"cohortGroup\": {\n \"cohorts\": [{\n \"name\": \"cohort 1\",\n \"type\": \"FIRST_VISIT_DATE\",\n \"dateRange\": { \"startDate\": \"2015-08-01\", \"endDate\": \"2015-08-01\" }\n },{\n \"name\": \"cohort 2\"\n \"type\": \"FIRST_VISIT_DATE\"\n \"dateRange\": { \"startDate\": \"2015-07-01\", \"endDate\": \"2015-07-01\" }\n }]\n }", + "OrFiltersForSegment": { + "description": "A list of segment filters in the `OR` group are combined with the logical OR\noperator.", "type": "object", "properties": { - "lifetimeValue": { - "description": "Enable Life Time Value (LTV). LTV measures lifetime value for users\nacquired through different channels.\nPlease see:\n[Cohort Analysis](https://support.google.com/analytics/answer/6074676) and\n[Lifetime Value](https://support.google.com/analytics/answer/6182550)\nIf the value of lifetimeValue is false:\n\n- The metric values are similar to the values in the web interface cohort\n report.\n- The cohort definition date ranges must be aligned to the calendar week\n and month. i.e. 
while requesting `ga:cohortNthWeek` the `startDate` in\n the cohort definition should be a Sunday and the `endDate` should be the\n following Saturday, and for `ga:cohortNthMonth`, the `startDate`\n should be the 1st of the month and `endDate` should be the last day\n of the month.\n\nWhen the lifetimeValue is true:\n\n- The metric values will correspond to the values in the web interface\n LifeTime value report.\n- The Lifetime Value report shows you how user value (Revenue) and\n engagement (Appviews, Goal Completions, Sessions, and Session Duration)\n grow during the 90 days after a user is acquired.\n- The metrics are calculated as a cumulative average per user per the time\n increment.\n- The cohort definition date ranges need not be aligned to the calendar\n week and month boundaries.\n- The `viewId` must be an\n [app view ID](https://support.google.com/analytics/answer/2649553#WebVersusAppViews)", - "type": "boolean" - }, - "cohorts": { - "description": "The definition for the cohort.", + "segmentFilterClauses": { + "description": "List of segment filters to be combined with a `OR` operator.", "type": "array", "items": { - "$ref": "Cohort" + "$ref": "SegmentFilterClause" } } }, - "id": "CohortGroup" + "id": "OrFiltersForSegment" }, - "GetReportsResponse": { - "description": "The main response class which holds the reports from the Reporting API\n`batchGet` call.", + "MetricHeader": { + "description": "The headers for the metrics.", "type": "object", "properties": { - "reports": { - "description": "Responses corresponding to each of the request.", + "metricHeaderEntries": { + "description": "Headers for the metrics in the response.", + "type": "array", + "items": { + "$ref": "MetricHeaderEntry" + } + }, + "pivotHeaders": { + "description": "Headers for the pivots in the response.", "type": "array", "items": { - "$ref": "Report" + "$ref": "PivotHeader" } } }, - "id": "GetReportsResponse" + "id": "MetricHeader" }, - "MetricHeaderEntry": { - "description": "Header for the metrics.", + "DimensionFilterClause": { + "description": "A group of dimension filters. Set the operator value to specify how\nthe filters are logically combined.", "type": "object", "properties": { - "type": { - "description": "The type of the metric, for example `INTEGER`.", - "enum": [ - "METRIC_TYPE_UNSPECIFIED", - "INTEGER", - "FLOAT", - "CURRENCY", - "PERCENT", - "TIME" - ], + "operator": { + "description": "The operator for combining multiple dimension filters. If unspecified, it\nis treated as an `OR`.", + "type": "string", "enumDescriptions": [ - "Metric type is unspecified.", - "Integer metric.", - "Float metric.", - "Currency metric.", - "Percentage metric.", - "Time metric in `HH:MM:SS` format." + "Unspecified operator. It is treated as an `OR`.", + "The logical `OR` operator.", + "The logical `AND` operator." ], - "type": "string" - }, - "name": { - "description": "The name of the header.", - "type": "string" - } - }, - "id": "MetricHeaderEntry" - }, - "MetricFilter": { - "description": "MetricFilter specifies the filter on a metric.", - "type": "object", - "properties": { - "metricName": { - "description": "The metric that will be filtered on. A metricFilter must contain a metric\nname. A metric name can be an alias earlier defined as a metric or it can\nalso be a metric expression.", - "type": "string" - }, - "operator": { - "description": "Is the metric `EQUAL`, `LESS_THAN` or `GREATER_THAN` the\ncomparisonValue, the default is `EQUAL`. 
If the operator is\n`IS_MISSING`, checks if the metric is missing and would ignore the\ncomparisonValue.", "enum": [ "OPERATOR_UNSPECIFIED", - "EQUAL", - "LESS_THAN", - "GREATER_THAN", - "IS_MISSING" - ], - "enumDescriptions": [ - "If the operator is not specified, it is treated as `EQUAL`.", - "Should the value of the metric be exactly equal to the comparison value.", - "Should the value of the metric be less than to the comparison value.", - "Should the value of the metric be greater than to the comparison value.", - "Validates if the metric is missing.\nDoesn't take comparisonValue into account." - ], - "type": "string" - }, - "comparisonValue": { - "description": "The value to compare against.", - "type": "string" + "OR", + "AND" + ] }, - "not": { - "description": "Logical `NOT` operator. If this boolean is set to true, then the matching\nmetric values will be excluded in the report. The default is false.", - "type": "boolean" + "filters": { + "type": "array", + "items": { + "$ref": "DimensionFilter" + }, + "description": "The repeated set of filters. They are logically combined based on the\noperator specified." } }, - "id": "MetricFilter" + "id": "DimensionFilterClause" }, - "Dimension": { - "description": "[Dimensions](https://support.google.com/analytics/answer/1033861)\nare attributes of your data. For example, the dimension `ga:city`\nindicates the city, for example, \"Paris\" or \"New York\", from which\na session originates.", + "GetReportsResponse": { "type": "object", "properties": { - "histogramBuckets": { - "description": "If non-empty, we place dimension values into buckets after string to\nint64. Dimension values that are not the string representation of an\nintegral value will be converted to zero. The bucket values have to be in\nincreasing order. Each bucket is closed on the lower end, and open on the\nupper end. The \"first\" bucket includes all values less than the first\nboundary, the \"last\" bucket includes all values up to infinity. Dimension\nvalues that fall in a bucket get transformed to a new dimension value. For\nexample, if one gives a list of \"0, 1, 3, 4, 7\", then we return the\nfollowing buckets:\n\n- bucket #1: values \u003c 0, dimension value \"\u003c0\"\n- bucket #2: values in [0,1), dimension value \"0\"\n- bucket #3: values in [1,3), dimension value \"1-2\"\n- bucket #4: values in [3,4), dimension value \"3\"\n- bucket #5: values in [4,7), dimension value \"4-6\"\n- bucket #6: values \u003e= 7, dimension value \"7+\"\n\nNOTE: If you are applying histogram mutation on any dimension, and using\nthat dimension in sort, you will want to use the sort type\n`HISTOGRAM_BUCKET` for that purpose. Without that the dimension values\nwill be sorted according to dictionary\n(lexicographic) order. For example the ascending dictionary order is:\n\n \"\u003c50\", \"1001+\", \"121-1000\", \"50-120\"\n\nAnd the ascending `HISTOGRAM_BUCKET` order is:\n\n \"\u003c50\", \"50-120\", \"121-1000\", \"1001+\"\n\nThe client has to explicitly request `\"orderType\": \"HISTOGRAM_BUCKET\"`\nfor a histogram-mutated dimension.", + "reports": { + "description": "Responses corresponding to each of the request.", "type": "array", "items": { - "type": "string", - "format": "int64" + "$ref": "Report" } - }, - "name": { - "description": "Name of the dimension to fetch, for example `ga:browser`.", - "type": "string" } }, - "id": "Dimension" + "id": "GetReportsResponse", + "description": "The main response class which holds the reports from the Reporting API\n`batchGet` call." 
}, - "PivotValueRegion": { - "description": "The metric values in the pivot region.", - "type": "object", + "SequenceSegment": { "properties": { - "values": { - "description": "The values of the metrics in each of the pivot regions.", + "segmentSequenceSteps": { + "description": "The list of steps in the sequence.", "type": "array", "items": { - "type": "string" + "$ref": "SegmentSequenceStep" } + }, + "firstStepShouldMatchFirstHit": { + "description": "If set, first step condition must match the first hit of the visitor (in\nthe date range).", + "type": "boolean" } }, - "id": "PivotValueRegion" + "id": "SequenceSegment", + "description": "Sequence conditions consist of one or more steps, where each step is defined\nby one or more dimension/metric conditions. Multiple steps can be combined\nwith special sequence operators.", + "type": "object" } }, - "revision": "20161129", - "basePath": "", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version_module": "True", + "protocol": "rest", "canonicalName": "AnalyticsReporting", - "discoveryVersion": "v1", - "baseUrl": "https://analyticsreporting.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/analytics.readonly": { + "description": "View your Google Analytics data" + }, + "https://www.googleapis.com/auth/analytics": { + "description": "View and manage your Google Analytics data" + } + } + } + }, + "rootUrl": "https://analyticsreporting.googleapis.com/", + "ownerDomain": "google.com", "name": "analyticsreporting", + "batchPath": "batch", + "title": "Google Analytics Reporting API", + "ownerName": "Google", + "resources": { + "reports": { + "methods": { + "batchGet": { + "response": { + "$ref": "GetReportsResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/analytics", + "https://www.googleapis.com/auth/analytics.readonly" + ], + "parameters": {}, + "flatPath": "v4/reports:batchGet", + "path": "v4/reports:batchGet", + "id": "analyticsreporting.reports.batchGet", + "description": "Returns the Analytics data.", + "request": { + "$ref": "GetReportsRequest" + } + } + } + } + }, "parameters": { - "access_token": { - "description": "OAuth access token.", + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "type": "string", "location": "query" }, "prettyPrint": { + "location": "query", "description": "Returns response with indentations and line breaks.", - "default": "true", "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" + "default": "true" }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "uploadType": { + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", "type": "string", "location": "query" }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, "fields": { + "location": "query", "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "callback": { + "description": "JSONP", "type": "string", "location": "query" }, - "alt": { - "description": "Data format for response.", + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], "location": "query", "enum": [ - "json", - "media", - "proto" + "1", + "2" ], - "default": "json", + "description": "V1 error format.", + "type": "string" + }, + "alt": { "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", + "location": "query", + "description": "Data format for response.", + "default": "json", "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" + "json", + "media", + "proto" ], - "type": "string", - "location": "query" + "type": "string" }, - "callback": { - "description": "JSONP", + "access_token": { "type": "string", - "location": "query" + "location": "query", + "description": "OAuth access token." }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", "location": "query" }, "bearer_token": { @@ -1146,18 +1148,16 @@ "type": "string", "location": "query" }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "oauth_token": { "type": "string", - "location": "query" + "location": "query", + "description": "OAuth 2.0 token for the current user." 
} }, - "documentationLink": "https://developers.google.com/analytics/devguides/reporting/core/v4/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", "version": "v4", - "rootUrl": "https://analyticsreporting.googleapis.com/", - "kind": "discovery#restDescription" + "baseUrl": "https://analyticsreporting.googleapis.com/", + "kind": "discovery#restDescription", + "description": "Accesses Analytics report data.", + "servicePath": "", + "basePath": "" } diff --git a/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-gen.go b/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-gen.go index 055e3015d..41676f2f9 100644 --- a/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-gen.go +++ b/vendor/google.golang.org/api/analyticsreporting/v4/analyticsreporting-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Reports *ReportsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewReportsService(s *Service) *ReportsService { rs := &ReportsService{s: s} return rs @@ -2072,6 +2077,7 @@ func (c *ReportsBatchGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getreportsrequest) if err != nil { diff --git a/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-api.json b/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-api.json index a8f48d388..eb83a3b59 100644 --- a/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-api.json +++ b/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/POjkLwx_Ki840e_TX5U8lbxSuxE\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/7Hat7OD71pyaIh88VMr8fQ27dGA\"", "discoveryVersion": "v1", "id": "androidenterprise:v1", "name": "androidenterprise", "canonicalName": "Android Enterprise", "version": "v1", - "revision": "20170117", + "revision": "20170215", "title": "Google Play EMM API", "description": "Manages the deployment of apps to Android for Work users.", "ownerDomain": "google.com", @@ -298,11 +298,11 @@ "Device": { "id": "Device", "type": "object", - "description": "A device resource represents a mobile device managed by the EMM and belonging to a specific enterprise user.\n\nThis collection cannot be modified via the API; it is automatically populated as devices are set up to be managed.", + "description": "A Devices resource represents a mobile device managed by the EMM and belonging to a specific enterprise user.\n\nThis collection cannot be modified via the API. 
It is automatically populated as devices are set up to be managed.", "properties": { "androidId": { "type": "string", - "description": "The Google Play Services Android ID for the device encoded as a lowercase hex string, e.g. \"123456789abcdef0\"." + "description": "The Google Play Services Android ID for the device encoded as a lowercase hex string. For example, \"123456789abcdef0\"." }, "kind": { "type": "string", @@ -438,7 +438,7 @@ "Entitlement": { "id": "Entitlement", "type": "object", - "description": "The existence of an entitlement resource means that a user has the right to use a particular app on any of their devices. This might be because the app is free or because they have been allocated a license to the app from a group license purchased by the enterprise.\n\nIt should always be true that a user has an app installed on one of their devices only if they have an entitlement to it. So if an entitlement is deleted, the app will be uninstalled from all devices. Similarly if the user installs an app (and is permitted to do so), or the EMM triggers an install of the app, an entitlement to that app is automatically created. If this is impossible - e.g. the enterprise has not purchased sufficient licenses - then installation fails.\n\nNote that entitlements are always user specific, not device specific; a user may have an entitlement even though they have not installed the app anywhere. Once they have an entitlement they can install the app on multiple devices.\n\nThe API can be used to create an entitlement. If the app is a free app, a group license for that app is created. If it's a paid app, creating the entitlement consumes one license; it remains consumed until the entitlement is removed. Optionally an installation of the app on all the user's managed devices can be triggered at the time the entitlement is created. An entitlement cannot be created for an app if the app requires permissions that the enterprise has not yet accepted.\n\nEntitlements for paid apps that are due to purchases by the user on a non-managed profile will have \"userPurchase\" as entitlement reason; those entitlements cannot be removed via the API.", + "description": "The presence of an Entitlements resource indicates that a user has the right to use a particular app. Entitlements are user specific, not device specific. This allows a user with an entitlement to an app to install the app on all their devices. It's also possible for a user to hold an entitlement to an app without installing the app on any device.\n\nThe API can be used to create an entitlement. As an option, you can also use the API to trigger the installation of an app on all a user's managed devices at the same time the entitlement is created.\n\nIf the app is free, creating the entitlement also creates a group license for that app. For paid apps, creating the entitlement consumes one license, and that license remains consumed until the entitlement is removed. If the enterprise hasn't purchased enough licenses, then no entitlement is created and the installation fails. An entitlement is also not created for an app if the app requires permissions that the enterprise hasn't accepted.\n\nIf an entitlement is deleted, the app may be uninstalled from a user's device. As a best practice, uninstall the app by calling Installs.delete() before deleting the entitlement.\n\nEntitlements for apps that a user pays for on an unmanaged profile have \"userPurchase\" as the entitlement reason. 
These entitlements cannot be removed via the API.", "properties": { "kind": { "type": "string", @@ -447,11 +447,11 @@ }, "productId": { "type": "string", - "description": "The ID of the product that the entitlement is for, e.g. \"app:com.google.android.gm\"." + "description": "The ID of the product that the entitlement is for. For example, \"app:com.google.android.gm\"." }, "reason": { "type": "string", - "description": "The reason for the entitlement, e.g. \"free\" for free apps. This is temporary, it will be replaced by the acquisition kind field of group licenses." + "description": "The reason for the entitlement. For example, \"free\" for free apps. This property is temporary: it will be replaced by the acquisition kind field of group licenses." } } }, @@ -481,11 +481,11 @@ "properties": { "acquisitionKind": { "type": "string", - "description": "How this group license was acquired. \"bulkPurchase\" means that this group license object was created because the enterprise purchased licenses for this product; this is \"free\" otherwise (for free products)." + "description": "How this group license was acquired. \"bulkPurchase\" means that this Grouplicenses resource was created because the enterprise purchased licenses for this product; otherwise, the value is \"free\" (for free products)." }, "approval": { "type": "string", - "description": "Whether the product to which this group license relates is currently approved by the enterprise, as either \"approved\" or \"unapproved\". Products are approved when a group license is first created, but this approval may be revoked by an enterprise admin via Google Play. Unapproved products will not be visible to end users in collections and new entitlements to them should not normally be created." + "description": "Whether the product to which this group license relates is currently approved by the enterprise. Products are approved when a group license is first created, but this approval may be revoked by an enterprise admin via Google Play. Unapproved products will not be visible to end users in collections, and new entitlements to them should not normally be created." }, "kind": { "type": "string", @@ -499,12 +499,12 @@ }, "numPurchased": { "type": "integer", - "description": "The number of purchased licenses (possibly in multiple purchases). If this field is omitted then there is no limit on the number of licenses that can be provisioned (e.g. if the acquisition kind is \"free\").", + "description": "The number of purchased licenses (possibly in multiple purchases). If this field is omitted, then there is no limit on the number of licenses that can be provisioned (for example, if the acquisition kind is \"free\").", "format": "int32" }, "productId": { "type": "string", - "description": "The ID of the product that the license is for, e.g. \"app:com.google.android.gm\"." + "description": "The ID of the product that the license is for. For example, \"app:com.google.android.gm\"." } } }, @@ -549,7 +549,7 @@ "Install": { "id": "Install", "type": "object", - "description": "The existence of an install resource indicates that an app is installed on a particular device (or that an install is pending).\n\nThe API can be used to create an install resource using the update method. This triggers the actual install of the app on the device. If the user does not already have an entitlement for the app then an attempt is made to create one. If this fails (e.g. 
because the app is not free and there is no available license) then the creation of the install fails.\n\nThe API can also be used to update an installed app. If the update method is used on an existing install then the app will be updated to the latest available version.\n\nNote that it is not possible to force the installation of a specific version of an app; the version code is read-only.\n\nIf a user installs an app themselves (as permitted by the enterprise), then again an install resource and possibly an entitlement resource are automatically created.\n\nThe API can also be used to delete an install resource, which triggers the removal of the app from the device. Note that deleting an install does not automatically remove the corresponding entitlement, even if there are no remaining installs. The install resource will also be deleted if the user uninstalls the app themselves.", + "description": "The existence of an Installs resource indicates that an app is installed on a particular device (or that an install is pending).\n\nThe API can be used to create an install resource using the update method. This triggers the actual install of the app on the device. If the user does not already have an entitlement for the app, then an attempt is made to create one. If this fails (for example, because the app is not free and there is no available license), then the creation of the install fails.\n\nThe API can also be used to update an installed app. If the update method is used on an existing install, then the app will be updated to the latest available version.\n\nNote that it is not possible to force the installation of a specific version of an app: the version code is read-only.\n\nIf a user installs an app themselves (as permitted by the enterprise), then again an install resource and possibly an entitlement resource are automatically created.\n\nThe API can also be used to delete an install resource, which triggers the removal of the app from the device. Note that deleting an install does not automatically remove the corresponding entitlement, even if there are no remaining installs. The install resource will also be deleted if the user uninstalls the app themselves.", "properties": { "installState": { "type": "string", @@ -562,7 +562,7 @@ }, "productId": { "type": "string", - "description": "The ID of the product that the install is for, e.g. \"app:com.google.android.gm\"." + "description": "The ID of the product that the install is for. For example, \"app:com.google.android.gm\"." }, "versionCode": { "type": "integer", @@ -760,7 +760,7 @@ }, "managementType": { "type": "string", - "description": "Identifies the extent to which the device is controlled by an Android for Work EMM in various deployment configurations.\n\nPossible values include: \n- \"managedDevice\", a device that has the EMM's device policy controller (DPC) as the device owner, \n- \"managedProfile\", a device that has a work profile managed by the DPC (DPC is profile owner) in addition to a separate, personal profile that is unavailable to the DPC," + "description": "Identifies the extent to which the device is controlled by an Android EMM in various deployment configurations.\n\nPossible values include: \n- \"managedDevice\", a device where the DPC is set as device owner, \n- \"managedProfile\", a device where the DPC is set as profile owner." 
}, "userId": { "type": "string", @@ -881,11 +881,11 @@ "Permission": { "id": "Permission", "type": "object", - "description": "A permission represents some extra capability, to be granted to an Android app, which requires explicit consent. An enterprise admin must consent to these permissions on behalf of their users before an entitlement for the app can be created.\n\nThe permissions collection is read-only. The information provided for each permission (localized name and description) is intended to be used in the EMM user interface when obtaining consent from the enterprise.", + "description": "A Permissions resource represents some extra capability, to be granted to an Android app, which requires explicit consent. An enterprise admin must consent to these permissions on behalf of their users before an entitlement for the app can be created.\n\nThe permissions collection is read-only. The information provided for each permission (localized name and description) is intended to be used in the MDM user interface when obtaining consent from the enterprise.", "properties": { "description": { "type": "string", - "description": "A longer description of the permissions giving more details of what it affects." + "description": "A longer description of the Permissions resource, giving more details of what it affects." }, "kind": { "type": "string", @@ -916,7 +916,7 @@ }, "authorName": { "type": "string", - "description": "The name of the author of the product (e.g. the app developer)." + "description": "The name of the author of the product (for example, the app developer)." }, "detailsUrl": { "type": "string", @@ -1048,7 +1048,7 @@ }, "productSetBehavior": { "type": "string", - "description": "The interpretation of this product set. \"unknown\" should never be sent and ignored if received. \"whitelist\" means that this product set constitutes a whitelist. \"includeAll\" means that all products are accessible, including products that are approved, not approved, and even products where approval has been revoked. If the value is \"includeAll\", the value of the productId field is therefore ignored. If a value is not supplied, it is interpreted to be \"whitelist\" for backwards compatibility." + "description": "The interpretation of this product set. \"unknown\" should never be sent and is ignored if received. \"whitelist\" means that this product set constitutes a whitelist. \"includeAll\" means that all products are accessible, including products that are approved, products with revoked approval, and products that have never been approved. If the value is \"includeAll\", the value of the productId field is therefore ignored. If a value is not supplied, it is interpreted to be \"whitelist\" for backwards compatibility." } } }, @@ -1626,7 +1626,7 @@ "id": "androidenterprise.enterprises.delete", "path": "enterprises/{enterpriseId}", "httpMethod": "DELETE", - "description": "Deletes the binding between the EMM and enterprise. This is now deprecated; use this to unenroll customers that were previously enrolled with the 'insert' call, then enroll them again with the 'enroll' call.", + "description": "Deletes the binding between the EMM and enterprise. This is now deprecated. 
Use this method only to unenroll customers that were previously enrolled with the insert call, then enroll them again with the enroll call.", "parameters": { "enterpriseId": { "type": "string", @@ -1822,7 +1822,7 @@ "id": "androidenterprise.enterprises.pullNotificationSet", "path": "enterprises/pullNotificationSet", "httpMethod": "POST", - "description": "Pulls and returns a notification set for the enterprises associated with the service account authenticated for the request. The notification set may be empty if no notification are pending.\nA notification set returned needs to be acknowledged within 20 seconds by calling Enterprises\u200b.AcknowledgeNotificationSet, unless the notification set is empty.\nNotifications that are not acknowledged within the 20 seconds will eventually be included again in the response to another PullNotificationSet request, and those that are never acknowledged will ultimately be deleted according to the Google Cloud Platform Pub/Sub system policy.\nMultiple requests might be performed concurrently to retrieve notifications, in which case the pending notifications (if any) will be split among each caller, if any are pending.\nIf no notifications are present, an empty notification list is returned. Subsequent requests may return more notifications once they become available.", + "description": "Pulls and returns a notification set for the enterprises associated with the service account authenticated for the request. The notification set may be empty if no notification are pending.\nA notification set returned needs to be acknowledged within 20 seconds by calling Enterprises.AcknowledgeNotificationSet, unless the notification set is empty.\nNotifications that are not acknowledged within the 20 seconds will eventually be included again in the response to another PullNotificationSet request, and those that are never acknowledged will ultimately be deleted according to the Google Cloud Platform Pub/Sub system policy.\nMultiple requests might be performed concurrently to retrieve notifications, in which case the pending notifications (if any) will be split among each caller, if any are pending.\nIf no notifications are present, an empty notification list is returned. Subsequent requests may return more notifications once they become available.", "parameters": { "requestMode": { "type": "string", @@ -1872,7 +1872,7 @@ "id": "androidenterprise.enterprises.setAccount", "path": "enterprises/{enterpriseId}/account", "httpMethod": "PUT", - "description": "Set the account that will be used to authenticate to the API as the enterprise.", + "description": "Sets the account that will be used to authenticate to the API as the enterprise.", "parameters": { "enterpriseId": { "type": "string", @@ -1948,7 +1948,7 @@ "id": "androidenterprise.entitlements.delete", "path": "enterprises/{enterpriseId}/users/{userId}/entitlements/{entitlementId}", "httpMethod": "DELETE", - "description": "Removes an entitlement to an app for a user and uninstalls it.", + "description": "Removes an entitlement to an app for a user.", "parameters": { "enterpriseId": { "type": "string", @@ -2019,7 +2019,7 @@ "id": "androidenterprise.entitlements.list", "path": "enterprises/{enterpriseId}/users/{userId}/entitlements", "httpMethod": "GET", - "description": "List of all entitlements for the specified user. Only the ID is set.", + "description": "Lists all entitlements for the specified user. 
Only the ID is set.", "parameters": { "enterpriseId": { "type": "string", @@ -2356,7 +2356,7 @@ "id": "androidenterprise.installs.patch", "path": "enterprises/{enterpriseId}/users/{userId}/devices/{deviceId}/installs/{installId}", "httpMethod": "PATCH", - "description": "Requests to install the latest version of an app to a device. If the app is already installed then it is updated to the latest version if necessary. This method supports patch semantics.", + "description": "Requests to install the latest version of an app to a device. If the app is already installed, then it is updated to the latest version if necessary. This method supports patch semantics.", "parameters": { "deviceId": { "type": "string", @@ -2403,7 +2403,7 @@ "id": "androidenterprise.installs.update", "path": "enterprises/{enterpriseId}/users/{userId}/devices/{deviceId}/installs/{installId}", "httpMethod": "PUT", - "description": "Requests to install the latest version of an app to a device. If the app is already installed then it is updated to the latest version if necessary.", + "description": "Requests to install the latest version of an app to a device. If the app is already installed, then it is updated to the latest version if necessary.", "parameters": { "deviceId": { "type": "string", @@ -3087,7 +3087,7 @@ }, "token": { "type": "string", - "description": "A pagination token is contained in a request\u0092s response when there are more products. The token can be used in a subsequent request to obtain more products, and so forth. This parameter cannot be used in the initial request.", + "description": "A pagination token is contained in a request''s response when there are more products. The token can be used in a subsequent request to obtain more products, and so forth. This parameter cannot be used in the initial request.", "location": "query" } }, @@ -3873,7 +3873,7 @@ "id": "androidenterprise.users.setAvailableProductSet", "path": "enterprises/{enterpriseId}/users/{userId}/availableProductSet", "httpMethod": "PUT", - "description": "Modifies the set of products a user is entitled to access.", + "description": "Modifies the set of products that a user is entitled to access (referred to as whitelisted products). 
Only products that are approved or products that were previously approved (products with revoked approval) can be whitelisted.", "parameters": { "enterpriseId": { "type": "string", diff --git a/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-gen.go b/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-gen.go index 9b01033f2..3fbcac0a8 100644 --- a/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-gen.go +++ b/vendor/google.golang.org/api/androidenterprise/v1/androidenterprise-gen.go @@ -74,9 +74,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Devices *DevicesService @@ -114,6 +115,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDevicesService(s *Service) *DevicesService { rs := &DevicesService{s: s} return rs @@ -665,14 +670,14 @@ func (s *AuthenticationToken) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Device: A device resource represents a mobile device managed by the +// Device: A Devices resource represents a mobile device managed by the // EMM and belonging to a specific enterprise user. // -// This collection cannot be modified via the API; it is automatically +// This collection cannot be modified via the API. It is automatically // populated as devices are set up to be managed. type Device struct { // AndroidId: The Google Play Services Android ID for the device encoded - // as a lowercase hex string, e.g. "123456789abcdef0". + // as a lowercase hex string. For example, "123456789abcdef0". AndroidId string `json:"androidId,omitempty"` // Kind: Identifies what kind of resource this is. Value: the fixed @@ -968,50 +973,45 @@ func (s *EnterprisesSendTestPushNotificationResponse) MarshalJSON() ([]byte, err return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Entitlement: The existence of an entitlement resource means that a -// user has the right to use a particular app on any of their devices. -// This might be because the app is free or because they have been -// allocated a license to the app from a group license purchased by the -// enterprise. +// Entitlement: The presence of an Entitlements resource indicates that +// a user has the right to use a particular app. Entitlements are user +// specific, not device specific. This allows a user with an entitlement +// to an app to install the app on all their devices. It's also possible +// for a user to hold an entitlement to an app without installing the +// app on any device. // -// It should always be true that a user has an app installed on one of -// their devices only if they have an entitlement to it. So if an -// entitlement is deleted, the app will be uninstalled from all devices. -// Similarly if the user installs an app (and is permitted to do so), or -// the EMM triggers an install of the app, an entitlement to that app is -// automatically created. If this is impossible - e.g. 
the enterprise -// has not purchased sufficient licenses - then installation -// fails. +// The API can be used to create an entitlement. As an option, you can +// also use the API to trigger the installation of an app on all a +// user's managed devices at the same time the entitlement is +// created. // -// Note that entitlements are always user specific, not device specific; -// a user may have an entitlement even though they have not installed -// the app anywhere. Once they have an entitlement they can install the -// app on multiple devices. +// If the app is free, creating the entitlement also creates a group +// license for that app. For paid apps, creating the entitlement +// consumes one license, and that license remains consumed until the +// entitlement is removed. If the enterprise hasn't purchased enough +// licenses, then no entitlement is created and the installation fails. +// An entitlement is also not created for an app if the app requires +// permissions that the enterprise hasn't accepted. // -// The API can be used to create an entitlement. If the app is a free -// app, a group license for that app is created. If it's a paid app, -// creating the entitlement consumes one license; it remains consumed -// until the entitlement is removed. Optionally an installation of the -// app on all the user's managed devices can be triggered at the time -// the entitlement is created. An entitlement cannot be created for an -// app if the app requires permissions that the enterprise has not yet -// accepted. +// If an entitlement is deleted, the app may be uninstalled from a +// user's device. As a best practice, uninstall the app by calling +// Installs.delete() before deleting the entitlement. // -// Entitlements for paid apps that are due to purchases by the user on a -// non-managed profile will have "userPurchase" as entitlement reason; -// those entitlements cannot be removed via the API. +// Entitlements for apps that a user pays for on an unmanaged profile +// have "userPurchase" as the entitlement reason. These entitlements +// cannot be removed via the API. type Entitlement struct { // Kind: Identifies what kind of resource this is. Value: the fixed // string "androidenterprise#entitlement". Kind string `json:"kind,omitempty"` - // ProductId: The ID of the product that the entitlement is for, e.g. - // "app:com.google.android.gm". + // ProductId: The ID of the product that the entitlement is for. For + // example, "app:com.google.android.gm". ProductId string `json:"productId,omitempty"` - // Reason: The reason for the entitlement, e.g. "free" for free apps. - // This is temporary, it will be replaced by the acquisition kind field - // of group licenses. + // Reason: The reason for the entitlement. For example, "free" for free + // apps. This property is temporary: it will be replaced by the + // acquisition kind field of group licenses. Reason string `json:"reason,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1103,18 +1103,17 @@ func (s *EntitlementsListResponse) MarshalJSON() ([]byte, error) { // Play. type GroupLicense struct { // AcquisitionKind: How this group license was acquired. "bulkPurchase" - // means that this group license object was created because the - // enterprise purchased licenses for this product; this is "free" - // otherwise (for free products). 
+ // means that this Grouplicenses resource was created because the + // enterprise purchased licenses for this product; otherwise, the value + // is "free" (for free products). AcquisitionKind string `json:"acquisitionKind,omitempty"` // Approval: Whether the product to which this group license relates is - // currently approved by the enterprise, as either "approved" or - // "unapproved". Products are approved when a group license is first - // created, but this approval may be revoked by an enterprise admin via - // Google Play. Unapproved products will not be visible to end users in - // collections and new entitlements to them should not normally be - // created. + // currently approved by the enterprise. Products are approved when a + // group license is first created, but this approval may be revoked by + // an enterprise admin via Google Play. Unapproved products will not be + // visible to end users in collections, and new entitlements to them + // should not normally be created. Approval string `json:"approval,omitempty"` // Kind: Identifies what kind of resource this is. Value: the fixed @@ -1127,13 +1126,13 @@ type GroupLicense struct { NumProvisioned int64 `json:"numProvisioned,omitempty"` // NumPurchased: The number of purchased licenses (possibly in multiple - // purchases). If this field is omitted then there is no limit on the - // number of licenses that can be provisioned (e.g. if the acquisition - // kind is "free"). + // purchases). If this field is omitted, then there is no limit on the + // number of licenses that can be provisioned (for example, if the + // acquisition kind is "free"). NumPurchased int64 `json:"numPurchased,omitempty"` - // ProductId: The ID of the product that the license is for, e.g. - // "app:com.google.android.gm". + // ProductId: The ID of the product that the license is for. For + // example, "app:com.google.android.gm". ProductId string `json:"productId,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1239,23 +1238,23 @@ func (s *GroupLicensesListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Install: The existence of an install resource indicates that an app +// Install: The existence of an Installs resource indicates that an app // is installed on a particular device (or that an install is // pending). // // The API can be used to create an install resource using the update // method. This triggers the actual install of the app on the device. If -// the user does not already have an entitlement for the app then an -// attempt is made to create one. If this fails (e.g. because the app is -// not free and there is no available license) then the creation of the -// install fails. +// the user does not already have an entitlement for the app, then an +// attempt is made to create one. If this fails (for example, because +// the app is not free and there is no available license), then the +// creation of the install fails. // -// The API can also be used to update an installed app. If the update -// method is used on an existing install then the app will be updated to -// the latest available version. +// The API can also be used to update an installed app. If +// the update method is used on an existing install, then the app will +// be updated to the latest available version. // // Note that it is not possible to force the installation of a specific -// version of an app; the version code is read-only. 
+// version of an app: the version code is read-only. // // If a user installs an app themselves (as permitted by the // enterprise), then again an install resource and possibly an @@ -1278,8 +1277,8 @@ type Install struct { // string "androidenterprise#install". Kind string `json:"kind,omitempty"` - // ProductId: The ID of the product that the install is for, e.g. - // "app:com.google.android.gm". + // ProductId: The ID of the product that the install is for. For + // example, "app:com.google.android.gm". ProductId string `json:"productId,omitempty"` // VersionCode: The version of the installed product. Guaranteed to be @@ -1641,15 +1640,12 @@ type NewDeviceEvent struct { DeviceId string `json:"deviceId,omitempty"` // ManagementType: Identifies the extent to which the device is - // controlled by an Android for Work EMM in various deployment + // controlled by an Android EMM in various deployment // configurations. // // Possible values include: - // - "managedDevice", a device that has the EMM's device policy - // controller (DPC) as the device owner, - // - "managedProfile", a device that has a work profile managed by the - // DPC (DPC is profile owner) in addition to a separate, personal - // profile that is unavailable to the DPC, + // - "managedDevice", a device where the DPC is set as device owner, + // - "managedProfile", a device where the DPC is set as profile owner. ManagementType string `json:"managementType,omitempty"` // UserId: The ID of the user. This field will always be present. @@ -1853,18 +1849,18 @@ func (s *PageInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Permission: A permission represents some extra capability, to be -// granted to an Android app, which requires explicit consent. An +// Permission: A Permissions resource represents some extra capability, +// to be granted to an Android app, which requires explicit consent. An // enterprise admin must consent to these permissions on behalf of their // users before an entitlement for the app can be created. // // The permissions collection is read-only. The information provided for // each permission (localized name and description) is intended to be -// used in the EMM user interface when obtaining consent from the +// used in the MDM user interface when obtaining consent from the // enterprise. type Permission struct { - // Description: A longer description of the permissions giving more - // details of what it affects. + // Description: A longer description of the Permissions resource, giving + // more details of what it affects. Description string `json:"description,omitempty"` // Kind: Identifies what kind of resource this is. Value: the fixed @@ -1918,8 +1914,8 @@ type Product struct { // are not included. AppVersion []*AppVersion `json:"appVersion,omitempty"` - // AuthorName: The name of the author of the product (e.g. the app - // developer). + // AuthorName: The name of the author of the product (for example, the + // app developer). AuthorName string `json:"authorName,omitempty"` // DetailsUrl: A link to the (consumer) Google Play details page for the @@ -2154,13 +2150,13 @@ type ProductSet struct { ProductId []string `json:"productId,omitempty"` // ProductSetBehavior: The interpretation of this product set. "unknown" - // should never be sent and ignored if received. "whitelist" means that - // this product set constitutes a whitelist. 
"includeAll" means that all - // products are accessible, including products that are approved, not - // approved, and even products where approval has been revoked. If the - // value is "includeAll", the value of the productId field is therefore - // ignored. If a value is not supplied, it is interpreted to be - // "whitelist" for backwards compatibility. + // should never be sent and is ignored if received. "whitelist" means + // that this product set constitutes a whitelist. "includeAll" means + // that all products are accessible, including products that are + // approved, products with revoked approval, and products that have + // never been approved. If the value is "includeAll", the value of the + // productId field is therefore ignored. If a value is not supplied, it + // is interpreted to be "whitelist" for backwards compatibility. ProductSetBehavior string `json:"productSetBehavior,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2967,6 +2963,7 @@ func (c *DevicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3129,6 +3126,7 @@ func (c *DevicesGetStateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3284,6 +3282,7 @@ func (c *DevicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3429,6 +3428,7 @@ func (c *DevicesSetStateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.devicestate) if err != nil { @@ -3584,6 +3584,7 @@ func (c *EnterprisesAcknowledgeNotificationSetCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/acknowledgeNotificationSet") @@ -3686,6 +3687,7 @@ func (c *EnterprisesCompleteSignupCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/completeSignup") @@ -3813,6 +3815,7 @@ func (c *EnterprisesCreateWebTokenCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.administratorwebtokenspec) if err != nil { @@ -3907,9 +3910,9 @@ type EnterprisesDeleteCall struct { } // Delete: Deletes the binding between the EMM and enterprise. 
This is -// now deprecated; use this to unenroll customers that were previously -// enrolled with the 'insert' call, then enroll them again with the -// 'enroll' call. +// now deprecated. Use this method only to unenroll customers that were +// previously enrolled with the insert call, then enroll them again with +// the enroll call. func (r *EnterprisesService) Delete(enterpriseId string) *EnterprisesDeleteCall { c := &EnterprisesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.enterpriseId = enterpriseId @@ -3947,6 +3950,7 @@ func (c *EnterprisesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}") @@ -3972,7 +3976,7 @@ func (c *EnterprisesDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Deletes the binding between the EMM and enterprise. This is now deprecated; use this to unenroll customers that were previously enrolled with the 'insert' call, then enroll them again with the 'enroll' call.", + // "description": "Deletes the binding between the EMM and enterprise. This is now deprecated. Use this method only to unenroll customers that were previously enrolled with the insert call, then enroll them again with the enroll call.", // "httpMethod": "DELETE", // "id": "androidenterprise.enterprises.delete", // "parameterOrder": [ @@ -4043,6 +4047,7 @@ func (c *EnterprisesEnrollCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.enterprise) if err != nil { @@ -4184,6 +4189,7 @@ func (c *EnterprisesGenerateSignupUrlCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/signupUrl") @@ -4311,6 +4317,7 @@ func (c *EnterprisesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4474,6 +4481,7 @@ func (c *EnterprisesGetServiceAccountCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4626,6 +4634,7 @@ func (c *EnterprisesGetStoreLayoutCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4754,6 +4763,7 @@ func (c *EnterprisesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.enterprise) if err != nil { @@ -4896,6 +4906,7 @@ func (c *EnterprisesListCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4985,8 +4996,8 @@ type EnterprisesPullNotificationSetCall struct { // request. The notification set may be empty if no notification are // pending. // A notification set returned needs to be acknowledged within 20 -// seconds by calling Enterprises​.AcknowledgeNotificationSet, unless -// the notification set is empty. +// seconds by calling Enterprises.AcknowledgeNotificationSet, unless the +// notification set is empty. // Notifications that are not acknowledged within the 20 seconds will // eventually be included again in the response to another // PullNotificationSet request, and those that are never acknowledged @@ -5053,6 +5064,7 @@ func (c *EnterprisesPullNotificationSetCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/pullNotificationSet") @@ -5100,7 +5112,7 @@ func (c *EnterprisesPullNotificationSetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Pulls and returns a notification set for the enterprises associated with the service account authenticated for the request. The notification set may be empty if no notification are pending.\nA notification set returned needs to be acknowledged within 20 seconds by calling Enterprises​.AcknowledgeNotificationSet, unless the notification set is empty.\nNotifications that are not acknowledged within the 20 seconds will eventually be included again in the response to another PullNotificationSet request, and those that are never acknowledged will ultimately be deleted according to the Google Cloud Platform Pub/Sub system policy.\nMultiple requests might be performed concurrently to retrieve notifications, in which case the pending notifications (if any) will be split among each caller, if any are pending.\nIf no notifications are present, an empty notification list is returned. Subsequent requests may return more notifications once they become available.", + // "description": "Pulls and returns a notification set for the enterprises associated with the service account authenticated for the request. The notification set may be empty if no notification are pending.\nA notification set returned needs to be acknowledged within 20 seconds by calling Enterprises.AcknowledgeNotificationSet, unless the notification set is empty.\nNotifications that are not acknowledged within the 20 seconds will eventually be included again in the response to another PullNotificationSet request, and those that are never acknowledged will ultimately be deleted according to the Google Cloud Platform Pub/Sub system policy.\nMultiple requests might be performed concurrently to retrieve notifications, in which case the pending notifications (if any) will be split among each caller, if any are pending.\nIf no notifications are present, an empty notification list is returned. 
Subsequent requests may return more notifications once they become available.", // "httpMethod": "POST", // "id": "androidenterprise.enterprises.pullNotificationSet", // "parameters": { @@ -5179,6 +5191,7 @@ func (c *EnterprisesSendTestPushNotificationCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/sendTestPushNotification") @@ -5267,7 +5280,7 @@ type EnterprisesSetAccountCall struct { header_ http.Header } -// SetAccount: Set the account that will be used to authenticate to the +// SetAccount: Sets the account that will be used to authenticate to the // API as the enterprise. func (r *EnterprisesService) SetAccount(enterpriseId string, enterpriseaccount *EnterpriseAccount) *EnterprisesSetAccountCall { c := &EnterprisesSetAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5307,6 +5320,7 @@ func (c *EnterprisesSetAccountCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.enterpriseaccount) if err != nil { @@ -5362,7 +5376,7 @@ func (c *EnterprisesSetAccountCall) Do(opts ...googleapi.CallOption) (*Enterpris } return ret, nil // { - // "description": "Set the account that will be used to authenticate to the API as the enterprise.", + // "description": "Sets the account that will be used to authenticate to the API as the enterprise.", // "httpMethod": "PUT", // "id": "androidenterprise.enterprises.setAccount", // "parameterOrder": [ @@ -5447,6 +5461,7 @@ func (c *EnterprisesSetStoreLayoutCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.storelayout) if err != nil { @@ -5578,6 +5593,7 @@ func (c *EnterprisesUnenrollCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/unenroll") @@ -5637,8 +5653,7 @@ type EntitlementsDeleteCall struct { header_ http.Header } -// Delete: Removes an entitlement to an app for a user and uninstalls -// it. +// Delete: Removes an entitlement to an app for a user. 
func (r *EntitlementsService) Delete(enterpriseId string, userId string, entitlementId string) *EntitlementsDeleteCall { c := &EntitlementsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.enterpriseId = enterpriseId @@ -5678,6 +5693,7 @@ func (c *EntitlementsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}/entitlements/{entitlementId}") @@ -5705,7 +5721,7 @@ func (c *EntitlementsDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Removes an entitlement to an app for a user and uninstalls it.", + // "description": "Removes an entitlement to an app for a user.", // "httpMethod": "DELETE", // "id": "androidenterprise.entitlements.delete", // "parameterOrder": [ @@ -5804,6 +5820,7 @@ func (c *EntitlementsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5910,7 +5927,7 @@ type EntitlementsListCall struct { header_ http.Header } -// List: List of all entitlements for the specified user. Only the ID is +// List: Lists all entitlements for the specified user. Only the ID is // set. func (r *EntitlementsService) List(enterpriseId string, userId string) *EntitlementsListCall { c := &EntitlementsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5960,6 +5977,7 @@ func (c *EntitlementsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6014,7 +6032,7 @@ func (c *EntitlementsListCall) Do(opts ...googleapi.CallOption) (*EntitlementsLi } return ret, nil // { - // "description": "List of all entitlements for the specified user. Only the ID is set.", + // "description": "Lists all entitlements for the specified user. 
Only the ID is set.", // "httpMethod": "GET", // "id": "androidenterprise.entitlements.list", // "parameterOrder": [ @@ -6111,6 +6129,7 @@ func (c *EntitlementsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entitlement) if err != nil { @@ -6279,6 +6298,7 @@ func (c *EntitlementsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.entitlement) if err != nil { @@ -6445,6 +6465,7 @@ func (c *GrouplicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6591,6 +6612,7 @@ func (c *GrouplicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6731,6 +6753,7 @@ func (c *GrouplicenseusersListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6873,6 +6896,7 @@ func (c *InstallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}/devices/{deviceId}/installs/{installId}") @@ -7009,6 +7033,7 @@ func (c *InstallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7175,6 +7200,7 @@ func (c *InstallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7284,7 +7310,7 @@ type InstallsPatchCall struct { } // Patch: Requests to install the latest version of an app to a device. -// If the app is already installed then it is updated to the latest +// If the app is already installed, then it is updated to the latest // version if necessary. This method supports patch semantics. 
func (r *InstallsService) Patch(enterpriseId string, userId string, deviceId string, installId string, install *Install) *InstallsPatchCall { c := &InstallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7327,6 +7353,7 @@ func (c *InstallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.install) if err != nil { @@ -7385,7 +7412,7 @@ func (c *InstallsPatchCall) Do(opts ...googleapi.CallOption) (*Install, error) { } return ret, nil // { - // "description": "Requests to install the latest version of an app to a device. If the app is already installed then it is updated to the latest version if necessary. This method supports patch semantics.", + // "description": "Requests to install the latest version of an app to a device. If the app is already installed, then it is updated to the latest version if necessary. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "androidenterprise.installs.patch", // "parameterOrder": [ @@ -7449,7 +7476,7 @@ type InstallsUpdateCall struct { } // Update: Requests to install the latest version of an app to a device. -// If the app is already installed then it is updated to the latest +// If the app is already installed, then it is updated to the latest // version if necessary. func (r *InstallsService) Update(enterpriseId string, userId string, deviceId string, installId string, install *Install) *InstallsUpdateCall { c := &InstallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7492,6 +7519,7 @@ func (c *InstallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.install) if err != nil { @@ -7550,7 +7578,7 @@ func (c *InstallsUpdateCall) Do(opts ...googleapi.CallOption) (*Install, error) } return ret, nil // { - // "description": "Requests to install the latest version of an app to a device. If the app is already installed then it is updated to the latest version if necessary.", + // "description": "Requests to install the latest version of an app to a device. If the app is already installed, then it is updated to the latest version if necessary.", // "httpMethod": "PUT", // "id": "androidenterprise.installs.update", // "parameterOrder": [ @@ -7654,6 +7682,7 @@ func (c *ManagedconfigurationsfordeviceDeleteCall) doRequest(alt string) (*http. 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}/devices/{deviceId}/managedConfigurationsForDevice/{managedConfigurationForDeviceId}") @@ -7790,6 +7819,7 @@ func (c *ManagedconfigurationsfordeviceGetCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7956,6 +7986,7 @@ func (c *ManagedconfigurationsfordeviceListCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8109,6 +8140,7 @@ func (c *ManagedconfigurationsfordevicePatchCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedconfiguration) if err != nil { @@ -8273,6 +8305,7 @@ func (c *ManagedconfigurationsfordeviceUpdateCall) doRequest(alt string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedconfiguration) if err != nil { @@ -8433,6 +8466,7 @@ func (c *ManagedconfigurationsforuserDeleteCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}/managedConfigurationsForUser/{managedConfigurationForUserId}") @@ -8560,6 +8594,7 @@ func (c *ManagedconfigurationsforuserGetCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8716,6 +8751,7 @@ func (c *ManagedconfigurationsforuserListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8859,6 +8895,7 @@ func (c *ManagedconfigurationsforuserPatchCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedconfiguration) if err != nil { @@ -9013,6 +9050,7 @@ func (c *ManagedconfigurationsforuserUpdateCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedconfiguration) if err != nil { @@ -9179,6 +9217,7 @@ func (c *PermissionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9320,6 +9359,7 @@ func (c *ProductsApproveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.productsapproverequest) if err != nil { @@ -9451,6 +9491,7 @@ func (c *ProductsGenerateApprovalUrlCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/products/{productId}/generateApprovalUrl") @@ -9609,6 +9650,7 @@ func (c *ProductsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9774,6 +9816,7 @@ func (c *ProductsGetAppRestrictionsSchemaCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9927,6 +9970,7 @@ func (c *ProductsGetPermissionsCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10068,7 +10112,7 @@ func (c *ProductsListCall) Query(query string) *ProductsListCall { } // Token sets the optional parameter "token": A pagination token is -// contained in a request’s response when there are more products. The +// contained in a request''s response when there are more products. The // token can be used in a subsequent request to obtain more products, // and so forth. This parameter cannot be used in the initial request. func (c *ProductsListCall) Token(token string) *ProductsListCall { @@ -10117,6 +10161,7 @@ func (c *ProductsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10205,7 +10250,7 @@ func (c *ProductsListCall) Do(opts ...googleapi.CallOption) (*ProductsListRespon // "type": "string" // }, // "token": { - // "description": "A pagination token is contained in a request’s response when there are more products. The token can be used in a subsequent request to obtain more products, and so forth. This parameter cannot be used in the initial request.", + // "description": "A pagination token is contained in a request''s response when there are more products. The token can be used in a subsequent request to obtain more products, and so forth. 
This parameter cannot be used in the initial request.", // "location": "query", // "type": "string" // } @@ -10272,6 +10317,7 @@ func (c *ProductsUnapproveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/products/{productId}/unapprove") @@ -10381,6 +10427,7 @@ func (c *ServiceaccountkeysDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/serviceAccountKeys/{keyId}") @@ -10493,6 +10540,7 @@ func (c *ServiceaccountkeysInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.serviceaccountkey) if err != nil { @@ -10639,6 +10687,7 @@ func (c *ServiceaccountkeysListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10769,6 +10818,7 @@ func (c *StorelayoutclustersDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/storeLayout/pages/{pageId}/clusters/{clusterId}") @@ -10895,6 +10945,7 @@ func (c *StorelayoutclustersGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11041,6 +11092,7 @@ func (c *StorelayoutclustersInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.storecluster) if err != nil { @@ -11193,6 +11245,7 @@ func (c *StorelayoutclustersListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11333,6 +11386,7 @@ func (c *StorelayoutclustersPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.storecluster) if err != nil { @@ -11486,6 +11540,7 @@ func (c *StorelayoutclustersUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.storecluster) 
if err != nil { @@ -11635,6 +11690,7 @@ func (c *StorelayoutpagesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/storeLayout/pages/{pageId}") @@ -11751,6 +11807,7 @@ func (c *StorelayoutpagesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11887,6 +11944,7 @@ func (c *StorelayoutpagesInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.storepage) if err != nil { @@ -12029,6 +12087,7 @@ func (c *StorelayoutpagesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12160,6 +12219,7 @@ func (c *StorelayoutpagesPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.storepage) if err != nil { @@ -12303,6 +12363,7 @@ func (c *StorelayoutpagesUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.storepage) if err != nil { @@ -12444,6 +12505,7 @@ func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}") @@ -12554,6 +12616,7 @@ func (c *UsersGenerateAuthenticationTokenCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}/authenticationToken") @@ -12691,6 +12754,7 @@ func (c *UsersGenerateTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}/token") @@ -12835,6 +12899,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12983,6 +13048,7 @@ func (c *UsersGetAvailableProductSetCall) doRequest(alt string) 
(*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13125,6 +13191,7 @@ func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -13271,6 +13338,7 @@ func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13414,6 +13482,7 @@ func (c *UsersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -13556,6 +13625,7 @@ func (c *UsersRevokeTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "enterprises/{enterpriseId}/users/{userId}/token") @@ -13623,8 +13693,10 @@ type UsersSetAvailableProductSetCall struct { header_ http.Header } -// SetAvailableProductSet: Modifies the set of products a user is -// entitled to access. +// SetAvailableProductSet: Modifies the set of products that a user is +// entitled to access (referred to as whitelisted products). Only +// products that are approved or products that were previously approved +// (products with revoked approval) can be whitelisted. func (r *UsersService) SetAvailableProductSet(enterpriseId string, userId string, productset *ProductSet) *UsersSetAvailableProductSetCall { c := &UsersSetAvailableProductSetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.enterpriseId = enterpriseId @@ -13664,6 +13736,7 @@ func (c *UsersSetAvailableProductSetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.productset) if err != nil { @@ -13720,7 +13793,7 @@ func (c *UsersSetAvailableProductSetCall) Do(opts ...googleapi.CallOption) (*Pro } return ret, nil // { - // "description": "Modifies the set of products a user is entitled to access.", + // "description": "Modifies the set of products that a user is entitled to access (referred to as whitelisted products). 
Only products that are approved or products that were previously approved (products with revoked approval) can be whitelisted.", // "httpMethod": "PUT", // "id": "androidenterprise.users.setAvailableProductSet", // "parameterOrder": [ @@ -13812,6 +13885,7 @@ func (c *UsersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { diff --git a/vendor/google.golang.org/api/androidpublisher/v1.1/androidpublisher-gen.go b/vendor/google.golang.org/api/androidpublisher/v1.1/androidpublisher-gen.go index c4d799f6b..60c4317c2 100644 --- a/vendor/google.golang.org/api/androidpublisher/v1.1/androidpublisher-gen.go +++ b/vendor/google.golang.org/api/androidpublisher/v1.1/androidpublisher-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Inapppurchases *InapppurchasesService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewInapppurchasesService(s *Service) *InapppurchasesService { rs := &InapppurchasesService{s: s} return rs @@ -260,6 +265,7 @@ func (c *InapppurchasesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -407,6 +413,7 @@ func (c *PurchasesCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/subscriptions/{subscriptionId}/purchases/{token}/cancel") @@ -534,6 +541,7 @@ func (c *PurchasesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/androidpublisher/v1/androidpublisher-gen.go b/vendor/google.golang.org/api/androidpublisher/v1/androidpublisher-gen.go index dfa9987a8..62566d30f 100644 --- a/vendor/google.golang.org/api/androidpublisher/v1/androidpublisher-gen.go +++ b/vendor/google.golang.org/api/androidpublisher/v1/androidpublisher-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Purchases *PurchasesService } @@ -75,6 +76,10 
@@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewPurchasesService(s *Service) *PurchasesService { rs := &PurchasesService{s: s} return rs @@ -183,6 +188,7 @@ func (c *PurchasesCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/subscriptions/{subscriptionId}/purchases/{token}/cancel") @@ -310,6 +316,7 @@ func (c *PurchasesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go b/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go index 19217d416..ae0640b54 100644 --- a/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go +++ b/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Edits *EditsService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewEditsService(s *Service) *EditsService { rs := &EditsService{s: s} rs.Apklistings = NewEditsApklistingsService(s) @@ -2514,6 +2519,7 @@ func (c *EditsCommitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}:commit") @@ -2649,6 +2655,7 @@ func (c *EditsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}") @@ -2767,6 +2774,7 @@ func (c *EditsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2904,6 +2912,7 @@ func (c *EditsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.appedit) if err != nil { @@ -3038,6 +3047,7 @@ func (c *EditsValidateCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}:validate") @@ -3176,6 +3186,7 @@ func (c *EditsApklistingsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/apks/{apkVersionCode}/listings/{language}") @@ -3301,6 +3312,7 @@ func (c *EditsApklistingsDeleteallCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/apks/{apkVersionCode}/listings") @@ -3431,6 +3443,7 @@ func (c *EditsApklistingsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3598,6 +3611,7 @@ func (c *EditsApklistingsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3751,6 +3765,7 @@ func (c *EditsApklistingsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.apklisting) if err != nil { @@ -3916,6 +3931,7 @@ func (c *EditsApklistingsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.apklisting) if err != nil { @@ -4080,6 +4096,7 @@ func (c *EditsApksAddexternallyhostedCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.apksaddexternallyhostedrequest) if err != nil { @@ -4232,6 +4249,7 @@ func (c *EditsApksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4420,6 +4438,7 @@ func (c *EditsApksUploadCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/apks") @@ -4676,6 +4695,7 @@ func (c *EditsDeobfuscationfilesUploadCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/apks/{apkVersionCode}/deobfuscationFiles/{deobfuscationFileType}") @@ -4908,6 +4928,7 @@ func (c *EditsDetailsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5047,6 +5068,7 @@ func (c *EditsDetailsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.appdetails) if err != nil { @@ -5190,6 +5212,7 @@ func (c *EditsDetailsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.appdetails) if err != nil { @@ -5346,6 +5369,7 @@ func (c *EditsExpansionfilesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5514,6 +5538,7 @@ func (c *EditsExpansionfilesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.expansionfile) if err != nil { @@ -5687,6 +5712,7 @@ func (c *EditsExpansionfilesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.expansionfile) if err != nil { @@ -5910,6 +5936,7 @@ func (c *EditsExpansionfilesUploadCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/apks/{apkVersionCode}/expansionFiles/{expansionFileType}") @@ -6138,6 +6165,7 @@ func (c *EditsImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/listings/{language}/{imageType}/{imageId}") @@ -6293,6 +6321,7 @@ func (c *EditsImagesDeleteallCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/listings/{language}/{imageType}") @@ -6478,6 +6507,7 @@ func (c *EditsImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6709,6 +6739,7 @@ func (c *EditsImagesUploadCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/listings/{language}/{imageType}") @@ -6946,6 +6977,7 @@ func (c *EditsListingsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/listings/{language}") @@ -7059,6 +7091,7 @@ func (c *EditsListingsDeleteallCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/edits/{editId}/listings") @@ -7177,6 +7210,7 @@ func (c *EditsListingsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7333,6 +7367,7 @@ func (c *EditsListingsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7474,6 +7509,7 @@ func (c *EditsListingsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.listing) if err != nil { @@ -7627,6 +7663,7 @@ func (c *EditsListingsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.listing) if err != nil { @@ -7789,6 +7826,7 @@ func (c *EditsTestersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7947,6 +7985,7 @@ func (c *EditsTestersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testers) if err != nil { @@ -8110,6 +8149,7 @@ func (c *EditsTestersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testers) if err != nil { @@ 
-8283,6 +8323,7 @@ func (c *EditsTracksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8450,6 +8491,7 @@ func (c *EditsTracksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8593,6 +8635,7 @@ func (c *EditsTracksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.track2) if err != nil { @@ -8760,6 +8803,7 @@ func (c *EditsTracksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.track2) if err != nil { @@ -8957,6 +9001,7 @@ func (c *EntitlementsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9099,6 +9144,7 @@ func (c *InappproductsBatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.inappproductsbatchrequest) if err != nil { @@ -9217,6 +9263,7 @@ func (c *InappproductsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/inappproducts/{sku}") @@ -9333,6 +9380,7 @@ func (c *InappproductsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9478,6 +9526,7 @@ func (c *InappproductsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.inappproduct) if err != nil { @@ -9644,6 +9693,7 @@ func (c *InappproductsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9799,6 +9849,7 @@ func (c *InappproductsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.inappproduct) if err != nil { @@ 
-9957,6 +10008,7 @@ func (c *InappproductsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.inappproduct) if err != nil { @@ -10116,6 +10168,7 @@ func (c *PurchasesProductsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10263,6 +10316,7 @@ func (c *PurchasesSubscriptionsCancelCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:cancel") @@ -10381,6 +10435,7 @@ func (c *PurchasesSubscriptionsDeferCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscriptionpurchasesdeferrequest) if err != nil { @@ -10545,6 +10600,7 @@ func (c *PurchasesSubscriptionsGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10693,6 +10749,7 @@ func (c *PurchasesSubscriptionsRefundCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:refund") @@ -10810,6 +10867,7 @@ func (c *PurchasesSubscriptionsRevokeCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:revoke") @@ -10971,6 +11029,7 @@ func (c *PurchasesVoidedpurchasesListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11143,6 +11202,7 @@ func (c *ReviewsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11316,6 +11376,7 @@ func (c *ReviewsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11464,6 +11525,7 @@ func (c *ReviewsReplyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v 
} reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reviewsreplyrequest) if err != nil { diff --git a/vendor/google.golang.org/api/api-list.json b/vendor/google.golang.org/api/api-list.json index 30384f911..61564117c 100644 --- a/vendor/google.golang.org/api/api-list.json +++ b/vendor/google.golang.org/api/api-list.json @@ -338,12 +338,42 @@ "documentationLink": "https://developers.google.com/android-publisher", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "appengine:v1alpha", + "name": "appengine", + "version": "v1alpha", + "title": "Google App Engine Admin API", + "description": "The App Engine Admin API enables developers to provision and manage their App Engine applications.", + "discoveryRestUrl": "https://appengine.googleapis.com/$discovery/rest?version=v1alpha", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "appengine:v1beta", + "name": "appengine", + "version": "v1beta", + "title": "Google App Engine Admin API", + "description": "The App Engine Admin API enables developers to provision and manage their App Engine applications.", + "discoveryRestUrl": "https://appengine.googleapis.com/$discovery/rest?version=v1beta", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "preferred": false + }, { "kind": "discovery#directoryItem", "id": "appsactivity:v1", "name": "appsactivity", "version": "v1", - "title": "Google Apps Activity API", + "title": "G Suite Activity API", "description": "Provides a historical view of activity.", "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/appsactivity/v1/rest", "discoveryLink": "./apis/appsactivity/v1/rest", @@ -578,6 +608,21 @@ "documentationLink": "https://cloud.google.com/resource-manager", "preferred": false }, + { + "kind": "discovery#directoryItem", + "id": "cloudtrace:v1", + "name": "cloudtrace", + "version": "v1", + "title": "Stackdriver Trace API", + "description": "Send and retrieve trace data from Stackdriver Trace. Data is generated and available by default for all App Engine applications. 
Data from other applications can be written to Stackdriver Trace for display, reporting, and analysis.", + "discoveryRestUrl": "https://cloudtrace.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/trace", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "clouduseraccounts:alpha", @@ -753,6 +798,66 @@ "documentationLink": "https://developers.google.com/custom-search/v1/using_rest", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "dataflow:v1b3", + "name": "dataflow", + "version": "v1b3", + "title": "Google Dataflow API", + "description": "Manages Google Cloud Dataflow projects on Google Cloud Platform.", + "discoveryRestUrl": "https://dataflow.googleapis.com/$discovery/rest?version=v1b3", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dataflow", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dataproc:v1alpha1", + "name": "dataproc", + "version": "v1alpha1", + "title": "Google Cloud Dataproc API", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "discoveryRestUrl": "https://dataproc.googleapis.com/$discovery/rest?version=v1alpha1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dataproc/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "dataproc:v1", + "name": "dataproc", + "version": "v1", + "title": "Google Cloud Dataproc API", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "discoveryRestUrl": "https://dataproc.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dataproc/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dataproc:v1beta1", + "name": "dataproc", + "version": "v1beta1", + "title": "Google Cloud Dataproc API", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "discoveryRestUrl": "https://dataproc.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dataproc/", + "preferred": false + }, { "kind": "discovery#directoryItem", "id": "datastore:v1", @@ -1289,21 +1394,6 @@ "documentationLink": "https://cloud.google.com/billing/", "preferred": true }, - { - "kind": "discovery#directoryItem", - "id": "cloudtrace:v1", - "name": "cloudtrace", - "version": "v1", - "title": "Google Cloud Trace API", - "description": "Send and retrieve trace data from Google Cloud Trace. Data is generated and available by default for all App Engine applications. 
Data from other applications can be written to Cloud Trace for display, reporting, and analysis.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/cloudtrace/v1/rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/tools/cloud-trace", - "preferred": true - }, { "kind": "discovery#directoryItem", "id": "container:v1", @@ -1319,81 +1409,6 @@ "documentationLink": "https://cloud.google.com/container-engine/", "preferred": true }, - { - "kind": "discovery#directoryItem", - "id": "dataflow:v1b3", - "name": "dataflow", - "version": "v1b3", - "title": "Google Dataflow API", - "description": "Manages Google Cloud Dataflow projects on Google Cloud Platform.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dataflow/v1b3/rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/dataflow", - "preferred": true - }, - { - "kind": "discovery#directoryItem", - "id": "dataproc:v1alpha1", - "name": "dataproc", - "version": "v1alpha1", - "title": "Google Cloud Dataproc API", - "description": "An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dataproc/v1alpha1/rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/dataproc/", - "preferred": false - }, - { - "kind": "discovery#directoryItem", - "id": "dataproc:v1", - "name": "dataproc", - "version": "v1", - "title": "Google Cloud Dataproc API", - "description": "An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dataproc/v1/rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/dataproc/", - "preferred": true - }, - { - "kind": "discovery#directoryItem", - "id": "dataproc:v1beta1", - "name": "dataproc", - "version": "v1beta1", - "title": "Google Cloud Dataproc API", - "description": "An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dataproc/v1beta1/rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/dataproc/", - "preferred": false - }, - { - "kind": "discovery#directoryItem", - "id": "iam:v1", - "name": "iam", - "version": "v1", - "title": "Google Identity and Access Management (IAM) API", - "description": "Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/iam/v1/rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": 
"https://cloud.google.com/iam/", - "preferred": true - }, { "kind": "discovery#directoryItem", "id": "partners:v2", @@ -1409,21 +1424,6 @@ "documentationLink": "https://developers.google.com/partners/", "preferred": true }, - { - "kind": "discovery#directoryItem", - "id": "people:v1", - "name": "people", - "version": "v1", - "title": "Google People API", - "description": "The Google People API service gives access to information about profiles and contacts.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/people/v1/rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://developers.google.com/people/", - "preferred": true - }, { "kind": "discovery#directoryItem", "id": "playmoviespartner:v1", @@ -1471,32 +1471,32 @@ }, { "kind": "discovery#directoryItem", - "id": "script:v1", - "name": "script", + "id": "storagetransfer:v1", + "name": "storagetransfer", "version": "v1", - "title": "Google Apps Script Execution API", - "description": "Executes Google Apps Script projects.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/script/v1/rest", + "title": "Google Storage Transfer API", + "description": "Transfers data from external data sources to a Google Cloud Storage bucket or between Google Cloud Storage buckets.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/storagetransfer/v1/rest", "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", "x32": "http://www.google.com/images/icons/product/search-32.gif" }, - "documentationLink": "https://developers.google.com/apps-script/execution/rest/v1/scripts/run", + "documentationLink": "https://cloud.google.com/storage/transfer", "preferred": true }, { "kind": "discovery#directoryItem", - "id": "storagetransfer:v1", - "name": "storagetransfer", + "id": "iam:v1", + "name": "iam", "version": "v1", - "title": "Google Storage Transfer API", - "description": "Transfers data from external data sources to a Google Cloud Storage bucket or between Google Cloud Storage buckets.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/storagetransfer/v1/rest", + "title": "Google Identity and Access Management (IAM) API", + "description": "Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.", + "discoveryRestUrl": "https://iam.googleapis.com/$discovery/rest?version=v1", "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" }, - "documentationLink": "https://cloud.google.com/storage/transfer", + "documentationLink": "https://cloud.google.com/iam/", "preferred": true }, { @@ -1645,7 +1645,7 @@ "id": "ml:v1beta1", "name": "ml", "version": "v1beta1", - "title": "Google Cloud Machine Learning", + "title": "Google Cloud Machine Learning Engine", "description": "An API to enable creating and using machine learning models.", "discoveryRestUrl": "https://ml.googleapis.com/$discovery/rest?version=v1beta1", "icons": { @@ -1734,6 +1734,21 @@ "documentationLink": "https://developers.google.com/speed/docs/insights/v2/getting-started", "preferred": true }, + { + "kind": 
"discovery#directoryItem", + "id": "people:v1", + "name": "people", + "version": "v1", + "title": "Google People API", + "description": "Provides access to information about profiles and contacts.", + "discoveryRestUrl": "https://people.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/people/", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "plus:v1", @@ -2051,6 +2066,36 @@ "documentationLink": "https://cloud.google.com/deployment-manager/runtime-configurator/", "preferred": false }, + { + "kind": "discovery#directoryItem", + "id": "script:v1", + "name": "script", + "version": "v1", + "title": "Google Apps Script Execution API", + "description": "Executes Google Apps Script projects.", + "discoveryRestUrl": "https://script.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/apps-script/execution/rest/v1/scripts/run", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "searchconsole:v1", + "name": "searchconsole", + "version": "v1", + "title": "Google Search Console URL Testing Tools API", + "description": "Provides tools for running validation tests against single URLs", + "discoveryRestUrl": "https://searchconsole.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/webmaster-tools/search-console-api/", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "servicecontrol:v1", @@ -2081,6 +2126,21 @@ "documentationLink": "https://cloud.google.com/service-management/", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "serviceuser:v1", + "name": "serviceuser", + "version": "v1", + "title": "Google Service User API", + "description": "Enables services that service consumers want to use on Google Cloud Platform, lists the available or enabled services, or disables services that service consumers no longer use.", + "discoveryRestUrl": "https://serviceuser.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/service-management/", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "sheets:v4", @@ -2127,6 +2187,36 @@ "documentationLink": "https://developers.google.com/slides/", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "sourcerepo:v1", + "name": "sourcerepo", + "version": "v1", + "title": "Cloud Source Repositories API", + "description": "Access source code repositories hosted by Google.", + "discoveryRestUrl": "https://sourcerepo.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": 
"https://cloud.google.com/eap/cloud-repositories/cloud-sourcerepo-api", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "spanner:v1", + "name": "spanner", + "version": "v1", + "title": "Cloud Spanner API", + "description": "Cloud Spanner is a managed, mission-critical, globally consistent and scalable relational database service.", + "discoveryRestUrl": "https://spanner.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/spanner/", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "spectrum:v1explorer", @@ -2326,6 +2416,22 @@ "documentationLink": "https://developers.google.com/google-apps/tasks/firstapp", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "toolresults:v1beta3firstparty", + "name": "toolresults", + "version": "v1beta3firstparty", + "title": "Cloud Tool Results firstparty API", + "description": "Reads and publishes results from Cloud Test Lab.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/toolresults/v1beta3firstparty/rest", + "discoveryLink": "./apis/toolresults/v1beta3firstparty/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/cloud-test-lab/", + "preferred": false + }, { "kind": "discovery#directoryItem", "id": "toolresults:v1beta3", @@ -2342,6 +2448,21 @@ "documentationLink": "https://developers.google.com/cloud-test-lab/", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "tracing:v1", + "name": "tracing", + "version": "v1", + "title": "Google Tracing API", + "description": "Send and retrieve trace data from Google Stackdriver Trace.", + "discoveryRestUrl": "https://tracing.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/trace", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "translate:v2", diff --git a/vendor/google.golang.org/api/appengine/v1/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1/appengine-gen.go index 8c16dd252..6472c5dfe 100644 --- a/vendor/google.golang.org/api/appengine/v1/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1/appengine-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*APIService, error) { } type APIService struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Apps *AppsService } @@ -81,6 +82,10 @@ func (s *APIService) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAppsService(s *APIService) *AppsService { rs := &AppsService{s: s} rs.Locations = NewAppsLocationsService(s) @@ -2374,6 +2379,7 @@ func (c *AppsCreateCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) if err != nil { @@ -2502,6 +2508,7 @@ func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2644,6 +2651,7 @@ func (c *AppsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) if err != nil { @@ -2786,6 +2794,7 @@ func (c *AppsRepairCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.repairapplicationrequest) if err != nil { @@ -2930,6 +2939,7 @@ func (c *AppsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3099,6 +3109,7 @@ func (c *AppsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3279,6 +3290,7 @@ func (c *AppsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3451,6 +3463,7 @@ func (c *AppsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3618,6 +3631,7 @@ func (c *AppsServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/apps/{appsId}/services/{servicesId}") @@ -3762,6 +3776,7 @@ func (c *AppsServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3923,6 +3938,7 @@ func (c *AppsServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4114,6 +4130,7 @@ func (c *AppsServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.service) if err != nil { @@ -4267,6 +4284,7 @@ func (c *AppsServicesVersionsCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -4410,6 +4428,7 @@ func (c *AppsServicesVersionsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/apps/{appsId}/services/{servicesId}/versions/{versionsId}") @@ -4577,6 +4596,7 @@ func (c *AppsServicesVersionsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4768,6 +4788,7 @@ func (c *AppsServicesVersionsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4976,6 +4997,7 @@ func (c *AppsServicesVersionsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -5142,6 +5164,7 @@ func (c *AppsServicesVersionsInstancesDebugCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.debuginstancerequest) if err != nil { @@ -5303,6 +5326,7 @@ func (c *AppsServicesVersionsInstancesDeleteCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}") @@ -5467,6 +5491,7 @@ func (c *AppsServicesVersionsInstancesGetCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5648,6 +5673,7 @@ func (c *AppsServicesVersionsInstancesListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/appengine/v1alpha/appengine-api.json b/vendor/google.golang.org/api/appengine/v1alpha/appengine-api.json new file mode 100644 index 000000000..d6c99a338 --- /dev/null +++ b/vendor/google.golang.org/api/appengine/v1alpha/appengine-api.json @@ -0,0 +1,2522 @@ +{ + 
"resources": { + "apps": { + "methods": { + "repair": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the application to repair. Example: apps/myapp", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha/apps/{appsId}:repair", + "path": "v1alpha/apps/{appsId}:repair", + "id": "appengine.apps.repair", + "description": "Recreates the required App Engine features for the specified App Engine application, for example a Cloud Storage bucket or App Engine service account. Use this method if you receive an error message about a missing feature, for example, Error retrieving the App Engine service account.", + "request": { + "$ref": "RepairApplicationRequest" + } + }, + "get": { + "description": "Gets information about an application.", + "parameterOrder": [ + "appsId" + ], + "response": { + "$ref": "Application" + }, + "httpMethod": "GET", + "parameters": { + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the Application resource to get. Example: apps/myapp.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1alpha/apps/{appsId}", + "path": "v1alpha/apps/{appsId}", + "id": "appengine.apps.get" + }, + "patch": { + "request": { + "$ref": "Application" + }, + "description": "Updates the specified Application resource. You can update the following fields:\nauth_domain - Google authentication domain for controlling user access to the application.\ndefault_cookie_expiration - Cookie expiration policy for the application.", + "httpMethod": "PATCH", + "parameterOrder": [ + "appsId" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "updateMask": { + "description": "Standard field mask for the set of fields to be updated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the Application resource to update. Example: apps/myapp.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha/apps/{appsId}", + "id": "appengine.apps.patch", + "path": "v1alpha/apps/{appsId}" + }, + "create": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha/apps", + "path": "v1alpha/apps", + "id": "appengine.apps.create", + "request": { + "$ref": "Application" + }, + "description": "Creates an App Engine application for a Google Cloud Platform project. Required fields:\nid - The ID of the target Cloud Platform project.\nlocation - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/python/console/)." + } + }, + "resources": { + "operations": { + "methods": { + "list": { + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.", + "httpMethod": "GET", + "parameterOrder": [ + "appsId" + ], + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "filter": { + "location": "query", + "description": "The standard list filter.", + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. The name of the operation collection.", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "The standard list page token.", + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "type": "integer", + "location": "query" + } + }, + "flatPath": "v1alpha/apps/{appsId}/operations", + "id": "appengine.apps.operations.list", + "path": "v1alpha/apps/{appsId}/operations" + }, + "get": { + "flatPath": "v1alpha/apps/{appsId}/operations/{operationsId}", + "path": "v1alpha/apps/{appsId}/operations/{operationsId}", + "id": "appengine.apps.operations.get", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "operationsId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "appsId": { + "description": "Part of `name`. The name of the operation resource.", + "required": true, + "type": "string", + "location": "path" + }, + "operationsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + } + } + } + } + }, + "locations": { + "methods": { + "list": { + "description": "Lists information about the supported locations for this service.", + "response": { + "$ref": "ListLocationsResponse" + }, + "parameterOrder": [ + "appsId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "type": "string", + "location": "query" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. 
The resource that owns the locations collection, if applicable.", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "The standard list page token.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + } + }, + "flatPath": "v1alpha/apps/{appsId}/locations", + "path": "v1alpha/apps/{appsId}/locations", + "id": "appengine.apps.locations.list" + }, + "get": { + "response": { + "$ref": "Location" + }, + "parameterOrder": [ + "appsId", + "locationsId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "locationsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Resource name for the location.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha/apps/{appsId}/locations/{locationsId}", + "path": "v1alpha/apps/{appsId}/locations/{locationsId}", + "id": "appengine.apps.locations.get", + "description": "Get information about a location." + } + } + }, + "services": { + "methods": { + "list": { + "description": "Lists all the services in the application.", + "response": { + "$ref": "ListServicesResponse" + }, + "parameterOrder": [ + "appsId" + ], + "httpMethod": "GET", + "parameters": { + "appsId": { + "description": "Part of `parent`. Name of the parent Application resource. Example: apps/myapp.", + "required": true, + "type": "string", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "Continuation token for fetching the next page of results.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Maximum results to return per page.", + "format": "int32", + "type": "integer" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1alpha/apps/{appsId}/services", + "path": "v1alpha/apps/{appsId}/services", + "id": "appengine.apps.services.list" + }, + "get": { + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}", + "id": "appengine.apps.services.get", + "path": "v1alpha/apps/{appsId}/services/{servicesId}", + "description": "Gets the current configuration of the specified service.", + "httpMethod": "GET", + "parameterOrder": [ + "appsId", + "servicesId" + ], + "response": { + "$ref": "Service" + }, + "parameters": { + "servicesId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. 
Example: apps/myapp/services/default.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "patch": { + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}", + "path": "v1alpha/apps/{appsId}/services/{servicesId}", + "id": "appengine.apps.services.patch", + "description": "Updates the configuration of the specified service.", + "request": { + "$ref": "Service" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "updateMask": { + "description": "Standard field mask for the set of fields to be updated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + }, + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default.", + "required": true, + "type": "string" + }, + "migrateTraffic": { + "description": "Set to true to gradually shift traffic to one or more versions that you specify. By default, traffic is shifted immediately. For gradual traffic migration, the target versions must be located within instances that are configured for both warmup requests (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#inboundservicetype) and automatic scaling (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#automaticscaling). You must specify the shardBy (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services#shardby) field in the Service resource. Gradual traffic migration is not supported in the App Engine flexible environment. For examples, see Migrating and Splitting Traffic (https://cloud.google.com/appengine/docs/admin-api/migrating-splitting-traffic).", + "type": "boolean", + "location": "query" + } + } + }, + "delete": { + "description": "Deletes the specified service and all enclosed versions.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}", + "path": "v1alpha/apps/{appsId}/services/{servicesId}", + "id": "appengine.apps.services.delete" + } + }, + "resources": { + "versions": { + "methods": { + "delete": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "httpMethod": "DELETE", + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. 
Example: apps/myapp/services/default/versions/v1.", + "required": true, + "type": "string" + }, + "versionsId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "id": "appengine.apps.services.versions.delete", + "description": "Deletes an existing Version resource." + }, + "list": { + "description": "Lists the versions of a service.", + "response": { + "$ref": "ListVersionsResponse" + }, + "parameterOrder": [ + "appsId", + "servicesId" + ], + "httpMethod": "GET", + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `parent`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "description": "Part of `parent`. Name of the parent Service resource. Example: apps/myapp/services/default.", + "required": true, + "type": "string", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "Continuation token for fetching the next page of results.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Maximum results to return per page.", + "format": "int32", + "type": "integer" + }, + "view": { + "location": "query", + "enum": [ + "BASIC", + "FULL" + ], + "description": "Controls the set of fields returned in the List response.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + "id": "appengine.apps.services.versions.list" + }, + "get": { + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "id": "appengine.apps.services.versions.get", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "description": "Gets the specified Version resource. By default, only a BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get the full resource.", + "httpMethod": "GET", + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "response": { + "$ref": "Version" + }, + "parameters": { + "versionsId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "view": { + "location": "query", + "enum": [ + "BASIC", + "FULL" + ], + "description": "Controls the set of fields returned in the Get response.", + "type": "string" + }, + "servicesId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "appsId": { + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "patch": { + "description": "Updates the specified Version resource. 
You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.serving_status): For Version resources that use basic scaling, manual scaling, or run in the App Engine flexible environment.\ninstance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.instance_class): For Version resources that run in the App Engine standard environment.\nautomatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.\nautomatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.", + "request": { + "$ref": "Version" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "versionsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "updateMask": { + "location": "query", + "description": "Standard field mask for the set of fields to be updated.", + "format": "google-fieldmask", + "type": "string" + }, + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default/versions/1.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "id": "appengine.apps.services.versions.patch" + }, + "create": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `parent`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `parent`. Name of the parent resource to create this version under. Example: apps/myapp/services/default.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + "id": "appengine.apps.services.versions.create", + "description": "Deploys code and resource files to a new version.", + "request": { + "$ref": "Version" + } + } + }, + "resources": { + "instances": { + "methods": { + "delete": { + "httpMethod": "DELETE", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId", + "instancesId" + ], + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `name`. 
See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + "required": true, + "type": "string" + }, + "instancesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "versionsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + "id": "appengine.apps.services.versions.instances.delete", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + "description": "Stops a running instance." + }, + "list": { + "description": "Lists the instances of a version.", + "httpMethod": "GET", + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "response": { + "$ref": "ListInstancesResponse" + }, + "parameters": { + "pageSize": { + "location": "query", + "description": "Maximum results to return per page.", + "format": "int32", + "type": "integer" + }, + "versionsId": { + "location": "path", + "description": "Part of `parent`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "servicesId": { + "location": "path", + "description": "Part of `parent`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "description": "Part of `parent`. Name of the parent Version resource. Example: apps/myapp/services/default/versions/v1.", + "required": true, + "type": "string", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "Continuation token for fetching the next page of results.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances", + "id": "appengine.apps.services.versions.instances.list", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances" + }, + "get": { + "description": "Gets instance information.", + "httpMethod": "GET", + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId", + "instancesId" + ], + "response": { + "$ref": "Instance" + }, + "parameters": { + "servicesId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + "required": true, + "type": "string" + }, + "instancesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "versionsId": { + "description": "Part of `name`. 
See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + "id": "appengine.apps.services.versions.instances.get", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}" + }, + "debug": { + "description": "Enables debugging on a VM instance. This allows you to use the SSH command to connect to the virtual machine where the instance lives. While in \"debug mode\", the instance continues to serve live traffic. You should delete the instance when you are done debugging and then allow the system to take over and determine if another instance should be started.Only applicable for instances in App Engine flexible environment.", + "request": { + "$ref": "DebugInstanceRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId", + "instancesId" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "versionsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + "required": true, + "type": "string" + }, + "instancesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug", + "id": "appengine.apps.services.versions.instances.debug", + "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug" + } + } + } + } + } + } + } + } + } + }, + "parameters": { + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string", + "location": "query" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + } + }, + "schemas": { + "ListInstancesResponse": { + "description": "Response message for Instances.ListInstances.", + "type": "object", + "properties": { + "instances": { + "description": "The instances belonging to the requested version.", + "type": "array", + "items": { + "$ref": "Instance" + } + }, + "nextPageToken": { + "description": "Continuation token for fetching the next page of results.", + "type": "string" + } + }, + "id": "ListInstancesResponse" + }, + "OperationMetadataV1Alpha": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "warning": { + "description": "Durable messages that persist on every operation poll. @OutputOnly", + "type": "array", + "items": { + "type": "string" + } + }, + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + }, + "ephemeralMessage": { + "description": "Ephemeral message that may change every time the operation is polled. @OutputOnly", + "type": "string" + }, + "method": { + "description": "API method that initiated this operation. 
Example: google.appengine.v1alpha.Versions.CreateVersion.@OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadataV1Alpha" + }, + "UrlDispatchRule": { + "description": "Rules to match an HTTP request and dispatch that request to a service.", + "type": "object", + "properties": { + "path": { + "description": "Pathname within the host. Must start with a \"/\". A single \"*\" can be included at the end of the path. The sum of the lengths of the domain and path may not exceed 100 characters.", + "type": "string" + }, + "domain": { + "description": "Domain name to match against. The wildcard \"*\" is supported if specified before a period: \"*.\".Defaults to matching all domains: \"*\".", + "type": "string" + }, + "service": { + "description": "Resource ID of a service in this application that should serve the matched request. The service must already exist. Example: default.", + "type": "string" + } + }, + "id": "UrlDispatchRule" + }, + "ListVersionsResponse": { + "description": "Response message for Versions.ListVersions.", + "type": "object", + "properties": { + "versions": { + "description": "The versions belonging to the requested service.", + "type": "array", + "items": { + "$ref": "Version" + } + }, + "nextPageToken": { + "description": "Continuation token for fetching the next page of results.", + "type": "string" + } + }, + "id": "ListVersionsResponse" + }, + "ApiEndpointHandler": { + "description": "Uses Google Cloud Endpoints to handle requests.", + "type": "object", + "properties": { + "scriptPath": { + "description": "Path to the script from the application root directory.", + "type": "string" + } + }, + "id": "ApiEndpointHandler" + }, + "AutomaticScaling": { + "description": "Automatic scaling is based on request rate, response latencies, and other application metrics.", + "type": "object", + "properties": { + "diskUtilization": { + "$ref": "DiskUtilization", + "description": "Target scaling by disk usage." + }, + "minPendingLatency": { + "description": "Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it.", + "format": "google-duration", + "type": "string" + }, + "requestUtilization": { + "description": "Target scaling by request utilization.", + "$ref": "RequestUtilization" + }, + "maxIdleInstances": { + "description": "Maximum number of idle instances that should be maintained for this version.", + "format": "int32", + "type": "integer" + }, + "minIdleInstances": { + "description": "Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service.", + "format": "int32", + "type": "integer" + }, + "maxTotalInstances": { + "description": "Maximum number of instances that should be started to handle requests.", + "format": "int32", + "type": "integer" + }, + "minTotalInstances": { + "description": "Minimum number of instances that should be maintained for this version.", + "format": "int32", + "type": "integer" + }, + "networkUtilization": { + "$ref": "NetworkUtilization", + "description": "Target scaling by network usage." 
+ }, + "maxConcurrentRequests": { + "description": "Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance.Defaults to a runtime-specific value.", + "format": "int32", + "type": "integer" + }, + "coolDownPeriod": { + "description": "Amount of time that the Autoscaler (https://cloud.google.com/compute/docs/autoscaler/) should wait between changes to the number of virtual machines. Only applicable for VM runtimes.", + "format": "google-duration", + "type": "string" + }, + "maxPendingLatency": { + "description": "Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it.", + "format": "google-duration", + "type": "string" + }, + "cpuUtilization": { + "description": "Target scaling by CPU usage.", + "$ref": "CpuUtilization" + } + }, + "id": "AutomaticScaling" + }, + "ZipInfo": { + "description": "The zip file information for a zip deployment.", + "type": "object", + "properties": { + "sourceUrl": { + "description": "URL of the zip file to deploy from. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com/\u003cbucket\u003e/\u003cobject\u003e'.", + "type": "string" + }, + "filesCount": { + "description": "An estimate of the number of files in a zip for a zip deployment. If set, must be greater than or equal to the actual number of files. Used for optimizing performance; if not provided, deployment may be slow.", + "format": "int32", + "type": "integer" + } + }, + "id": "ZipInfo" + }, + "Library": { + "description": "Third-party Python runtime library that is required by the application.", + "type": "object", + "properties": { + "name": { + "description": "Name of the library. Example: \"django\".", + "type": "string" + }, + "version": { + "description": "Version of the library to select, or \"latest\".", + "type": "string" + } + }, + "id": "Library" + }, + "ListLocationsResponse": { + "description": "The response message for Locations.ListLocations.", + "type": "object", + "properties": { + "locations": { + "description": "A list of locations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Location" + } + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + } + }, + "id": "ListLocationsResponse" + }, + "ContainerInfo": { + "description": "Docker image that is used to create a container and start a VM instance for the version that you deploy. Only applicable for instances running in the App Engine flexible environment.", + "type": "object", + "properties": { + "image": { + "description": "URI to the hosted container image in Google Container Registry. The URI must be fully qualified and include a tag or digest. Examples: \"gcr.io/my-project/image:tag\" or \"gcr.io/my-project/image@digest\"", + "type": "string" + } + }, + "id": "ContainerInfo" + }, + "RequestUtilization": { + "description": "Target scaling by request utilization. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "targetConcurrentRequests": { + "description": "Target number of concurrent requests.", + "format": "int32", + "type": "integer" + }, + "targetRequestCountPerSecond": { + "description": "Target requests per second.", + "format": "int32", + "type": "integer" + } + }, + "id": "RequestUtilization" + }, + "EndpointsApiService": { + "description": "Cloud Endpoints (https://cloud.google.com/endpoints) configuration. 
The Endpoints API Service provides tooling for serving Open API and gRPC endpoints via an NGINX proxy.The fields here refer to the name and configuration id of a \"service\" resource in the Service Management API (https://cloud.google.com/service-management/overview).", + "type": "object", + "properties": { + "name": { + "description": "Endpoints service name which is the name of the \"service\" resource in the Service Management API. For example \"myapi.endpoints.myproject.cloud.goog\"", + "type": "string" + }, + "configId": { + "description": "Endpoints service configuration id as specified by the Service Management API. For example \"2016-09-19r1\"", + "type": "string" + } + }, + "id": "EndpointsApiService" + }, + "UrlMap": { + "description": "URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript.", + "type": "object", + "properties": { + "securityLevel": { + "enumDescriptions": [ + "Not specified.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used, and respond accordingly.", + "Requests for a URL that match this handler that use HTTPS are automatically redirected to the HTTP equivalent URL.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used and respond accordingly.", + "Requests for a URL that match this handler that do not use HTTPS are automatically redirected to the HTTPS URL with the same path. Query parameters are reserved for the redirect." + ], + "enum": [ + "SECURE_UNSPECIFIED", + "SECURE_DEFAULT", + "SECURE_NEVER", + "SECURE_OPTIONAL", + "SECURE_ALWAYS" + ], + "description": "Security (HTTPS) enforcement for this URL.", + "type": "string" + }, + "authFailAction": { + "description": "Action to take when users access resources that require authentication. Defaults to redirect.", + "type": "string", + "enumDescriptions": [ + "Not specified. AUTH_FAIL_ACTION_REDIRECT is assumed.", + "Redirects user to \"accounts.google.com\". The user is redirected back to the application URL after signing in or creating an account.", + "Rejects request with a 401 HTTP status code and an error message." + ], + "enum": [ + "AUTH_FAIL_ACTION_UNSPECIFIED", + "AUTH_FAIL_ACTION_REDIRECT", + "AUTH_FAIL_ACTION_UNAUTHORIZED" + ] + }, + "script": { + "$ref": "ScriptHandler", + "description": "Executes a script to handle the request that matches this URL pattern." + }, + "urlRegex": { + "description": "URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path.", + "type": "string" + }, + "login": { + "description": "Level of login required to access this resource.", + "type": "string", + "enumDescriptions": [ + "Not specified. LOGIN_OPTIONAL is assumed.", + "Does not require that the user is signed in.", + "If the user is not signed in, the auth_fail_action is taken. In addition, if the user is not an administrator for the application, they are given an error message regardless of auth_fail_action. 
If the user is an administrator, the handler proceeds.", + "If the user has signed in, the handler proceeds normally. Otherwise, the auth_fail_action is taken." + ], + "enum": [ + "LOGIN_UNSPECIFIED", + "LOGIN_OPTIONAL", + "LOGIN_ADMIN", + "LOGIN_REQUIRED" + ] + }, + "apiEndpoint": { + "$ref": "ApiEndpointHandler", + "description": "Uses API Endpoints to handle requests." + }, + "staticFiles": { + "$ref": "StaticFilesHandler", + "description": "Returns the contents of a file, such as an image, as the response." + }, + "redirectHttpResponseCode": { + "enumDescriptions": [ + "Not specified. 302 is assumed.", + "301 Moved Permanently code.", + "302 Moved Temporarily code.", + "303 See Other code.", + "307 Temporary Redirect code." + ], + "enum": [ + "REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED", + "REDIRECT_HTTP_RESPONSE_CODE_301", + "REDIRECT_HTTP_RESPONSE_CODE_302", + "REDIRECT_HTTP_RESPONSE_CODE_303", + "REDIRECT_HTTP_RESPONSE_CODE_307" + ], + "description": "30x code to use when performing redirects for the secure field. Defaults to 302.", + "type": "string" + } + }, + "id": "UrlMap" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "type": "object", + "properties": { + "done": { + "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", + "type": "boolean" + }, + "response": { + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should have the format of operations/some/unique/name.", + "type": "string" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + } + }, + "id": "Operation" + }, + "ApiConfigHandler": { + "description": "Google Cloud Endpoints (https://cloud.google.com/appengine/docs/python/endpoints/) configuration for API handlers.", + "type": "object", + "properties": { + "login": { + "description": "Level of login required to access this resource. Defaults to optional.", + "type": "string", + "enumDescriptions": [ + "Not specified. LOGIN_OPTIONAL is assumed.", + "Does not require that the user is signed in.", + "If the user is not signed in, the auth_fail_action is taken. 
In addition, if the user is not an administrator for the application, they are given an error message regardless of auth_fail_action. If the user is an administrator, the handler proceeds.", + "If the user has signed in, the handler proceeds normally. Otherwise, the auth_fail_action is taken." + ], + "enum": [ + "LOGIN_UNSPECIFIED", + "LOGIN_OPTIONAL", + "LOGIN_ADMIN", + "LOGIN_REQUIRED" + ] + }, + "url": { + "description": "URL to serve the endpoint at.", + "type": "string" + }, + "securityLevel": { + "description": "Security (HTTPS) enforcement for this URL.", + "type": "string", + "enumDescriptions": [ + "Not specified.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used, and respond accordingly.", + "Requests for a URL that match this handler that use HTTPS are automatically redirected to the HTTP equivalent URL.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used and respond accordingly.", + "Requests for a URL that match this handler that do not use HTTPS are automatically redirected to the HTTPS URL with the same path. Query parameters are reserved for the redirect." + ], + "enum": [ + "SECURE_UNSPECIFIED", + "SECURE_DEFAULT", + "SECURE_NEVER", + "SECURE_OPTIONAL", + "SECURE_ALWAYS" + ] + }, + "authFailAction": { + "description": "Action to take when users access resources that require authentication. Defaults to redirect.", + "type": "string", + "enumDescriptions": [ + "Not specified. AUTH_FAIL_ACTION_REDIRECT is assumed.", + "Redirects user to \"accounts.google.com\". The user is redirected back to the application URL after signing in or creating an account.", + "Rejects request with a 401 HTTP status code and an error message." + ], + "enum": [ + "AUTH_FAIL_ACTION_UNSPECIFIED", + "AUTH_FAIL_ACTION_REDIRECT", + "AUTH_FAIL_ACTION_UNAUTHORIZED" + ] + }, + "script": { + "description": "Path to the script from the application root directory.", + "type": "string" + } + }, + "id": "ApiConfigHandler" + }, + "StaticFilesHandler": { + "description": "Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them.", + "type": "object", + "properties": { + "expiration": { + "description": "Time a static file served by this handler should be cached by web proxies and browsers.", + "format": "google-duration", + "type": "string" + }, + "applicationReadable": { + "description": "Whether files should also be uploaded as code data. By default, files declared in static file handlers are uploaded as static data and are only served to end users; they cannot be read by the application. If enabled, uploads are charged against both your code and static data storage resource quotas.", + "type": "boolean" + }, + "httpHeaders": { + "description": "HTTP headers to use for all responses from these URLs.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "uploadPathRegex": { + "description": "Regular expression that matches the file paths for all files that should be referenced by this handler.", + "type": "string" + }, + "path": { + "description": "Path to the static files matched by the URL pattern, from the application root directory. 
The path can refer to text matched in groupings in the URL pattern.", + "type": "string" + }, + "mimeType": { + "description": "MIME type used to serve all files served by this handler.Defaults to file-specific MIME types, which are derived from each file's filename extension.", + "type": "string" + }, + "requireMatchingFile": { + "description": "Whether this handler should match the request if the file referenced by the handler does not exist.", + "type": "boolean" + } + }, + "id": "StaticFilesHandler" + }, + "BasicScaling": { + "description": "A service with basic scaling will create an instance when the application receives a request. The instance will be turned down when the app becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.", + "type": "object", + "properties": { + "maxInstances": { + "description": "Maximum number of instances to create for this version.", + "format": "int32", + "type": "integer" + }, + "idleTimeout": { + "description": "Duration of time after the last request that an instance must wait before the instance is shut down.", + "format": "google-duration", + "type": "string" + } + }, + "id": "BasicScaling" + }, + "DiskUtilization": { + "description": "Target scaling by disk usage. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "targetWriteOpsPerSecond": { + "description": "Target ops written per second.", + "format": "int32", + "type": "integer" + }, + "targetWriteBytesPerSecond": { + "description": "Target bytes written per second.", + "format": "int32", + "type": "integer" + }, + "targetReadBytesPerSecond": { + "description": "Target bytes read per second.", + "format": "int32", + "type": "integer" + }, + "targetReadOpsPerSecond": { + "description": "Target ops read per seconds.", + "format": "int32", + "type": "integer" + } + }, + "id": "DiskUtilization" + }, + "CpuUtilization": { + "description": "Target scaling by CPU usage.", + "type": "object", + "properties": { + "aggregationWindowLength": { + "description": "Period of time over which CPU utilization is calculated.", + "format": "google-duration", + "type": "string" + }, + "targetUtilization": { + "description": "Target CPU utilization ratio to maintain when scaling. Must be between 0 and 1.", + "format": "double", + "type": "number" + } + }, + "id": "CpuUtilization" + }, + "IdentityAwareProxy": { + "description": "Identity-Aware Proxy", + "type": "object", + "properties": { + "enabled": { + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests.If true, the oauth2_client_id and oauth2_client_secret fields must be non-empty.", + "type": "boolean" + }, + "oauth2ClientSecret": { + "description": "OAuth2 client secret to use for the authentication flow.For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2_client_secret_sha256 field.@InputOnly", + "type": "string" + }, + "oauth2ClientId": { + "description": "OAuth2 client ID to use for the authentication flow.", + "type": "string" + }, + "oauth2ClientSecretSha256": { + "description": "Hex-encoded SHA-256 hash of the client secret.@OutputOnly", + "type": "string" + } + }, + "id": "IdentityAwareProxy" + }, + "Status": { + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). 
The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc which can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting purpose.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { + "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + } + } + }, + "id": "Status" + }, + "ManualScaling": { + "description": "A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.", + "type": "object", + "properties": { + "instances": { + "description": "Number of instances to assign to the service at the start. 
This number can later be altered by using the Modules API (https://cloud.google.com/appengine/docs/python/modules/functions) set_num_instances() function.", + "format": "int32", + "type": "integer" + } + }, + "id": "ManualScaling" + }, + "LocationMetadata": { + "description": "Metadata for the given google.cloud.location.Location.", + "type": "object", + "properties": { + "flexibleEnvironmentAvailable": { + "description": "App Engine Flexible Environment is available in the given location.@OutputOnly", + "type": "boolean" + }, + "standardEnvironmentAvailable": { + "description": "App Engine Standard Environment is available in the given location.@OutputOnly", + "type": "boolean" + } + }, + "id": "LocationMetadata" + }, + "Service": { + "description": "A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.", + "type": "object", + "properties": { + "split": { + "$ref": "TrafficSplit", + "description": "Mapping that defines fractional HTTP traffic diversion to different versions within the service." + }, + "id": { + "description": "Relative name of the service within the application. Example: default.@OutputOnly", + "type": "string" + }, + "name": { + "description": "Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + } + }, + "id": "Service" + }, + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Operation" + } + } + }, + "id": "ListOperationsResponse" + }, + "OperationMetadata": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "endTime": { + "description": "Timestamp that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "operationType": { + "description": "Type of this operation. Deprecated, use method field instead. Example: \"create_version\".@OutputOnly", + "type": "string" + }, + "insertTime": { + "description": "Timestamp that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/modules/default.@OutputOnly", + "type": "string" + }, + "method": { + "description": "API method that initiated this operation. Example: google.appengine.v1beta4.Version.CreateVersion.@OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadata" + }, + "OperationMetadataV1": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "ephemeralMessage": { + "description": "Ephemeral message that may change every time the operation is polled. 
@OutputOnly", + "type": "string" + }, + "method": { + "description": "API method that initiated this operation. Example: google.appengine.v1.Versions.CreateVersion.@OutputOnly", + "type": "string" + }, + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "warning": { + "description": "Durable messages that persist on every operation poll. @OutputOnly", + "type": "array", + "items": { + "type": "string" + } + }, + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadataV1" + }, + "ErrorHandler": { + "description": "Custom static error page to be served when an error occurs.", + "type": "object", + "properties": { + "errorCode": { + "enumDescriptions": [ + "Not specified. ERROR_CODE_DEFAULT is assumed.", + "All other error types.", + "Application has exceeded a resource quota.", + "Client blocked by the application's Denial of Service protection configuration.", + "Deadline reached before the application responds." + ], + "enum": [ + "ERROR_CODE_UNSPECIFIED", + "ERROR_CODE_DEFAULT", + "ERROR_CODE_OVER_QUOTA", + "ERROR_CODE_DOS_API_DENIAL", + "ERROR_CODE_TIMEOUT" + ], + "description": "Error condition this handler applies to.", + "type": "string" + }, + "mimeType": { + "description": "MIME type of file. Defaults to text/html.", + "type": "string" + }, + "staticFile": { + "description": "Static file content to be served for this error.", + "type": "string" + } + }, + "id": "ErrorHandler" + }, + "Network": { + "description": "Extra network settings. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "forwardedPorts": { + "description": "List of ports, or port pairs, to forward from the virtual machine to the application container.", + "type": "array", + "items": { + "type": "string" + } + }, + "instanceTag": { + "description": "Tag to apply to the VM instance during creation.", + "type": "string" + }, + "subnetworkName": { + "description": "Google Cloud Platform sub-network where the virtual machines are created. Specify the short name, not the resource path.If a subnetwork name is specified, a network name will also be required unless it is for the default network.\nIf the network the VM instance is being created in is a Legacy network, then the IP address is allocated from the IPv4Range.\nIf the network the VM instance is being created in is an auto Subnet Mode Network, then only network name should be specified (not the subnetwork_name) and the IP address is created from the IPCidrRange of the subnetwork that exists in that zone for that network.\nIf the network the VM instance is being created in is a custom Subnet Mode Network, then the subnetwork_name must be specified and the IP address is created from the IPCidrRange of the subnetwork.If specified, the subnetwork must exist in the same region as the Flex app.", + "type": "string" + }, + "name": { + "description": "Google Cloud Platform network where the virtual machines are created. 
Specify the short name, not the resource path.Defaults to default.", + "type": "string" + } + }, + "id": "Network" + }, + "Application": { + "description": "An Application resource contains the top-level configuration of an App Engine application.", + "type": "object", + "properties": { + "codeBucket": { + "description": "Google Cloud Storage bucket that can be used for storing files associated with this application. This bucket is associated with the application and can be used by the gcloud deployment commands.@OutputOnly", + "type": "string" + }, + "defaultBucket": { + "description": "Google Cloud Storage bucket that can be used by this application to store content.@OutputOnly", + "type": "string" + }, + "locationId": { + "description": "Location from which this application will be run. Application instances will run out of data centers in the chosen location, which is also where all of the application's end user content is stored.Defaults to us-central.Options are:us-central - Central USeurope-west - Western Europeus-east1 - Eastern US", + "type": "string" + }, + "dispatchRules": { + "description": "HTTP path dispatch rules for requests to the application that do not explicitly target a service or version. Rules are order-dependent.@OutputOnly", + "type": "array", + "items": { + "$ref": "UrlDispatchRule" + } + }, + "defaultHostname": { + "description": "Hostname used to reach this application, as resolved by App Engine.@OutputOnly", + "type": "string" + }, + "name": { + "description": "Full path to the Application resource in the API. Example: apps/myapp.@OutputOnly", + "type": "string" + }, + "authDomain": { + "description": "Google Apps authentication domain that controls which users can access this application.Defaults to open access for any Google Account.", + "type": "string" + }, + "iap": { + "$ref": "IdentityAwareProxy" + }, + "defaultCookieExpiration": { + "description": "Cookie expiration policy for this application.", + "format": "google-duration", + "type": "string" + }, + "id": { + "description": "Identifier of the Application resource. This identifier is equivalent to the project ID of the Google Cloud Platform project where you want to deploy your application. Example: myapp.", + "type": "string" + } + }, + "id": "Application" + }, + "Instance": { + "description": "An Instance resource is the computing unit that App Engine uses to automatically scale an application.", + "type": "object", + "properties": { + "requests": { + "description": "Number of requests since this instance was started.@OutputOnly", + "format": "int32", + "type": "integer" + }, + "appEngineRelease": { + "description": "App Engine release this instance is running on.@OutputOnly", + "type": "string" + }, + "vmName": { + "description": "Name of the virtual machine where this instance lives. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "vmId": { + "description": "Virtual machine ID of this instance. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "qps": { + "description": "Average queries per second (QPS) over the last minute.@OutputOnly", + "format": "float", + "type": "number" + }, + "name": { + "description": "Full path to the Instance resource in the API. Example: apps/myapp/services/default/versions/v1/instances/instance-1.@OutputOnly", + "type": "string" + }, + "vmZoneName": { + "description": "Zone where the virtual machine is located. 
Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "averageLatency": { + "description": "Average latency (ms) over the last minute.@OutputOnly", + "format": "int32", + "type": "integer" + }, + "memoryUsage": { + "description": "Total memory in use (bytes).@OutputOnly", + "format": "int64", + "type": "string" + }, + "vmIp": { + "description": "The IP address of this instance. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "id": { + "description": "Relative name of the instance within the version. Example: instance-1.@OutputOnly", + "type": "string" + }, + "errors": { + "description": "Number of errors since this instance was started.@OutputOnly", + "format": "int32", + "type": "integer" + }, + "availability": { + "enumDescriptions": [ + "", + "", + "" + ], + "enum": [ + "UNSPECIFIED", + "RESIDENT", + "DYNAMIC" + ], + "description": "Availability of the instance.@OutputOnly", + "type": "string" + }, + "vmStatus": { + "description": "Status of the virtual machine where this instance lives. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "startTime": { + "description": "Time that this instance was started.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "vmDebugEnabled": { + "description": "Whether this instance is in debug mode. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "boolean" + } + }, + "id": "Instance" + }, + "LivenessCheck": { + "description": "Health checking configuration for VM instances. Unhealthy instances are killed and replaced with new instances.", + "type": "object", + "properties": { + "checkInterval": { + "description": "Interval between health checks.", + "format": "google-duration", + "type": "string" + }, + "timeout": { + "description": "Time before the check is considered failed.", + "format": "google-duration", + "type": "string" + }, + "initialDelay": { + "description": "The initial delay before starting to execute the checks.", + "format": "google-duration", + "type": "string" + }, + "unhealthyThreshold": { + "description": "Number of consecutive failed checks required before considering the VM unhealthy.", + "format": "uint32", + "type": "integer" + }, + "path": { + "description": "The request path.", + "type": "string" + }, + "host": { + "description": "Host header to send when performing a HTTP Liveness check. Example: \"myapp.appspot.com\"", + "type": "string" + }, + "healthyThreshold": { + "description": "Number of consecutive successful checks required before considering the VM healthy.", + "format": "uint32", + "type": "integer" + } + }, + "id": "LivenessCheck" + }, + "Location": { + "description": "A resource that represents Google Cloud Platform location.", + "type": "object", + "properties": { + "locationId": { + "description": "The canonical id for this location. For example: \"us-east1\".", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata. For example the available capacity at the given location.", + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Cross-service attributes for the location. 
For example\n{\"cloud.googleapis.com/region\": \"us-east1\"}\n", + "type": "object" + }, + "name": { + "description": "Resource name for the location, which may vary between implementations. For example: \"projects/example-project/locations/us-east1\"", + "type": "string" + } + }, + "id": "Location" + }, + "NetworkUtilization": { + "description": "Target scaling by network usage. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "targetSentBytesPerSecond": { + "description": "Target bytes sent per second.", + "format": "int32", + "type": "integer" + }, + "targetSentPacketsPerSecond": { + "description": "Target packets sent per second.", + "format": "int32", + "type": "integer" + }, + "targetReceivedBytesPerSecond": { + "description": "Target bytes received per second.", + "format": "int32", + "type": "integer" + }, + "targetReceivedPacketsPerSecond": { + "description": "Target packets received per second.", + "format": "int32", + "type": "integer" + } + }, + "id": "NetworkUtilization" + }, + "HealthCheck": { + "description": "Health checking configuration for VM instances. Unhealthy instances are killed and replaced with new instances. Only applicable for instances in App Engine flexible environment.", + "type": "object", + "properties": { + "host": { + "description": "Host header to send when performing an HTTP health check. Example: \"myapp.appspot.com\"", + "type": "string" + }, + "healthyThreshold": { + "description": "Number of consecutive successful health checks required before receiving traffic.", + "format": "uint32", + "type": "integer" + }, + "restartThreshold": { + "description": "Number of consecutive failed health checks required before an instance is restarted.", + "format": "uint32", + "type": "integer" + }, + "checkInterval": { + "description": "Interval between health checks.", + "format": "google-duration", + "type": "string" + }, + "timeout": { + "description": "Time before the health check is considered failed.", + "format": "google-duration", + "type": "string" + }, + "unhealthyThreshold": { + "description": "Number of consecutive failed health checks required before removing traffic.", + "format": "uint32", + "type": "integer" + }, + "disableHealthCheck": { + "description": "Whether to explicitly disable health checks for this instance.", + "type": "boolean" + } + }, + "id": "HealthCheck" + }, + "ReadinessCheck": { + "description": "Readiness checking configuration for VM instances. Unhealthy instances are removed from traffic rotation.", + "type": "object", + "properties": { + "checkInterval": { + "description": "Interval between health checks.", + "format": "google-duration", + "type": "string" + }, + "timeout": { + "description": "Time before the check is considered failed.", + "format": "google-duration", + "type": "string" + }, + "unhealthyThreshold": { + "description": "Number of consecutive failed checks required before removing traffic.", + "format": "uint32", + "type": "integer" + }, + "path": { + "description": "The request path.", + "type": "string" + }, + "host": { + "description": "Host header to send when performing a HTTP Readiness check. 
Example: \"myapp.appspot.com\"", + "type": "string" + }, + "healthyThreshold": { + "description": "Number of consecutive successful checks required before receiving traffic.", + "format": "uint32", + "type": "integer" + } + }, + "id": "ReadinessCheck" + }, + "DebugInstanceRequest": { + "description": "Request message for Instances.DebugInstance.", + "type": "object", + "properties": { + "sshKey": { + "description": "Public SSH key to add to the instance. Examples:\n[USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]\n[USERNAME]:ssh-rsa [KEY_VALUE] google-ssh {\"userName\":\"[USERNAME]\",\"expireOn\":\"[EXPIRE_TIME]\"}For more information, see Adding and Removing SSH Keys (https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys).", + "type": "string" + } + }, + "id": "DebugInstanceRequest" + }, + "OperationMetadataV1Beta5": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "method": { + "description": "API method name that initiated this operation. Example: google.appengine.v1beta5.Version.CreateVersion.@OutputOnly", + "type": "string" + }, + "insertTime": { + "description": "Timestamp that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "Timestamp that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadataV1Beta5" + }, + "Version": { + "description": "A Version resource is a specific set of source code and configuration files that are deployed into a service.", + "type": "object", + "properties": { + "betaSettings": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata settings that are supplied to this version to enable beta runtime features.", + "type": "object" + }, + "env": { + "description": "App Engine execution environment for this version.Defaults to standard.", + "type": "string" + }, + "handlers": { + "description": "An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set.", + "type": "array", + "items": { + "$ref": "UrlMap" + } + }, + "automaticScaling": { + "$ref": "AutomaticScaling", + "description": "Automatic scaling is based on request rate, response latencies, and other application metrics." + }, + "diskUsageBytes": { + "description": "Total size in bytes of all the files that are included in this version and curerntly hosted on the App Engine disk.@OutputOnly", + "format": "int64", + "type": "string" + }, + "healthCheck": { + "description": "Configures health checking for VM instances. Unhealthy instances are stopped and replaced with new instances. Only applicable for VM runtimes.Only returned in GET requests if view=FULL is set.", + "$ref": "HealthCheck" + }, + "threadsafe": { + "description": "Whether multiple requests can be dispatched to this version at once.", + "type": "boolean" + }, + "readinessCheck": { + "$ref": "ReadinessCheck", + "description": "Configures readiness health checking for VM instances. 
Unhealthy instances are not put into the backend traffic rotation.Only returned in GET requests if view=FULL is set." + }, + "manualScaling": { + "$ref": "ManualScaling", + "description": "A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time." + }, + "name": { + "description": "Full path to the Version resource in the API. Example: apps/myapp/services/default/versions/v1.@OutputOnly", + "type": "string" + }, + "apiConfig": { + "description": "Serving configuration for Google Cloud Endpoints (https://cloud.google.com/appengine/docs/python/endpoints/).Only returned in GET requests if view=FULL is set.", + "$ref": "ApiConfigHandler" + }, + "endpointsApiService": { + "$ref": "EndpointsApiService", + "description": "Cloud Endpoints configuration.If endpoints_api_service is set, the Cloud Endpoints Extensible Service Proxy will be provided to serve the API implemented by the app." + }, + "vm": { + "description": "Whether to deploy this version in a container on a virtual machine.", + "type": "boolean" + }, + "versionUrl": { + "description": "Serving URL for this version. Example: \"https://myversion-dot-myservice-dot-myapp.appspot.com\"@OutputOnly", + "type": "string" + }, + "instanceClass": { + "description": "Instance class that is used to run this version. Valid values are:\nAutomaticScaling: F1, F2, F4, F4_1G\nManualScaling or BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 for AutomaticScaling and B1 for ManualScaling or BasicScaling.", + "type": "string" + }, + "servingStatus": { + "enumDescriptions": [ + "Not specified.", + "Currently serving. Instances are created according to the scaling settings of the version.", + "Disabled. No instances will be created and the scaling settings are ignored until the state of the version changes to SERVING." + ], + "enum": [ + "SERVING_STATUS_UNSPECIFIED", + "SERVING", + "STOPPED" + ], + "description": "Current serving status of this version. Only the versions with a SERVING status create instances and can be billed.SERVING_STATUS_UNSPECIFIED is an invalid value. Defaults to SERVING.", + "type": "string" + }, + "deployment": { + "description": "Code and application artifacts that make up this version.Only returned in GET requests if view=FULL is set.", + "$ref": "Deployment" + }, + "createTime": { + "description": "Time that this version was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "resources": { + "$ref": "Resources", + "description": "Machine resources for this version. Only applicable for VM runtimes." 
+ }, + "inboundServices": { + "description": "Before an application can receive email or XMPP messages, the application must be configured to enable the service.", + "type": "array", + "items": { + "enum": [ + "INBOUND_SERVICE_UNSPECIFIED", + "INBOUND_SERVICE_MAIL", + "INBOUND_SERVICE_MAIL_BOUNCE", + "INBOUND_SERVICE_XMPP_ERROR", + "INBOUND_SERVICE_XMPP_MESSAGE", + "INBOUND_SERVICE_XMPP_SUBSCRIBE", + "INBOUND_SERVICE_XMPP_PRESENCE", + "INBOUND_SERVICE_CHANNEL_PRESENCE", + "INBOUND_SERVICE_WARMUP" + ], + "type": "string" + }, + "enumDescriptions": [ + "Not specified.", + "Allows an application to receive mail.", + "Allows an application to receive email-bound notifications.", + "Allows an application to receive error stanzas.", + "Allows an application to receive instant messages.", + "Allows an application to receive user subscription POSTs.", + "Allows an application to receive a user's chat presence.", + "Registers an application for notifications when a client connects or disconnects from a channel.", + "Enables warmup requests." + ] + }, + "errorHandlers": { + "description": "Custom static error pages. Limited to 10KB per page.Only returned in GET requests if view=FULL is set.", + "type": "array", + "items": { + "$ref": "ErrorHandler" + } + }, + "defaultExpiration": { + "description": "Duration that static files should be cached by web proxies and browsers. Only applicable if the corresponding StaticFilesHandler (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#staticfileshandler) does not specify its own expiration time.Only returned in GET requests if view=FULL is set.", + "format": "google-duration", + "type": "string" + }, + "libraries": { + "description": "Configuration for third-party Python runtime libraries that are required by the application.Only returned in GET requests if view=FULL is set.", + "type": "array", + "items": { + "$ref": "Library" + } + }, + "nobuildFilesRegex": { + "description": "Files that match this pattern will not be built into this version. Only applicable for Go runtimes.Only returned in GET requests if view=FULL is set.", + "type": "string" + }, + "basicScaling": { + "description": "A service with basic scaling will create an instance when the application receives a request. The instance will be turned down when the app becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.", + "$ref": "BasicScaling" + }, + "runtime": { + "description": "Desired runtime. Example: python27.", + "type": "string" + }, + "id": { + "description": "Relative name of the version within the service. Example: v1. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names: \"default\", \"latest\", and any name with the prefix \"ah-\".", + "type": "string" + }, + "createdBy": { + "description": "Email address of the user who created this version.@OutputOnly", + "type": "string" + }, + "envVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Environment variables available to the application.Only returned in GET requests if view=FULL is set.", + "type": "object" + }, + "livenessCheck": { + "$ref": "LivenessCheck", + "description": "Configures liveness health checking for VM instances. Unhealthy instances are stopped and replaced with new instancesOnly returned in GET requests if view=FULL is set." + }, + "network": { + "$ref": "Network", + "description": "Extra network settings. Only applicable for VM runtimes." 
+ } + }, + "id": "Version" + }, + "RepairApplicationRequest": { + "description": "Request message for 'Applications.RepairApplication'.", + "type": "object", + "properties": {}, + "id": "RepairApplicationRequest" + }, + "FileInfo": { + "description": "Single source file that is part of the version to be deployed. Each source file that is deployed must be specified separately.", + "type": "object", + "properties": { + "mimeType": { + "description": "The MIME type of the file.Defaults to the value from Google Cloud Storage.", + "type": "string" + }, + "sourceUrl": { + "description": "URL source to use to fetch this file. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com/\u003cbucket\u003e/\u003cobject\u003e'.", + "type": "string" + }, + "sha1Sum": { + "description": "The SHA1 hash of the file, in hex.", + "type": "string" + } + }, + "id": "FileInfo" + }, + "ScriptHandler": { + "description": "Executes a script to handle the request that matches the URL pattern.", + "type": "object", + "properties": { + "scriptPath": { + "description": "Path to the script from the application root directory.", + "type": "string" + } + }, + "id": "ScriptHandler" + }, + "OperationMetadataExperimental": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "method": { + "description": "API method that initiated this operation. Example: google.appengine.experimental.CustomDomains.CreateCustomDomain.@OutputOnly", + "type": "string" + }, + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/customDomains/example.com.@OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadataExperimental" + }, + "TrafficSplit": { + "description": "Traffic routing configuration for versions within a single service. Traffic splits define how traffic directed to the service is assigned to versions.", + "type": "object", + "properties": { + "shardBy": { + "enumDescriptions": [ + "Diversion method unspecified.", + "Diversion based on a specially named cookie, \"GOOGAPPUID.\" The cookie must be set by the application itself or no diversion will occur.", + "Diversion based on applying the modulus operation to a fingerprint of the IP address." + ], + "enum": [ + "UNSPECIFIED", + "COOKIE", + "IP" + ], + "description": "Mechanism used to determine which version a request is sent to. The traffic selection algorithm will be stable for either type until allocations are changed.", + "type": "string" + }, + "allocations": { + "description": "Mapping from version IDs within the service to fractional (0.000, 1] allocations of traffic for that version. Each version can be specified only once, but some versions in the service may not have any traffic allocation. Services that have traffic allocated cannot be deleted until either the service is deleted or their traffic allocation is removed. Allocations must sum to 1. 
Up to two decimal place precision is supported for IP-based splits and up to three decimal places is supported for cookie-based splits.", + "type": "object", + "additionalProperties": { + "format": "double", + "type": "number" + } + } + }, + "id": "TrafficSplit" + }, + "OperationMetadataV1Beta": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "ephemeralMessage": { + "description": "Ephemeral message that may change every time the operation is polled. @OutputOnly", + "type": "string" + }, + "method": { + "description": "API method that initiated this operation. Example: google.appengine.v1beta.Versions.CreateVersion.@OutputOnly", + "type": "string" + }, + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "warning": { + "description": "Durable messages that persist on every operation poll. @OutputOnly", + "type": "array", + "items": { + "type": "string" + } + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadataV1Beta" + }, + "ListServicesResponse": { + "description": "Response message for Services.ListServices.", + "type": "object", + "properties": { + "services": { + "description": "The services belonging to the requested application.", + "type": "array", + "items": { + "$ref": "Service" + } + }, + "nextPageToken": { + "description": "Continuation token for fetching the next page of results.", + "type": "string" + } + }, + "id": "ListServicesResponse" + }, + "Resources": { + "description": "Machine resources for a version.", + "type": "object", + "properties": { + "cpu": { + "description": "Number of CPU cores needed.", + "format": "double", + "type": "number" + }, + "memoryGb": { + "description": "Memory (GB) needed.", + "format": "double", + "type": "number" + }, + "volumes": { + "description": "User specified volumes.", + "type": "array", + "items": { + "$ref": "Volume" + } + }, + "diskGb": { + "description": "Disk size (GB) needed.", + "format": "double", + "type": "number" + } + }, + "id": "Resources" + }, + "Deployment": { + "description": "Code and application artifacts used to deploy a version to App Engine.", + "type": "object", + "properties": { + "files": { + "additionalProperties": { + "$ref": "FileInfo" + }, + "description": "Manifest of the files stored in Google Cloud Storage that are included as part of this version. All files must be readable using the credentials supplied with this call.", + "type": "object" + }, + "zip": { + "$ref": "ZipInfo", + "description": "The zip file for this deployment, if this is a zip deployment." + }, + "container": { + "description": "The Docker image for the container that runs the version. Only applicable for instances running in the App Engine flexible environment.", + "$ref": "ContainerInfo" + } + }, + "id": "Deployment" + }, + "Volume": { + "description": "Volumes mounted within the app container. 
Only applicable for VM runtimes.", + "type": "object", + "properties": { + "sizeGb": { + "description": "Volume size in gigabytes.", + "format": "double", + "type": "number" + }, + "name": { + "description": "Unique name for the volume.", + "type": "string" + }, + "volumeType": { + "description": "Underlying volume type, e.g. 'tmpfs'.", + "type": "string" + } + }, + "id": "Volume" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "version": "v1alpha", + "baseUrl": "https://appengine.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/appengine.admin": { + "description": "View and manage your applications deployed on Google App Engine" + } + } + } + }, + "kind": "discovery#restDescription", + "description": "Provisions and manages App Engine applications.", + "servicePath": "", + "rootUrl": "https://appengine.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "appengine", + "batchPath": "batch", + "revision": "20170221", + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "id": "appengine:v1alpha", + "title": "Google App Engine Admin API", + "ownerName": "Google", + "discoveryVersion": "v1" +} diff --git a/vendor/google.golang.org/api/appengine/v1alpha/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1alpha/appengine-gen.go new file mode 100644 index 000000000..399196e9b --- /dev/null +++ b/vendor/google.golang.org/api/appengine/v1alpha/appengine-gen.go @@ -0,0 +1,6151 @@ +// Package appengine provides access to the Google App Engine Admin API. +// +// See https://cloud.google.com/appengine/docs/admin-api/ +// +// Usage example: +// +// import "google.golang.org/api/appengine/v1alpha" +// ... +// appengineService, err := appengine.New(oauthHttpClient) +package appengine // import "google.golang.org/api/appengine/v1alpha" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "appengine:v1alpha" +const apiName = "appengine" +const apiVersion = "v1alpha" +const basePath = "https://appengine.googleapis.com/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your applications deployed on Google App Engine + AppengineAdminScope = "https://www.googleapis.com/auth/appengine.admin" + + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" +) + +func New(client *http.Client) (*APIService, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &APIService{client: client, BasePath: basePath} + s.Apps = NewAppsService(s) + return s, nil +} + +type APIService struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Apps *AppsService +} + +func (s *APIService) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewAppsService(s *APIService) *AppsService { + rs := &AppsService{s: s} + rs.Locations = NewAppsLocationsService(s) + rs.Operations = NewAppsOperationsService(s) + rs.Services = NewAppsServicesService(s) + return rs +} + +type AppsService struct { + s *APIService + + Locations *AppsLocationsService + + Operations *AppsOperationsService + + Services *AppsServicesService +} + +func NewAppsLocationsService(s *APIService) *AppsLocationsService { + rs := &AppsLocationsService{s: s} + return rs +} + +type AppsLocationsService struct { + s *APIService +} + +func NewAppsOperationsService(s *APIService) *AppsOperationsService { + rs := &AppsOperationsService{s: s} + return rs +} + +type AppsOperationsService struct { + s *APIService +} + +func NewAppsServicesService(s *APIService) *AppsServicesService { + rs := &AppsServicesService{s: s} + rs.Versions = NewAppsServicesVersionsService(s) + return rs +} + +type AppsServicesService struct { + s *APIService + + Versions *AppsServicesVersionsService +} + +func NewAppsServicesVersionsService(s *APIService) *AppsServicesVersionsService { + rs := &AppsServicesVersionsService{s: s} + rs.Instances = NewAppsServicesVersionsInstancesService(s) + return rs +} + +type AppsServicesVersionsService struct { + s *APIService + + Instances *AppsServicesVersionsInstancesService +} + +func NewAppsServicesVersionsInstancesService(s *APIService) *AppsServicesVersionsInstancesService { + rs := &AppsServicesVersionsInstancesService{s: s} + return rs +} + +type AppsServicesVersionsInstancesService struct { + s *APIService +} + +// ApiConfigHandler: Google Cloud Endpoints +// (https://cloud.google.com/appengine/docs/python/endpoints/) +// configuration for API handlers. +type ApiConfigHandler struct { + // AuthFailAction: Action to take when users access resources that + // require authentication. Defaults to redirect. + // + // Possible values: + // "AUTH_FAIL_ACTION_UNSPECIFIED" - Not specified. + // AUTH_FAIL_ACTION_REDIRECT is assumed. + // "AUTH_FAIL_ACTION_REDIRECT" - Redirects user to + // "accounts.google.com". The user is redirected back to the application + // URL after signing in or creating an account. + // "AUTH_FAIL_ACTION_UNAUTHORIZED" - Rejects request with a 401 HTTP + // status code and an error message. 
+ AuthFailAction string `json:"authFailAction,omitempty"` + + // Login: Level of login required to access this resource. Defaults to + // optional. + // + // Possible values: + // "LOGIN_UNSPECIFIED" - Not specified. LOGIN_OPTIONAL is assumed. + // "LOGIN_OPTIONAL" - Does not require that the user is signed in. + // "LOGIN_ADMIN" - If the user is not signed in, the auth_fail_action + // is taken. In addition, if the user is not an administrator for the + // application, they are given an error message regardless of + // auth_fail_action. If the user is an administrator, the handler + // proceeds. + // "LOGIN_REQUIRED" - If the user has signed in, the handler proceeds + // normally. Otherwise, the auth_fail_action is taken. + Login string `json:"login,omitempty"` + + // Script: Path to the script from the application root directory. + Script string `json:"script,omitempty"` + + // SecurityLevel: Security (HTTPS) enforcement for this URL. + // + // Possible values: + // "SECURE_UNSPECIFIED" - Not specified. + // "SECURE_DEFAULT" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used, and respond + // accordingly. + // "SECURE_NEVER" - Requests for a URL that match this handler that + // use HTTPS are automatically redirected to the HTTP equivalent URL. + // "SECURE_OPTIONAL" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used and respond + // accordingly. + // "SECURE_ALWAYS" - Requests for a URL that match this handler that + // do not use HTTPS are automatically redirected to the HTTPS URL with + // the same path. Query parameters are reserved for the redirect. + SecurityLevel string `json:"securityLevel,omitempty"` + + // Url: URL to serve the endpoint at. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuthFailAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuthFailAction") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ApiConfigHandler) MarshalJSON() ([]byte, error) { + type noMethod ApiConfigHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ApiEndpointHandler: Uses Google Cloud Endpoints to handle requests. +type ApiEndpointHandler struct { + // ScriptPath: Path to the script from the application root directory. + ScriptPath string `json:"scriptPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ScriptPath") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ScriptPath") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ApiEndpointHandler) MarshalJSON() ([]byte, error) { + type noMethod ApiEndpointHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Application: An Application resource contains the top-level +// configuration of an App Engine application. +type Application struct { + // AuthDomain: Google Apps authentication domain that controls which + // users can access this application.Defaults to open access for any + // Google Account. + AuthDomain string `json:"authDomain,omitempty"` + + // CodeBucket: Google Cloud Storage bucket that can be used for storing + // files associated with this application. This bucket is associated + // with the application and can be used by the gcloud deployment + // commands.@OutputOnly + CodeBucket string `json:"codeBucket,omitempty"` + + // DefaultBucket: Google Cloud Storage bucket that can be used by this + // application to store content.@OutputOnly + DefaultBucket string `json:"defaultBucket,omitempty"` + + // DefaultCookieExpiration: Cookie expiration policy for this + // application. + DefaultCookieExpiration string `json:"defaultCookieExpiration,omitempty"` + + // DefaultHostname: Hostname used to reach this application, as resolved + // by App Engine.@OutputOnly + DefaultHostname string `json:"defaultHostname,omitempty"` + + // DispatchRules: HTTP path dispatch rules for requests to the + // application that do not explicitly target a service or version. Rules + // are order-dependent.@OutputOnly + DispatchRules []*UrlDispatchRule `json:"dispatchRules,omitempty"` + + Iap *IdentityAwareProxy `json:"iap,omitempty"` + + // Id: Identifier of the Application resource. This identifier is + // equivalent to the project ID of the Google Cloud Platform project + // where you want to deploy your application. Example: myapp. + Id string `json:"id,omitempty"` + + // LocationId: Location from which this application will be run. + // Application instances will run out of data centers in the chosen + // location, which is also where all of the application's end user + // content is stored.Defaults to us-central.Options are:us-central - + // Central USeurope-west - Western Europeus-east1 - Eastern US + LocationId string `json:"locationId,omitempty"` + + // Name: Full path to the Application resource in the API. Example: + // apps/myapp.@OutputOnly + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuthDomain") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuthDomain") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Application) MarshalJSON() ([]byte, error) { + type noMethod Application + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AutomaticScaling: Automatic scaling is based on request rate, +// response latencies, and other application metrics. +type AutomaticScaling struct { + // CoolDownPeriod: Amount of time that the Autoscaler + // (https://cloud.google.com/compute/docs/autoscaler/) should wait + // between changes to the number of virtual machines. Only applicable + // for VM runtimes. + CoolDownPeriod string `json:"coolDownPeriod,omitempty"` + + // CpuUtilization: Target scaling by CPU usage. + CpuUtilization *CpuUtilization `json:"cpuUtilization,omitempty"` + + // DiskUtilization: Target scaling by disk usage. + DiskUtilization *DiskUtilization `json:"diskUtilization,omitempty"` + + // MaxConcurrentRequests: Number of concurrent requests an automatic + // scaling instance can accept before the scheduler spawns a new + // instance.Defaults to a runtime-specific value. + MaxConcurrentRequests int64 `json:"maxConcurrentRequests,omitempty"` + + // MaxIdleInstances: Maximum number of idle instances that should be + // maintained for this version. + MaxIdleInstances int64 `json:"maxIdleInstances,omitempty"` + + // MaxPendingLatency: Maximum amount of time that a request should wait + // in the pending queue before starting a new instance to handle it. + MaxPendingLatency string `json:"maxPendingLatency,omitempty"` + + // MaxTotalInstances: Maximum number of instances that should be started + // to handle requests. + MaxTotalInstances int64 `json:"maxTotalInstances,omitempty"` + + // MinIdleInstances: Minimum number of idle instances that should be + // maintained for this version. Only applicable for the default version + // of a service. + MinIdleInstances int64 `json:"minIdleInstances,omitempty"` + + // MinPendingLatency: Minimum amount of time a request should wait in + // the pending queue before starting a new instance to handle it. + MinPendingLatency string `json:"minPendingLatency,omitempty"` + + // MinTotalInstances: Minimum number of instances that should be + // maintained for this version. + MinTotalInstances int64 `json:"minTotalInstances,omitempty"` + + // NetworkUtilization: Target scaling by network usage. + NetworkUtilization *NetworkUtilization `json:"networkUtilization,omitempty"` + + // RequestUtilization: Target scaling by request utilization. + RequestUtilization *RequestUtilization `json:"requestUtilization,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CoolDownPeriod") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CoolDownPeriod") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AutomaticScaling) MarshalJSON() ([]byte, error) { + type noMethod AutomaticScaling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BasicScaling: A service with basic scaling will create an instance +// when the application receives a request. The instance will be turned +// down when the app becomes idle. Basic scaling is ideal for work that +// is intermittent or driven by user activity. +type BasicScaling struct { + // IdleTimeout: Duration of time after the last request that an instance + // must wait before the instance is shut down. + IdleTimeout string `json:"idleTimeout,omitempty"` + + // MaxInstances: Maximum number of instances to create for this version. + MaxInstances int64 `json:"maxInstances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IdleTimeout") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IdleTimeout") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BasicScaling) MarshalJSON() ([]byte, error) { + type noMethod BasicScaling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ContainerInfo: Docker image that is used to create a container and +// start a VM instance for the version that you deploy. Only applicable +// for instances running in the App Engine flexible environment. +type ContainerInfo struct { + // Image: URI to the hosted container image in Google Container + // Registry. The URI must be fully qualified and include a tag or + // digest. Examples: "gcr.io/my-project/image:tag" or + // "gcr.io/my-project/image@digest" + Image string `json:"image,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Image") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Image") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ContainerInfo) MarshalJSON() ([]byte, error) { + type noMethod ContainerInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CpuUtilization: Target scaling by CPU usage. +type CpuUtilization struct { + // AggregationWindowLength: Period of time over which CPU utilization is + // calculated. + AggregationWindowLength string `json:"aggregationWindowLength,omitempty"` + + // TargetUtilization: Target CPU utilization ratio to maintain when + // scaling. Must be between 0 and 1. + TargetUtilization float64 `json:"targetUtilization,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AggregationWindowLength") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AggregationWindowLength") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CpuUtilization) MarshalJSON() ([]byte, error) { + type noMethod CpuUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *CpuUtilization) UnmarshalJSON(data []byte) error { + type noMethod CpuUtilization + var s1 struct { + TargetUtilization gensupport.JSONFloat64 `json:"targetUtilization"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.TargetUtilization = float64(s1.TargetUtilization) + return nil +} + +// DebugInstanceRequest: Request message for Instances.DebugInstance. +type DebugInstanceRequest struct { + // SshKey: Public SSH key to add to the instance. + // Examples: + // [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] + // [USERNAME]:ssh-rsa [KEY_VALUE] google-ssh + // {"userName":"[USERNAME]","expireOn":"[EXPIRE_TIME]"}For more + // information, see Adding and Removing SSH Keys + // (https://cloud.google.com/compute/docs/instances/adding-removing-ssh-k + // eys). + SshKey string `json:"sshKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SshKey") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SshKey") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DebugInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod DebugInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Deployment: Code and application artifacts used to deploy a version +// to App Engine. +type Deployment struct { + // Container: The Docker image for the container that runs the version. + // Only applicable for instances running in the App Engine flexible + // environment. + Container *ContainerInfo `json:"container,omitempty"` + + // Files: Manifest of the files stored in Google Cloud Storage that are + // included as part of this version. All files must be readable using + // the credentials supplied with this call. + Files map[string]FileInfo `json:"files,omitempty"` + + // Zip: The zip file for this deployment, if this is a zip deployment. + Zip *ZipInfo `json:"zip,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Container") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Container") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Deployment) MarshalJSON() ([]byte, error) { + type noMethod Deployment + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskUtilization: Target scaling by disk usage. Only applicable for VM +// runtimes. +type DiskUtilization struct { + // TargetReadBytesPerSecond: Target bytes read per second. + TargetReadBytesPerSecond int64 `json:"targetReadBytesPerSecond,omitempty"` + + // TargetReadOpsPerSecond: Target ops read per seconds. + TargetReadOpsPerSecond int64 `json:"targetReadOpsPerSecond,omitempty"` + + // TargetWriteBytesPerSecond: Target bytes written per second. + TargetWriteBytesPerSecond int64 `json:"targetWriteBytesPerSecond,omitempty"` + + // TargetWriteOpsPerSecond: Target ops written per second. + TargetWriteOpsPerSecond int64 `json:"targetWriteOpsPerSecond,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "TargetReadBytesPerSecond") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. 
This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetReadBytesPerSecond") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskUtilization) MarshalJSON() ([]byte, error) { + type noMethod DiskUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// EndpointsApiService: Cloud Endpoints +// (https://cloud.google.com/endpoints) configuration. The Endpoints API +// Service provides tooling for serving Open API and gRPC endpoints via +// an NGINX proxy.The fields here refer to the name and configuration id +// of a "service" resource in the Service Management API +// (https://cloud.google.com/service-management/overview). +type EndpointsApiService struct { + // ConfigId: Endpoints service configuration id as specified by the + // Service Management API. For example "2016-09-19r1" + ConfigId string `json:"configId,omitempty"` + + // Name: Endpoints service name which is the name of the "service" + // resource in the Service Management API. For example + // "myapi.endpoints.myproject.cloud.goog" + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConfigId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConfigId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *EndpointsApiService) MarshalJSON() ([]byte, error) { + type noMethod EndpointsApiService + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ErrorHandler: Custom static error page to be served when an error +// occurs. +type ErrorHandler struct { + // ErrorCode: Error condition this handler applies to. + // + // Possible values: + // "ERROR_CODE_UNSPECIFIED" - Not specified. ERROR_CODE_DEFAULT is + // assumed. + // "ERROR_CODE_DEFAULT" - All other error types. + // "ERROR_CODE_OVER_QUOTA" - Application has exceeded a resource + // quota. + // "ERROR_CODE_DOS_API_DENIAL" - Client blocked by the application's + // Denial of Service protection configuration. + // "ERROR_CODE_TIMEOUT" - Deadline reached before the application + // responds. + ErrorCode string `json:"errorCode,omitempty"` + + // MimeType: MIME type of file. Defaults to text/html. + MimeType string `json:"mimeType,omitempty"` + + // StaticFile: Static file content to be served for this error. 
+ StaticFile string `json:"staticFile,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorCode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorCode") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ErrorHandler) MarshalJSON() ([]byte, error) { + type noMethod ErrorHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FileInfo: Single source file that is part of the version to be +// deployed. Each source file that is deployed must be specified +// separately. +type FileInfo struct { + // MimeType: The MIME type of the file.Defaults to the value from Google + // Cloud Storage. + MimeType string `json:"mimeType,omitempty"` + + // Sha1Sum: The SHA1 hash of the file, in hex. + Sha1Sum string `json:"sha1Sum,omitempty"` + + // SourceUrl: URL source to use to fetch this file. Must be a URL to a + // resource in Google Cloud Storage in the form + // 'http(s)://storage.googleapis.com//'. + SourceUrl string `json:"sourceUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MimeType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MimeType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FileInfo) MarshalJSON() ([]byte, error) { + type noMethod FileInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthCheck: Health checking configuration for VM instances. +// Unhealthy instances are killed and replaced with new instances. Only +// applicable for instances in App Engine flexible environment. +type HealthCheck struct { + // CheckInterval: Interval between health checks. + CheckInterval string `json:"checkInterval,omitempty"` + + // DisableHealthCheck: Whether to explicitly disable health checks for + // this instance. + DisableHealthCheck bool `json:"disableHealthCheck,omitempty"` + + // HealthyThreshold: Number of consecutive successful health checks + // required before receiving traffic. 
+ HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: Host header to send when performing an HTTP health check. + // Example: "myapp.appspot.com" + Host string `json:"host,omitempty"` + + // RestartThreshold: Number of consecutive failed health checks required + // before an instance is restarted. + RestartThreshold int64 `json:"restartThreshold,omitempty"` + + // Timeout: Time before the health check is considered failed. + Timeout string `json:"timeout,omitempty"` + + // UnhealthyThreshold: Number of consecutive failed health checks + // required before removing traffic. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CheckInterval") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckInterval") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheck) MarshalJSON() ([]byte, error) { + type noMethod HealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// IdentityAwareProxy: Identity-Aware Proxy +type IdentityAwareProxy struct { + // Enabled: Whether the serving infrastructure will authenticate and + // authorize all incoming requests.If true, the oauth2_client_id and + // oauth2_client_secret fields must be non-empty. + Enabled bool `json:"enabled,omitempty"` + + // Oauth2ClientId: OAuth2 client ID to use for the authentication flow. + Oauth2ClientId string `json:"oauth2ClientId,omitempty"` + + // Oauth2ClientSecret: OAuth2 client secret to use for the + // authentication flow.For security reasons, this value cannot be + // retrieved via the API. Instead, the SHA-256 hash of the value is + // returned in the oauth2_client_secret_sha256 field.@InputOnly + Oauth2ClientSecret string `json:"oauth2ClientSecret,omitempty"` + + // Oauth2ClientSecretSha256: Hex-encoded SHA-256 hash of the client + // secret.@OutputOnly + Oauth2ClientSecretSha256 string `json:"oauth2ClientSecretSha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *IdentityAwareProxy) MarshalJSON() ([]byte, error) { + type noMethod IdentityAwareProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Instance: An Instance resource is the computing unit that App Engine +// uses to automatically scale an application. +type Instance struct { + // AppEngineRelease: App Engine release this instance is running + // on.@OutputOnly + AppEngineRelease string `json:"appEngineRelease,omitempty"` + + // Availability: Availability of the instance.@OutputOnly + // + // Possible values: + // "UNSPECIFIED" + // "RESIDENT" + // "DYNAMIC" + Availability string `json:"availability,omitempty"` + + // AverageLatency: Average latency (ms) over the last minute.@OutputOnly + AverageLatency int64 `json:"averageLatency,omitempty"` + + // Errors: Number of errors since this instance was started.@OutputOnly + Errors int64 `json:"errors,omitempty"` + + // Id: Relative name of the instance within the version. Example: + // instance-1.@OutputOnly + Id string `json:"id,omitempty"` + + // MemoryUsage: Total memory in use (bytes).@OutputOnly + MemoryUsage int64 `json:"memoryUsage,omitempty,string"` + + // Name: Full path to the Instance resource in the API. Example: + // apps/myapp/services/default/versions/v1/instances/instance-1.@OutputOn + // ly + Name string `json:"name,omitempty"` + + // Qps: Average queries per second (QPS) over the last + // minute.@OutputOnly + Qps float64 `json:"qps,omitempty"` + + // Requests: Number of requests since this instance was + // started.@OutputOnly + Requests int64 `json:"requests,omitempty"` + + // StartTime: Time that this instance was started.@OutputOnly + StartTime string `json:"startTime,omitempty"` + + // VmDebugEnabled: Whether this instance is in debug mode. Only + // applicable for instances in App Engine flexible + // environment.@OutputOnly + VmDebugEnabled bool `json:"vmDebugEnabled,omitempty"` + + // VmId: Virtual machine ID of this instance. Only applicable for + // instances in App Engine flexible environment.@OutputOnly + VmId string `json:"vmId,omitempty"` + + // VmIp: The IP address of this instance. Only applicable for instances + // in App Engine flexible environment.@OutputOnly + VmIp string `json:"vmIp,omitempty"` + + // VmName: Name of the virtual machine where this instance lives. Only + // applicable for instances in App Engine flexible + // environment.@OutputOnly + VmName string `json:"vmName,omitempty"` + + // VmStatus: Status of the virtual machine where this instance lives. + // Only applicable for instances in App Engine flexible + // environment.@OutputOnly + VmStatus string `json:"vmStatus,omitempty"` + + // VmZoneName: Zone where the virtual machine is located. Only + // applicable for instances in App Engine flexible + // environment.@OutputOnly + VmZoneName string `json:"vmZoneName,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AppEngineRelease") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AppEngineRelease") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Instance) MarshalJSON() ([]byte, error) { + type noMethod Instance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Instance) UnmarshalJSON(data []byte) error { + type noMethod Instance + var s1 struct { + Qps gensupport.JSONFloat64 `json:"qps"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Qps = float64(s1.Qps) + return nil +} + +// Library: Third-party Python runtime library that is required by the +// application. +type Library struct { + // Name: Name of the library. Example: "django". + Name string `json:"name,omitempty"` + + // Version: Version of the library to select, or "latest". + Version string `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Library) MarshalJSON() ([]byte, error) { + type noMethod Library + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListInstancesResponse: Response message for Instances.ListInstances. +type ListInstancesResponse struct { + // Instances: The instances belonging to the requested version. + Instances []*Instance `json:"instances,omitempty"` + + // NextPageToken: Continuation token for fetching the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListInstancesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListLocationsResponse: The response message for +// Locations.ListLocations. +type ListLocationsResponse struct { + // Locations: A list of locations that matches the specified filter in + // the request. + Locations []*Location `json:"locations,omitempty"` + + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Locations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListLocationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListOperationsResponse: The response message for +// Operations.ListOperations. +type ListOperationsResponse struct { + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Operations: A list of operations that matches the specified filter in + // the request. + Operations []*Operation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
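// Editor's note: illustrative sketch, not part of the generated file. The
// ForceSendFields / NullFields comments repeated above describe how a caller
// controls which empty fields survive into a Patch request body. Written as
// if inside this package for brevity; a real caller would qualify the types
// with the package name.
func examplePatchBody() ([]byte, error) {
	lib := &Library{
		Name: "django",
		// Version is empty; naming it in ForceSendFields keeps it in the
		// JSON body ("version":"") instead of being dropped as empty.
		ForceSendFields: []string{"Version"},
		// Naming an empty field in NullFields instead would emit it as an
		// explicit JSON null, e.g. NullFields: []string{"Version"}.
	}
	return lib.MarshalJSON()
}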
+ NullFields []string `json:"-"` +} + +func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListOperationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListServicesResponse: Response message for Services.ListServices. +type ListServicesResponse struct { + // NextPageToken: Continuation token for fetching the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Services: The services belonging to the requested application. + Services []*Service `json:"services,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListServicesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListVersionsResponse: Response message for Versions.ListVersions. +type ListVersionsResponse struct { + // NextPageToken: Continuation token for fetching the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Versions: The versions belonging to the requested service. + Versions []*Version `json:"versions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListVersionsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListVersionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LivenessCheck: Health checking configuration for VM instances. 
+// Unhealthy instances are killed and replaced with new instances. +type LivenessCheck struct { + // CheckInterval: Interval between health checks. + CheckInterval string `json:"checkInterval,omitempty"` + + // HealthyThreshold: Number of consecutive successful checks required + // before considering the VM healthy. + HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: Host header to send when performing a HTTP Liveness check. + // Example: "myapp.appspot.com" + Host string `json:"host,omitempty"` + + // InitialDelay: The initial delay before starting to execute the + // checks. + InitialDelay string `json:"initialDelay,omitempty"` + + // Path: The request path. + Path string `json:"path,omitempty"` + + // Timeout: Time before the check is considered failed. + Timeout string `json:"timeout,omitempty"` + + // UnhealthyThreshold: Number of consecutive failed checks required + // before considering the VM unhealthy. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CheckInterval") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckInterval") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LivenessCheck) MarshalJSON() ([]byte, error) { + type noMethod LivenessCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Location: A resource that represents Google Cloud Platform location. +type Location struct { + // Labels: Cross-service attributes for the location. For + // example + // {"cloud.googleapis.com/region": "us-east1"} + // + Labels map[string]string `json:"labels,omitempty"` + + // LocationId: The canonical id for this location. For example: + // "us-east1". + LocationId string `json:"locationId,omitempty"` + + // Metadata: Service-specific metadata. For example the available + // capacity at the given location. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: Resource name for the location, which may vary between + // implementations. For example: + // "projects/example-project/locations/us-east1" + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Labels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Labels") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Location) MarshalJSON() ([]byte, error) { + type noMethod Location + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LocationMetadata: Metadata for the given +// google.cloud.location.Location. +type LocationMetadata struct { + // FlexibleEnvironmentAvailable: App Engine Flexible Environment is + // available in the given location.@OutputOnly + FlexibleEnvironmentAvailable bool `json:"flexibleEnvironmentAvailable,omitempty"` + + // StandardEnvironmentAvailable: App Engine Standard Environment is + // available in the given location.@OutputOnly + StandardEnvironmentAvailable bool `json:"standardEnvironmentAvailable,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "FlexibleEnvironmentAvailable") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "FlexibleEnvironmentAvailable") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocationMetadata) MarshalJSON() ([]byte, error) { + type noMethod LocationMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ManualScaling: A service with manual scaling runs continuously, +// allowing you to perform complex initialization and rely on the state +// of its memory over time. +type ManualScaling struct { + // Instances: Number of instances to assign to the service at the start. + // This number can later be altered by using the Modules API + // (https://cloud.google.com/appengine/docs/python/modules/functions) + // set_num_instances() function. + Instances int64 `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManualScaling) MarshalJSON() ([]byte, error) { + type noMethod ManualScaling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Network: Extra network settings. Only applicable for VM runtimes. +type Network struct { + // ForwardedPorts: List of ports, or port pairs, to forward from the + // virtual machine to the application container. + ForwardedPorts []string `json:"forwardedPorts,omitempty"` + + // InstanceTag: Tag to apply to the VM instance during creation. + InstanceTag string `json:"instanceTag,omitempty"` + + // Name: Google Cloud Platform network where the virtual machines are + // created. Specify the short name, not the resource path.Defaults to + // default. + Name string `json:"name,omitempty"` + + // SubnetworkName: Google Cloud Platform sub-network where the virtual + // machines are created. Specify the short name, not the resource + // path.If a subnetwork name is specified, a network name will also be + // required unless it is for the default network. + // If the network the VM instance is being created in is a Legacy + // network, then the IP address is allocated from the IPv4Range. + // If the network the VM instance is being created in is an auto Subnet + // Mode Network, then only network name should be specified (not the + // subnetwork_name) and the IP address is created from the IPCidrRange + // of the subnetwork that exists in that zone for that network. + // If the network the VM instance is being created in is a custom Subnet + // Mode Network, then the subnetwork_name must be specified and the IP + // address is created from the IPCidrRange of the subnetwork.If + // specified, the subnetwork must exist in the same region as the Flex + // app. + SubnetworkName string `json:"subnetworkName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ForwardedPorts") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ForwardedPorts") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Network) MarshalJSON() ([]byte, error) { + type noMethod Network + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkUtilization: Target scaling by network usage. Only applicable +// for VM runtimes. +type NetworkUtilization struct { + // TargetReceivedBytesPerSecond: Target bytes received per second. + TargetReceivedBytesPerSecond int64 `json:"targetReceivedBytesPerSecond,omitempty"` + + // TargetReceivedPacketsPerSecond: Target packets received per second. 
+ TargetReceivedPacketsPerSecond int64 `json:"targetReceivedPacketsPerSecond,omitempty"` + + // TargetSentBytesPerSecond: Target bytes sent per second. + TargetSentBytesPerSecond int64 `json:"targetSentBytesPerSecond,omitempty"` + + // TargetSentPacketsPerSecond: Target packets sent per second. + TargetSentPacketsPerSecond int64 `json:"targetSentPacketsPerSecond,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "TargetReceivedBytesPerSecond") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "TargetReceivedBytesPerSecond") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkUtilization) MarshalJSON() ([]byte, error) { + type noMethod NetworkUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Operation: This resource represents a long-running operation that is +// the result of a network API call. +type Operation struct { + // Done: If the value is false, it means the operation is still in + // progress. If true, the operation is completed, and either error or + // response is available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *Status `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that originally returns it. If you use the default HTTP + // mapping, the name should have the format of + // operations/some/unique/name. + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as Delete, the + // response is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other + // methods, the response should have the type XxxResponse, where Xxx is + // the original method name. For example, if the original method name is + // TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadata: Metadata for the given +// google.longrunning.Operation. +type OperationMetadata struct { + // EndTime: Timestamp that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // InsertTime: Timestamp that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1beta4.Version.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // OperationType: Type of this operation. Deprecated, use method field + // instead. Example: "create_version".@OutputOnly + OperationType string `json:"operationType,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/modules/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadata) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataExperimental: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataExperimental struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. 
Example: + // google.appengine.experimental.CustomDomains.CreateCustomDomain.@Output + // Only + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/customDomains/example.com.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataExperimental) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataExperimental + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1 struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // EphemeralMessage: Ephemeral message that may change every time the + // operation is polled. @OutputOnly + EphemeralMessage string `json:"ephemeralMessage,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1.Versions.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // Warning: Durable messages that persist on every operation poll. + // @OutputOnly + Warning []string `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
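// Editor's note: illustrative sketch, not part of the generated file. The
// Operation type above is the long-running-operation envelope; a caller
// typically checks Done, then Error versus Response, roughly like this
// (fmt is assumed to be imported; Status is defined later in this file):
func exampleOperationResult(op *Operation) error {
	if !op.Done {
		return nil // still in progress; poll again later
	}
	if op.Error != nil {
		return fmt.Errorf("operation %s failed: %d: %s", op.Name, op.Error.Code, op.Error.Message)
	}
	// On success, op.Response carries the resulting resource as raw JSON
	// (for example a Version after CreateVersion); unmarshal it as needed.
	return nil
}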
+ NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1 + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1Alpha: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1Alpha struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // EphemeralMessage: Ephemeral message that may change every time the + // operation is polled. @OutputOnly + EphemeralMessage string `json:"ephemeralMessage,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1alpha.Versions.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // Warning: Durable messages that persist on every operation poll. + // @OutputOnly + Warning []string `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1Alpha) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1Alpha + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1Beta: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1Beta struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // EphemeralMessage: Ephemeral message that may change every time the + // operation is polled. @OutputOnly + EphemeralMessage string `json:"ephemeralMessage,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1beta.Versions.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // Warning: Durable messages that persist on every operation poll. 
+ // @OutputOnly + Warning []string `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1Beta) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1Beta + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1Beta5: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1Beta5 struct { + // EndTime: Timestamp that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // InsertTime: Timestamp that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method name that initiated this operation. Example: + // google.appengine.v1beta5.Version.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1Beta5) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1Beta5 + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReadinessCheck: Readiness checking configuration for VM instances. +// Unhealthy instances are removed from traffic rotation. +type ReadinessCheck struct { + // CheckInterval: Interval between health checks. + CheckInterval string `json:"checkInterval,omitempty"` + + // HealthyThreshold: Number of consecutive successful checks required + // before receiving traffic. 
+ HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: Host header to send when performing a HTTP Readiness check. + // Example: "myapp.appspot.com" + Host string `json:"host,omitempty"` + + // Path: The request path. + Path string `json:"path,omitempty"` + + // Timeout: Time before the check is considered failed. + Timeout string `json:"timeout,omitempty"` + + // UnhealthyThreshold: Number of consecutive failed checks required + // before removing traffic. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CheckInterval") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckInterval") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReadinessCheck) MarshalJSON() ([]byte, error) { + type noMethod ReadinessCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RepairApplicationRequest: Request message for +// 'Applications.RepairApplication'. +type RepairApplicationRequest struct { +} + +// RequestUtilization: Target scaling by request utilization. Only +// applicable for VM runtimes. +type RequestUtilization struct { + // TargetConcurrentRequests: Target number of concurrent requests. + TargetConcurrentRequests int64 `json:"targetConcurrentRequests,omitempty"` + + // TargetRequestCountPerSecond: Target requests per second. + TargetRequestCountPerSecond int64 `json:"targetRequestCountPerSecond,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "TargetConcurrentRequests") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetConcurrentRequests") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RequestUtilization) MarshalJSON() ([]byte, error) { + type noMethod RequestUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Resources: Machine resources for a version. +type Resources struct { + // Cpu: Number of CPU cores needed. 
+ Cpu float64 `json:"cpu,omitempty"` + + // DiskGb: Disk size (GB) needed. + DiskGb float64 `json:"diskGb,omitempty"` + + // MemoryGb: Memory (GB) needed. + MemoryGb float64 `json:"memoryGb,omitempty"` + + // Volumes: User specified volumes. + Volumes []*Volume `json:"volumes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Cpu") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Cpu") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Resources) MarshalJSON() ([]byte, error) { + type noMethod Resources + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Resources) UnmarshalJSON(data []byte) error { + type noMethod Resources + var s1 struct { + Cpu gensupport.JSONFloat64 `json:"cpu"` + DiskGb gensupport.JSONFloat64 `json:"diskGb"` + MemoryGb gensupport.JSONFloat64 `json:"memoryGb"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Cpu = float64(s1.Cpu) + s.DiskGb = float64(s1.DiskGb) + s.MemoryGb = float64(s1.MemoryGb) + return nil +} + +// ScriptHandler: Executes a script to handle the request that matches +// the URL pattern. +type ScriptHandler struct { + // ScriptPath: Path to the script from the application root directory. + ScriptPath string `json:"scriptPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ScriptPath") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ScriptPath") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ScriptHandler) MarshalJSON() ([]byte, error) { + type noMethod ScriptHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Service: A Service resource is a logical component of an application +// that can share state and communicate in a secure fashion with other +// services. For example, an application that handles customer requests +// might include separate services to handle tasks such as backend data +// analysis or API requests from mobile devices. 
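// Editor's note: illustrative sketch, not part of the generated file. The
// custom UnmarshalJSON methods above (Instance, Resources) route float fields
// through gensupport.JSONFloat64 so that values arriving as quoted numbers
// still decode; plain JSON numbers work as well:
func exampleDecodeResources() (*Resources, error) {
	var r Resources
	raw := []byte(`{"cpu":"0.5","diskGb":10,"memoryGb":"2"}`)
	if err := json.Unmarshal(raw, &r); err != nil {
		return nil, err
	}
	// r.Cpu == 0.5, r.DiskGb == 10, r.MemoryGb == 2
	return &r, nil
}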
Each service has a +// collection of versions that define a specific set of code used to +// implement the functionality of that service. +type Service struct { + // Id: Relative name of the service within the application. Example: + // default.@OutputOnly + Id string `json:"id,omitempty"` + + // Name: Full path to the Service resource in the API. Example: + // apps/myapp/services/default.@OutputOnly + Name string `json:"name,omitempty"` + + // Split: Mapping that defines fractional HTTP traffic diversion to + // different versions within the service. + Split *TrafficSplit `json:"split,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Service) MarshalJSON() ([]byte, error) { + type noMethod Service + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StaticFilesHandler: Files served directly to the user for a given +// URL, such as images, CSS stylesheets, or JavaScript source files. +// Static file handlers describe which files in the application +// directory are static files, and which URLs serve them. +type StaticFilesHandler struct { + // ApplicationReadable: Whether files should also be uploaded as code + // data. By default, files declared in static file handlers are uploaded + // as static data and are only served to end users; they cannot be read + // by the application. If enabled, uploads are charged against both your + // code and static data storage resource quotas. + ApplicationReadable bool `json:"applicationReadable,omitempty"` + + // Expiration: Time a static file served by this handler should be + // cached by web proxies and browsers. + Expiration string `json:"expiration,omitempty"` + + // HttpHeaders: HTTP headers to use for all responses from these URLs. + HttpHeaders map[string]string `json:"httpHeaders,omitempty"` + + // MimeType: MIME type used to serve all files served by this + // handler.Defaults to file-specific MIME types, which are derived from + // each file's filename extension. + MimeType string `json:"mimeType,omitempty"` + + // Path: Path to the static files matched by the URL pattern, from the + // application root directory. The path can refer to text matched in + // groupings in the URL pattern. + Path string `json:"path,omitempty"` + + // RequireMatchingFile: Whether this handler should match the request if + // the file referenced by the handler does not exist. 
+ RequireMatchingFile bool `json:"requireMatchingFile,omitempty"` + + // UploadPathRegex: Regular expression that matches the file paths for + // all files that should be referenced by this handler. + UploadPathRegex string `json:"uploadPathRegex,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ApplicationReadable") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ApplicationReadable") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *StaticFilesHandler) MarshalJSON() ([]byte, error) { + type noMethod StaticFilesHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The Status type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by gRPC (https://github.com/grpc). The error +// model is designed to be: +// Simple to use and understand for most users +// Flexible enough to meet unexpected needsOverviewThe Status message +// contains three pieces of data: error code, error message, and error +// details. The error code should be an enum value of google.rpc.Code, +// but it may accept additional error codes if needed. The error message +// should be a developer-facing English message that helps developers +// understand and resolve the error. If a localized user-facing error +// message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of +// error detail types in the package google.rpc which can be used for +// common error conditions.Language mappingThe Status message is the +// logical representation of the error model, but it is not necessarily +// the actual wire format. When the Status message is exposed in +// different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions in Java, but more likely mapped to some error codes in +// C.Other usesThe error model and the Status message can be used in a +// variety of environments, either with or without APIs, to provide a +// consistent developer experience across different environments.Example +// uses of this error model include: +// Partial errors. If a service needs to return partial errors to the +// client, it may embed the Status in the normal response to indicate +// the partial errors. +// Workflow errors. A typical workflow has multiple steps. Each step may +// have a Status message for error reporting purpose. +// Batch operations. If a client uses batch request and batch response, +// the Status message should be used directly inside batch response, one +// for each error sub-response. 
+// Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the Status message. +// Logging. If some API errors are stored in logs, the message Status +// could be used directly after any stripping needed for +// security/privacy reasons. +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There will + // be a common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type noMethod Status + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TrafficSplit: Traffic routing configuration for versions within a +// single service. Traffic splits define how traffic directed to the +// service is assigned to versions. +type TrafficSplit struct { + // Allocations: Mapping from version IDs within the service to + // fractional (0.000, 1] allocations of traffic for that version. Each + // version can be specified only once, but some versions in the service + // may not have any traffic allocation. Services that have traffic + // allocated cannot be deleted until either the service is deleted or + // their traffic allocation is removed. Allocations must sum to 1. Up to + // two decimal place precision is supported for IP-based splits and up + // to three decimal places is supported for cookie-based splits. + Allocations map[string]float64 `json:"allocations,omitempty"` + + // ShardBy: Mechanism used to determine which version a request is sent + // to. The traffic selection algorithm will be stable for either type + // until allocations are changed. + // + // Possible values: + // "UNSPECIFIED" - Diversion method unspecified. + // "COOKIE" - Diversion based on a specially named cookie, + // "GOOGAPPUID." The cookie must be set by the application itself or no + // diversion will occur. + // "IP" - Diversion based on applying the modulus operation to a + // fingerprint of the IP address. + ShardBy string `json:"shardBy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Allocations") to + // unconditionally include in API requests. 
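// Editor's note: illustrative sketch, not part of the generated file. A
// TrafficSplit as described above, sending 90% of traffic to version "v1"
// and 10% to "v2"; allocations must sum to 1, and cookie-based splits
// support up to three decimal places of precision:
func exampleTrafficSplit() *TrafficSplit {
	return &TrafficSplit{
		ShardBy: "COOKIE",
		Allocations: map[string]float64{
			"v1": 0.9,
			"v2": 0.1,
		},
	}
}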
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Allocations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TrafficSplit) MarshalJSON() ([]byte, error) { + type noMethod TrafficSplit + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlDispatchRule: Rules to match an HTTP request and dispatch that +// request to a service. +type UrlDispatchRule struct { + // Domain: Domain name to match against. The wildcard "*" is supported + // if specified before a period: "*.".Defaults to matching all domains: + // "*". + Domain string `json:"domain,omitempty"` + + // Path: Pathname within the host. Must start with a "/". A single "*" + // can be included at the end of the path. The sum of the lengths of the + // domain and path may not exceed 100 characters. + Path string `json:"path,omitempty"` + + // Service: Resource ID of a service in this application that should + // serve the matched request. The service must already exist. Example: + // default. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Domain") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Domain") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlDispatchRule) MarshalJSON() ([]byte, error) { + type noMethod UrlDispatchRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMap: URL pattern and description of how the URL should be handled. +// App Engine can handle URLs by executing application code or by +// serving static files uploaded with the version, such as images, CSS, +// or JavaScript. +type UrlMap struct { + // ApiEndpoint: Uses API Endpoints to handle requests. + ApiEndpoint *ApiEndpointHandler `json:"apiEndpoint,omitempty"` + + // AuthFailAction: Action to take when users access resources that + // require authentication. Defaults to redirect. + // + // Possible values: + // "AUTH_FAIL_ACTION_UNSPECIFIED" - Not specified. + // AUTH_FAIL_ACTION_REDIRECT is assumed. 
+ // "AUTH_FAIL_ACTION_REDIRECT" - Redirects user to + // "accounts.google.com". The user is redirected back to the application + // URL after signing in or creating an account. + // "AUTH_FAIL_ACTION_UNAUTHORIZED" - Rejects request with a 401 HTTP + // status code and an error message. + AuthFailAction string `json:"authFailAction,omitempty"` + + // Login: Level of login required to access this resource. + // + // Possible values: + // "LOGIN_UNSPECIFIED" - Not specified. LOGIN_OPTIONAL is assumed. + // "LOGIN_OPTIONAL" - Does not require that the user is signed in. + // "LOGIN_ADMIN" - If the user is not signed in, the auth_fail_action + // is taken. In addition, if the user is not an administrator for the + // application, they are given an error message regardless of + // auth_fail_action. If the user is an administrator, the handler + // proceeds. + // "LOGIN_REQUIRED" - If the user has signed in, the handler proceeds + // normally. Otherwise, the auth_fail_action is taken. + Login string `json:"login,omitempty"` + + // RedirectHttpResponseCode: 30x code to use when performing redirects + // for the secure field. Defaults to 302. + // + // Possible values: + // "REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED" - Not specified. 302 is + // assumed. + // "REDIRECT_HTTP_RESPONSE_CODE_301" - 301 Moved Permanently code. + // "REDIRECT_HTTP_RESPONSE_CODE_302" - 302 Moved Temporarily code. + // "REDIRECT_HTTP_RESPONSE_CODE_303" - 303 See Other code. + // "REDIRECT_HTTP_RESPONSE_CODE_307" - 307 Temporary Redirect code. + RedirectHttpResponseCode string `json:"redirectHttpResponseCode,omitempty"` + + // Script: Executes a script to handle the request that matches this URL + // pattern. + Script *ScriptHandler `json:"script,omitempty"` + + // SecurityLevel: Security (HTTPS) enforcement for this URL. + // + // Possible values: + // "SECURE_UNSPECIFIED" - Not specified. + // "SECURE_DEFAULT" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used, and respond + // accordingly. + // "SECURE_NEVER" - Requests for a URL that match this handler that + // use HTTPS are automatically redirected to the HTTP equivalent URL. + // "SECURE_OPTIONAL" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used and respond + // accordingly. + // "SECURE_ALWAYS" - Requests for a URL that match this handler that + // do not use HTTPS are automatically redirected to the HTTPS URL with + // the same path. Query parameters are reserved for the redirect. + SecurityLevel string `json:"securityLevel,omitempty"` + + // StaticFiles: Returns the contents of a file, such as an image, as the + // response. + StaticFiles *StaticFilesHandler `json:"staticFiles,omitempty"` + + // UrlRegex: URL prefix. Uses regular expression syntax, which means + // regexp special characters must be escaped, but should not contain + // groupings. All URLs that begin with this prefix are handled by this + // handler, using the portion of the URL after the prefix as part of the + // file path. + UrlRegex string `json:"urlRegex,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ApiEndpoint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ApiEndpoint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type noMethod UrlMap + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Version: A Version resource is a specific set of source code and +// configuration files that are deployed into a service. +type Version struct { + // ApiConfig: Serving configuration for Google Cloud Endpoints + // (https://cloud.google.com/appengine/docs/python/endpoints/).Only + // returned in GET requests if view=FULL is set. + ApiConfig *ApiConfigHandler `json:"apiConfig,omitempty"` + + // AutomaticScaling: Automatic scaling is based on request rate, + // response latencies, and other application metrics. + AutomaticScaling *AutomaticScaling `json:"automaticScaling,omitempty"` + + // BasicScaling: A service with basic scaling will create an instance + // when the application receives a request. The instance will be turned + // down when the app becomes idle. Basic scaling is ideal for work that + // is intermittent or driven by user activity. + BasicScaling *BasicScaling `json:"basicScaling,omitempty"` + + // BetaSettings: Metadata settings that are supplied to this version to + // enable beta runtime features. + BetaSettings map[string]string `json:"betaSettings,omitempty"` + + // CreateTime: Time that this version was created.@OutputOnly + CreateTime string `json:"createTime,omitempty"` + + // CreatedBy: Email address of the user who created this + // version.@OutputOnly + CreatedBy string `json:"createdBy,omitempty"` + + // DefaultExpiration: Duration that static files should be cached by web + // proxies and browsers. Only applicable if the corresponding + // StaticFilesHandler + // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al + // pha/apps.services.versions#staticfileshandler) does not specify its + // own expiration time.Only returned in GET requests if view=FULL is + // set. + DefaultExpiration string `json:"defaultExpiration,omitempty"` + + // Deployment: Code and application artifacts that make up this + // version.Only returned in GET requests if view=FULL is set. + Deployment *Deployment `json:"deployment,omitempty"` + + // DiskUsageBytes: Total size in bytes of all the files that are + // included in this version and curerntly hosted on the App Engine + // disk.@OutputOnly + DiskUsageBytes int64 `json:"diskUsageBytes,omitempty,string"` + + // EndpointsApiService: Cloud Endpoints configuration.If + // endpoints_api_service is set, the Cloud Endpoints Extensible Service + // Proxy will be provided to serve the API implemented by the app. + EndpointsApiService *EndpointsApiService `json:"endpointsApiService,omitempty"` + + // Env: App Engine execution environment for this version.Defaults to + // standard. 
+ Env string `json:"env,omitempty"` + + // EnvVariables: Environment variables available to the application.Only + // returned in GET requests if view=FULL is set. + EnvVariables map[string]string `json:"envVariables,omitempty"` + + // ErrorHandlers: Custom static error pages. Limited to 10KB per + // page.Only returned in GET requests if view=FULL is set. + ErrorHandlers []*ErrorHandler `json:"errorHandlers,omitempty"` + + // Handlers: An ordered list of URL-matching patterns that should be + // applied to incoming requests. The first matching URL handles the + // request and other request handlers are not attempted.Only returned in + // GET requests if view=FULL is set. + Handlers []*UrlMap `json:"handlers,omitempty"` + + // HealthCheck: Configures health checking for VM instances. Unhealthy + // instances are stopped and replaced with new instances. Only + // applicable for VM runtimes.Only returned in GET requests if view=FULL + // is set. + HealthCheck *HealthCheck `json:"healthCheck,omitempty"` + + // Id: Relative name of the version within the service. Example: v1. + // Version names can contain only lowercase letters, numbers, or + // hyphens. Reserved names: "default", "latest", and any name with the + // prefix "ah-". + Id string `json:"id,omitempty"` + + // InboundServices: Before an application can receive email or XMPP + // messages, the application must be configured to enable the service. + // + // Possible values: + // "INBOUND_SERVICE_UNSPECIFIED" - Not specified. + // "INBOUND_SERVICE_MAIL" - Allows an application to receive mail. + // "INBOUND_SERVICE_MAIL_BOUNCE" - Allows an application to receive + // email-bound notifications. + // "INBOUND_SERVICE_XMPP_ERROR" - Allows an application to receive + // error stanzas. + // "INBOUND_SERVICE_XMPP_MESSAGE" - Allows an application to receive + // instant messages. + // "INBOUND_SERVICE_XMPP_SUBSCRIBE" - Allows an application to receive + // user subscription POSTs. + // "INBOUND_SERVICE_XMPP_PRESENCE" - Allows an application to receive + // a user's chat presence. + // "INBOUND_SERVICE_CHANNEL_PRESENCE" - Registers an application for + // notifications when a client connects or disconnects from a channel. + // "INBOUND_SERVICE_WARMUP" - Enables warmup requests. + InboundServices []string `json:"inboundServices,omitempty"` + + // InstanceClass: Instance class that is used to run this version. Valid + // values are: + // AutomaticScaling: F1, F2, F4, F4_1G + // ManualScaling or BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 + // for AutomaticScaling and B1 for ManualScaling or BasicScaling. + InstanceClass string `json:"instanceClass,omitempty"` + + // Libraries: Configuration for third-party Python runtime libraries + // that are required by the application.Only returned in GET requests if + // view=FULL is set. + Libraries []*Library `json:"libraries,omitempty"` + + // LivenessCheck: Configures liveness health checking for VM instances. + // Unhealthy instances are stopped and replaced with new instancesOnly + // returned in GET requests if view=FULL is set. + LivenessCheck *LivenessCheck `json:"livenessCheck,omitempty"` + + // ManualScaling: A service with manual scaling runs continuously, + // allowing you to perform complex initialization and rely on the state + // of its memory over time. + ManualScaling *ManualScaling `json:"manualScaling,omitempty"` + + // Name: Full path to the Version resource in the API. 
Example: + // apps/myapp/services/default/versions/v1.@OutputOnly + Name string `json:"name,omitempty"` + + // Network: Extra network settings. Only applicable for VM runtimes. + Network *Network `json:"network,omitempty"` + + // NobuildFilesRegex: Files that match this pattern will not be built + // into this version. Only applicable for Go runtimes.Only returned in + // GET requests if view=FULL is set. + NobuildFilesRegex string `json:"nobuildFilesRegex,omitempty"` + + // ReadinessCheck: Configures readiness health checking for VM + // instances. Unhealthy instances are not put into the backend traffic + // rotation.Only returned in GET requests if view=FULL is set. + ReadinessCheck *ReadinessCheck `json:"readinessCheck,omitempty"` + + // Resources: Machine resources for this version. Only applicable for VM + // runtimes. + Resources *Resources `json:"resources,omitempty"` + + // Runtime: Desired runtime. Example: python27. + Runtime string `json:"runtime,omitempty"` + + // ServingStatus: Current serving status of this version. Only the + // versions with a SERVING status create instances and can be + // billed.SERVING_STATUS_UNSPECIFIED is an invalid value. Defaults to + // SERVING. + // + // Possible values: + // "SERVING_STATUS_UNSPECIFIED" - Not specified. + // "SERVING" - Currently serving. Instances are created according to + // the scaling settings of the version. + // "STOPPED" - Disabled. No instances will be created and the scaling + // settings are ignored until the state of the version changes to + // SERVING. + ServingStatus string `json:"servingStatus,omitempty"` + + // Threadsafe: Whether multiple requests can be dispatched to this + // version at once. + Threadsafe bool `json:"threadsafe,omitempty"` + + // VersionUrl: Serving URL for this version. Example: + // "https://myversion-dot-myservice-dot-myapp.appspot.com"@OutputOnly + VersionUrl string `json:"versionUrl,omitempty"` + + // Vm: Whether to deploy this version in a container on a virtual + // machine. + Vm bool `json:"vm,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ApiConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ApiConfig") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Version) MarshalJSON() ([]byte, error) { + type noMethod Version + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Volume: Volumes mounted within the app container. Only applicable for +// VM runtimes. +type Volume struct { + // Name: Unique name for the volume. + Name string `json:"name,omitempty"` + + // SizeGb: Volume size in gigabytes. 
+ SizeGb float64 `json:"sizeGb,omitempty"` + + // VolumeType: Underlying volume type, e.g. 'tmpfs'. + VolumeType string `json:"volumeType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Volume) MarshalJSON() ([]byte, error) { + type noMethod Volume + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Volume) UnmarshalJSON(data []byte) error { + type noMethod Volume + var s1 struct { + SizeGb gensupport.JSONFloat64 `json:"sizeGb"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.SizeGb = float64(s1.SizeGb) + return nil +} + +// ZipInfo: The zip file information for a zip deployment. +type ZipInfo struct { + // FilesCount: An estimate of the number of files in a zip for a zip + // deployment. If set, must be greater than or equal to the actual + // number of files. Used for optimizing performance; if not provided, + // deployment may be slow. + FilesCount int64 `json:"filesCount,omitempty"` + + // SourceUrl: URL of the zip file to deploy from. Must be a URL to a + // resource in Google Cloud Storage in the form + // 'http(s)://storage.googleapis.com//'. + SourceUrl string `json:"sourceUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FilesCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FilesCount") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZipInfo) MarshalJSON() ([]byte, error) { + type noMethod ZipInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "appengine.apps.create": + +type AppsCreateCall struct { + s *APIService + application *Application + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an App Engine application for a Google Cloud Platform +// project. 
Required fields: +// id - The ID of the target Cloud Platform project. +// location - The region +// (https://cloud.google.com/appengine/docs/locations) where you want +// the App Engine application located.For more information about App +// Engine applications, see Managing Projects, Applications, and Billing +// (https://cloud.google.com/appengine/docs/python/console/). +func (r *AppsService) Create(application *Application) *AppsCreateCall { + c := &AppsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.application = application + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsCreateCall) Fields(s ...googleapi.Field) *AppsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsCreateCall) Context(ctx context.Context) *AppsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an App Engine application for a Google Cloud Platform project. 
Required fields:\nid - The ID of the target Cloud Platform project.\nlocation - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/python/console/).", + // "flatPath": "v1alpha/apps", + // "httpMethod": "POST", + // "id": "appengine.apps.create", + // "parameterOrder": [], + // "parameters": {}, + // "path": "v1alpha/apps", + // "request": { + // "$ref": "Application" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.get": + +type AppsGetCall struct { + s *APIService + appsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets information about an application. +func (r *AppsService) Get(appsId string) *AppsGetCall { + c := &AppsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsGetCall) Fields(s ...googleapi.Field) *AppsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsGetCall) IfNoneMatch(entityTag string) *AppsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsGetCall) Context(ctx context.Context) *AppsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.get" call. +// Exactly one of *Application or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Application.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *AppsGetCall) Do(opts ...googleapi.CallOption) (*Application, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Application{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about an application.", + // "flatPath": "v1alpha/apps/{appsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.get", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the Application resource to get. Example: apps/myapp.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}", + // "response": { + // "$ref": "Application" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.patch": + +type AppsPatchCall struct { + s *APIService + appsId string + application *Application + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified Application resource. You can update the +// following fields: +// auth_domain - Google authentication domain for controlling user +// access to the application. +// default_cookie_expiration - Cookie expiration policy for the +// application. +func (r *AppsService) Patch(appsId string, application *Application) *AppsPatchCall { + c := &AppsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.application = application + return c +} + +// UpdateMask sets the optional parameter "updateMask": Standard field +// mask for the set of fields to be updated. +func (c *AppsPatchCall) UpdateMask(updateMask string) *AppsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsPatchCall) Fields(s ...googleapi.Field) *AppsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsPatchCall) Context(ctx context.Context) *AppsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified Application resource. You can update the following fields:\nauth_domain - Google authentication domain for controlling user access to the application.\ndefault_cookie_expiration - Cookie expiration policy for the application.", + // "flatPath": "v1alpha/apps/{appsId}", + // "httpMethod": "PATCH", + // "id": "appengine.apps.patch", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the Application resource to update. 
Example: apps/myapp.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Standard field mask for the set of fields to be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}", + // "request": { + // "$ref": "Application" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.repair": + +type AppsRepairCall struct { + s *APIService + appsId string + repairapplicationrequest *RepairApplicationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Repair: Recreates the required App Engine features for the specified +// App Engine application, for example a Cloud Storage bucket or App +// Engine service account. Use this method if you receive an error +// message about a missing feature, for example, Error retrieving the +// App Engine service account. +func (r *AppsService) Repair(appsId string, repairapplicationrequest *RepairApplicationRequest) *AppsRepairCall { + c := &AppsRepairCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.repairapplicationrequest = repairapplicationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsRepairCall) Fields(s ...googleapi.Field) *AppsRepairCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsRepairCall) Context(ctx context.Context) *AppsRepairCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsRepairCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsRepairCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.repairapplicationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}:repair") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.repair" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsRepairCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Recreates the required App Engine features for the specified App Engine application, for example a Cloud Storage bucket or App Engine service account. Use this method if you receive an error message about a missing feature, for example, Error retrieving the App Engine service account.", + // "flatPath": "v1alpha/apps/{appsId}:repair", + // "httpMethod": "POST", + // "id": "appengine.apps.repair", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the application to repair. Example: apps/myapp", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}:repair", + // "request": { + // "$ref": "RepairApplicationRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.locations.get": + +type AppsLocationsGetCall struct { + s *APIService + appsId string + locationsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get information about a location. +func (r *AppsLocationsService) Get(appsId string, locationsId string) *AppsLocationsGetCall { + c := &AppsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.locationsId = locationsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsLocationsGetCall) Fields(s ...googleapi.Field) *AppsLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsLocationsGetCall) IfNoneMatch(entityTag string) *AppsLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsLocationsGetCall) Context(ctx context.Context) *AppsLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/locations/{locationsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "locationsId": c.locationsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.locations.get" call. +// Exactly one of *Location or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Location.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Location{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get information about a location.", + // "flatPath": "v1alpha/apps/{appsId}/locations/{locationsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.locations.get", + // "parameterOrder": [ + // "appsId", + // "locationsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Resource name for the location.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "locationsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/locations/{locationsId}", + // "response": { + // "$ref": "Location" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.locations.list": + +type AppsLocationsListCall struct { + s *APIService + appsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists information about the supported locations for this +// service. 
+func (r *AppsLocationsService) List(appsId string) *AppsLocationsListCall { + c := &AppsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. +func (c *AppsLocationsListCall) Filter(filter string) *AppsLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *AppsLocationsListCall) PageSize(pageSize int64) *AppsLocationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *AppsLocationsListCall) PageToken(pageToken string) *AppsLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsLocationsListCall) Fields(s ...googleapi.Field) *AppsLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsLocationsListCall) IfNoneMatch(entityTag string) *AppsLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsLocationsListCall) Context(ctx context.Context) *AppsLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/locations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.locations.list" call. +// Exactly one of *ListLocationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLocationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListLocationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists information about the supported locations for this service.", + // "flatPath": "v1alpha/apps/{appsId}/locations", + // "httpMethod": "GET", + // "id": "appengine.apps.locations.list", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. The resource that owns the locations collection, if applicable.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/locations", + // "response": { + // "$ref": "ListLocationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.operations.get": + +type AppsOperationsGetCall struct { + s *APIService + appsId string + operationsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. +func (r *AppsOperationsService) Get(appsId string, operationsId string) *AppsOperationsGetCall { + c := &AppsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.operationsId = operationsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsOperationsGetCall) Fields(s ...googleapi.Field) *AppsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsOperationsGetCall) IfNoneMatch(entityTag string) *AppsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsOperationsGetCall) Context(ctx context.Context) *AppsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/operations/{operationsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "operationsId": c.operationsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1alpha/apps/{appsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.operations.get", + // "parameterOrder": [ + // "appsId", + // "operationsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. The name of the operation resource.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "operationsId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/operations/{operationsId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.operations.list": + +type AppsOperationsListCall struct { + s *APIService + appsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the +// request. If the server doesn't support this method, it returns +// UNIMPLEMENTED.NOTE: the name binding below allows API services to +// override the binding to use different resource name schemes, such as +// users/*/operations. +func (r *AppsOperationsService) List(appsId string) *AppsOperationsListCall { + c := &AppsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. +func (c *AppsOperationsListCall) Filter(filter string) *AppsOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *AppsOperationsListCall) PageSize(pageSize int64) *AppsOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *AppsOperationsListCall) PageToken(pageToken string) *AppsOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsOperationsListCall) Fields(s ...googleapi.Field) *AppsOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsOperationsListCall) IfNoneMatch(entityTag string) *AppsOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsOperationsListCall) Context(ctx context.Context) *AppsOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.", + // "flatPath": "v1alpha/apps/{appsId}/operations", + // "httpMethod": "GET", + // "id": "appengine.apps.operations.list", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. 
The name of the operation collection.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/operations", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.services.delete": + +type AppsServicesDeleteCall struct { + s *APIService + appsId string + servicesId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified service and all enclosed versions. +func (r *AppsServicesService) Delete(appsId string, servicesId string) *AppsServicesDeleteCall { + c := &AppsServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesDeleteCall) Fields(s ...googleapi.Field) *AppsServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesDeleteCall) Context(ctx context.Context) *AppsServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified service and all enclosed versions.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}", + // "httpMethod": "DELETE", + // "id": "appengine.apps.services.delete", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.get": + +type AppsServicesGetCall struct { + s *APIService + appsId string + servicesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the current configuration of the specified service. +func (r *AppsServicesService) Get(appsId string, servicesId string) *AppsServicesGetCall { + c := &AppsServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesGetCall) Fields(s ...googleapi.Field) *AppsServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *AppsServicesGetCall) IfNoneMatch(entityTag string) *AppsServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesGetCall) Context(ctx context.Context) *AppsServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.get" call. +// Exactly one of *Service or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Service.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AppsServicesGetCall) Do(opts ...googleapi.CallOption) (*Service, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Service{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the current configuration of the specified service.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}", + // "httpMethod": "GET", + // "id": "appengine.apps.services.get", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}", + // "response": { + // "$ref": "Service" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.services.list": + +type AppsServicesListCall struct { + s *APIService + appsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the services in the application. +func (r *AppsServicesService) List(appsId string) *AppsServicesListCall { + c := &AppsServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum results to +// return per page. +func (c *AppsServicesListCall) PageSize(pageSize int64) *AppsServicesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Continuation token +// for fetching the next page of results. +func (c *AppsServicesListCall) PageToken(pageToken string) *AppsServicesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesListCall) Fields(s ...googleapi.Field) *AppsServicesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesListCall) IfNoneMatch(entityTag string) *AppsServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesListCall) Context(ctx context.Context) *AppsServicesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.list" call. 
+// Exactly one of *ListServicesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListServicesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListServicesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all the services in the application.", + // "flatPath": "v1alpha/apps/{appsId}/services", + // "httpMethod": "GET", + // "id": "appengine.apps.services.list", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent Application resource. Example: apps/myapp.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum results to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Continuation token for fetching the next page of results.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services", + // "response": { + // "$ref": "ListServicesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsServicesListCall) Pages(ctx context.Context, f func(*ListServicesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.services.patch": + +type AppsServicesPatchCall struct { + s *APIService + appsId string + servicesId string + service *Service + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the configuration of the specified service. 
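+//
+// Example (illustrative sketch only; svc is an authenticated *APIService and
+// all IDs, version names and shardBy/allocation values are placeholders). A
+// gradual traffic migration via Patch, assuming Service.Split and
+// TrafficSplit.Allocations are populated as shown:
+//
+//	update := &Service{
+//		Split: &TrafficSplit{
+//			ShardBy:     "IP",
+//			Allocations: map[string]float64{"v1": 0.5, "v2": 0.5},
+//		},
+//	}
+//	op, err := svc.Apps.Services.Patch("my-app", "default", update).
+//		UpdateMask("split").
+//		MigrateTraffic(true).
+//		Context(ctx).Do()
+//	// op is a long-running Operation; poll it until Done before relying on
+//	// the new split.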
+func (r *AppsServicesService) Patch(appsId string, servicesId string, service *Service) *AppsServicesPatchCall { + c := &AppsServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.service = service + return c +} + +// MigrateTraffic sets the optional parameter "migrateTraffic": Set to +// true to gradually shift traffic to one or more versions that you +// specify. By default, traffic is shifted immediately. For gradual +// traffic migration, the target versions must be located within +// instances that are configured for both warmup requests +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al +// pha/apps.services.versions#inboundservicetype) and automatic scaling +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al +// pha/apps.services.versions#automaticscaling). You must specify the +// shardBy +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al +// pha/apps.services#shardby) field in the Service resource. Gradual +// traffic migration is not supported in the App Engine flexible +// environment. For examples, see Migrating and Splitting Traffic +// (https://cloud.google.com/appengine/docs/admin-api/migrating-splitting +// -traffic). +func (c *AppsServicesPatchCall) MigrateTraffic(migrateTraffic bool) *AppsServicesPatchCall { + c.urlParams_.Set("migrateTraffic", fmt.Sprint(migrateTraffic)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Standard field +// mask for the set of fields to be updated. +func (c *AppsServicesPatchCall) UpdateMask(updateMask string) *AppsServicesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesPatchCall) Fields(s ...googleapi.Field) *AppsServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesPatchCall) Context(ctx context.Context) *AppsServicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.service) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.patch" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the configuration of the specified service.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}", + // "httpMethod": "PATCH", + // "id": "appengine.apps.services.patch", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "migrateTraffic": { + // "description": "Set to true to gradually shift traffic to one or more versions that you specify. By default, traffic is shifted immediately. For gradual traffic migration, the target versions must be located within instances that are configured for both warmup requests (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#inboundservicetype) and automatic scaling (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#automaticscaling). You must specify the shardBy (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services#shardby) field in the Service resource. Gradual traffic migration is not supported in the App Engine flexible environment. For examples, see Migrating and Splitting Traffic (https://cloud.google.com/appengine/docs/admin-api/migrating-splitting-traffic).", + // "location": "query", + // "type": "boolean" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Standard field mask for the set of fields to be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}", + // "request": { + // "$ref": "Service" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.create": + +type AppsServicesVersionsCreateCall struct { + s *APIService + appsId string + servicesId string + version *Version + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Deploys code and resource files to a new version. 
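+//
+// Example (illustrative sketch only; svc is an authenticated *APIService and
+// the Version fields shown are placeholders, not a complete deployment
+// payload):
+//
+//	v := &Version{Id: "v2", Runtime: "go"}
+//	op, err := svc.Apps.Services.Versions.Create("my-app", "default", v).Context(ctx).Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	// Deployment is asynchronous: op names a long-running Operation whose
+//	// status should be polled until it reports Done.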
+func (r *AppsServicesVersionsService) Create(appsId string, servicesId string, version *Version) *AppsServicesVersionsCreateCall { + c := &AppsServicesVersionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.version = version + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsCreateCall) Fields(s ...googleapi.Field) *AppsServicesVersionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsCreateCall) Context(ctx context.Context) *AppsServicesVersionsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deploys code and resource files to a new version.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + // "httpMethod": "POST", + // "id": "appengine.apps.services.versions.create", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent resource to create this version under. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + // "request": { + // "$ref": "Version" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.delete": + +type AppsServicesVersionsDeleteCall struct { + s *APIService + appsId string + servicesId string + versionsId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an existing Version resource. +func (r *AppsServicesVersionsService) Delete(appsId string, servicesId string, versionsId string) *AppsServicesVersionsDeleteCall { + c := &AppsServicesVersionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsDeleteCall) Fields(s ...googleapi.Field) *AppsServicesVersionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsDeleteCall) Context(ctx context.Context) *AppsServicesVersionsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
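+//
+// Example (illustrative sketch only; the header name is purely hypothetical
+// and svc is an authenticated *APIService):
+//
+//	call := svc.Apps.Services.Versions.Delete("my-app", "default", "v1")
+//	call.Header().Set("X-Example-Trace", "cleanup-job")
+//	op, err := call.Context(ctx).Do()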
+func (c *AppsServicesVersionsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an existing Version resource.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "httpMethod": "DELETE", + // "id": "appengine.apps.services.versions.delete", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.get": + +type AppsServicesVersionsGetCall struct { + s *APIService + appsId string + servicesId string + versionsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the specified Version resource. By default, only a +// BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get +// the full resource. +func (r *AppsServicesVersionsService) Get(appsId string, servicesId string, versionsId string) *AppsServicesVersionsGetCall { + c := &AppsServicesVersionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + return c +} + +// View sets the optional parameter "view": Controls the set of fields +// returned in the Get response. +// +// Possible values: +// "BASIC" +// "FULL" +func (c *AppsServicesVersionsGetCall) View(view string) *AppsServicesVersionsGetCall { + c.urlParams_.Set("view", view) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsGetCall) Fields(s ...googleapi.Field) *AppsServicesVersionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsGetCall) IfNoneMatch(entityTag string) *AppsServicesVersionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsGetCall) Context(ctx context.Context) *AppsServicesVersionsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.get" call. +// Exactly one of *Version or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Version.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AppsServicesVersionsGetCall) Do(opts ...googleapi.CallOption) (*Version, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Version{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the specified Version resource. By default, only a BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get the full resource.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.get", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "view": { + // "description": "Controls the set of fields returned in the Get response.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "response": { + // "$ref": "Version" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.services.versions.list": + +type AppsServicesVersionsListCall struct { + s *APIService + appsId string + servicesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the versions of a service. 
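+//
+// Example (illustrative sketch only; svc is an authenticated *APIService and
+// the IDs are placeholders). Paging through every version with Pages:
+//
+//	err := svc.Apps.Services.Versions.List("my-app", "default").
+//		PageSize(50).
+//		Pages(ctx, func(page *ListVersionsResponse) error {
+//			for _, v := range page.Versions {
+//				fmt.Println(v.Id, v.ServingStatus)
+//			}
+//			return nil // returning a non-nil error halts the iteration
+//		})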
+func (r *AppsServicesVersionsService) List(appsId string, servicesId string) *AppsServicesVersionsListCall { + c := &AppsServicesVersionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum results to +// return per page. +func (c *AppsServicesVersionsListCall) PageSize(pageSize int64) *AppsServicesVersionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Continuation token +// for fetching the next page of results. +func (c *AppsServicesVersionsListCall) PageToken(pageToken string) *AppsServicesVersionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// View sets the optional parameter "view": Controls the set of fields +// returned in the List response. +// +// Possible values: +// "BASIC" +// "FULL" +func (c *AppsServicesVersionsListCall) View(view string) *AppsServicesVersionsListCall { + c.urlParams_.Set("view", view) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsListCall) Fields(s ...googleapi.Field) *AppsServicesVersionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsListCall) IfNoneMatch(entityTag string) *AppsServicesVersionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsListCall) Context(ctx context.Context) *AppsServicesVersionsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.list" call. +// Exactly one of *ListVersionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *ListVersionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsServicesVersionsListCall) Do(opts ...googleapi.CallOption) (*ListVersionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListVersionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the versions of a service.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.list", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent Service resource. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum results to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Continuation token for fetching the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "view": { + // "description": "Controls the set of fields returned in the List response.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions", + // "response": { + // "$ref": "ListVersionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsServicesVersionsListCall) Pages(ctx context.Context, f func(*ListVersionsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.services.versions.patch": + +type AppsServicesVersionsPatchCall struct { + s *APIService + appsId string + servicesId string + versionsId string + version *Version + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified Version resource. 
You can specify the +// following fields depending on the App Engine environment and type of +// scaling that the version resource uses: +// serving_status +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al +// pha/apps.services.versions#Version.FIELDS.serving_status): For +// Version resources that use basic scaling, manual scaling, or run in +// the App Engine flexible environment. +// instance_class +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al +// pha/apps.services.versions#Version.FIELDS.instance_class): For +// Version resources that run in the App Engine standard +// environment. +// automatic_scaling.min_idle_instances +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al +// pha/apps.services.versions#Version.FIELDS.automatic_scaling): For +// Version resources that use automatic scaling and run in the App +// Engine standard environment. +// automatic_scaling.max_idle_instances +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1al +// pha/apps.services.versions#Version.FIELDS.automatic_scaling): For +// Version resources that use automatic scaling and run in the App +// Engine standard environment. +func (r *AppsServicesVersionsService) Patch(appsId string, servicesId string, versionsId string, version *Version) *AppsServicesVersionsPatchCall { + c := &AppsServicesVersionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.version = version + return c +} + +// UpdateMask sets the optional parameter "updateMask": Standard field +// mask for the set of fields to be updated. +func (c *AppsServicesVersionsPatchCall) UpdateMask(updateMask string) *AppsServicesVersionsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsPatchCall) Fields(s ...googleapi.Field) *AppsServicesVersionsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsPatchCall) Context(ctx context.Context) *AppsServicesVersionsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified Version resource. You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.serving_status): For Version resources that use basic scaling, manual scaling, or run in the App Engine flexible environment.\ninstance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.instance_class): For Version resources that run in the App Engine standard environment.\nautomatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.\nautomatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1alpha/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "httpMethod": "PATCH", + // "id": "appengine.apps.services.versions.patch", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default/versions/1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Standard field mask for the set of fields to be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "request": { + // "$ref": "Version" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.debug": + +type AppsServicesVersionsInstancesDebugCall struct { + s *APIService + appsId string + servicesId string + versionsId string + instancesId string + debuginstancerequest *DebugInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Debug: Enables debugging on a VM instance. This allows you to use the +// SSH command to connect to the virtual machine where the instance +// lives. While in "debug mode", the instance continues to serve live +// traffic. You should delete the instance when you are done debugging +// and then allow the system to take over and determine if another +// instance should be started.Only applicable for instances in App +// Engine flexible environment. +func (r *AppsServicesVersionsInstancesService) Debug(appsId string, servicesId string, versionsId string, instancesId string, debuginstancerequest *DebugInstanceRequest) *AppsServicesVersionsInstancesDebugCall { + c := &AppsServicesVersionsInstancesDebugCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.instancesId = instancesId + c.debuginstancerequest = debuginstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsInstancesDebugCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesDebugCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesDebugCall) Context(ctx context.Context) *AppsServicesVersionsInstancesDebugCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
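+//
+// Example (illustrative sketch of the Debug call documented above; svc is an
+// authenticated *APIService and all IDs are placeholders). The instance keeps
+// serving live traffic while in debug mode and should be deleted when
+// debugging is finished:
+//
+//	op, err := svc.Apps.Services.Versions.Instances.
+//		Debug("my-app", "default", "v1", "instance-1", &DebugInstanceRequest{}).
+//		Context(ctx).Do()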
+func (c *AppsServicesVersionsInstancesDebugCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesDebugCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.debuginstancerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + "instancesId": c.instancesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.debug" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesDebugCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enables debugging on a VM instance. This allows you to use the SSH command to connect to the virtual machine where the instance lives. While in \"debug mode\", the instance continues to serve live traffic. You should delete the instance when you are done debugging and then allow the system to take over and determine if another instance should be started.Only applicable for instances in App Engine flexible environment.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug", + // "httpMethod": "POST", + // "id": "appengine.apps.services.versions.instances.debug", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId", + // "instancesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instancesId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug", + // "request": { + // "$ref": "DebugInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.delete": + +type AppsServicesVersionsInstancesDeleteCall struct { + s *APIService + appsId string + servicesId string + versionsId string + instancesId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Stops a running instance. +func (r *AppsServicesVersionsInstancesService) Delete(appsId string, servicesId string, versionsId string, instancesId string) *AppsServicesVersionsInstancesDeleteCall { + c := &AppsServicesVersionsInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.instancesId = instancesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsInstancesDeleteCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesDeleteCall) Context(ctx context.Context) *AppsServicesVersionsInstancesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsInstancesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + "instancesId": c.instancesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stops a running instance.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "httpMethod": "DELETE", + // "id": "appengine.apps.services.versions.instances.delete", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId", + // "instancesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instancesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.get": + +type AppsServicesVersionsInstancesGetCall struct { + s *APIService + appsId string + servicesId string + versionsId string + instancesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets instance information. +func (r *AppsServicesVersionsInstancesService) Get(appsId string, servicesId string, versionsId string, instancesId string) *AppsServicesVersionsInstancesGetCall { + c := &AppsServicesVersionsInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.instancesId = instancesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
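+//
+// Example (illustrative sketch only; svc is an authenticated *APIService, and
+// the field names passed to Fields are assumed to match the Instance JSON
+// field names):
+//
+//	inst, err := svc.Apps.Services.Versions.Instances.
+//		Get("my-app", "default", "v1", "instance-1").
+//		Fields("id", "vmStatus").
+//		Context(ctx).Do()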
+func (c *AppsServicesVersionsInstancesGetCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsInstancesGetCall) IfNoneMatch(entityTag string) *AppsServicesVersionsInstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesGetCall) Context(ctx context.Context) *AppsServicesVersionsInstancesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsInstancesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + "instancesId": c.instancesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Instance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets instance information.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.instances.get", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId", + // "instancesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instancesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "response": { + // "$ref": "Instance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.list": + +type AppsServicesVersionsInstancesListCall struct { + s *APIService + appsId string + servicesId string + versionsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the instances of a version. +func (r *AppsServicesVersionsInstancesService) List(appsId string, servicesId string, versionsId string) *AppsServicesVersionsInstancesListCall { + c := &AppsServicesVersionsInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum results to +// return per page. +func (c *AppsServicesVersionsInstancesListCall) PageSize(pageSize int64) *AppsServicesVersionsInstancesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Continuation token +// for fetching the next page of results. +func (c *AppsServicesVersionsInstancesListCall) PageToken(pageToken string) *AppsServicesVersionsInstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *AppsServicesVersionsInstancesListCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsInstancesListCall) IfNoneMatch(entityTag string) *AppsServicesVersionsInstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesListCall) Context(ctx context.Context) *AppsServicesVersionsInstancesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsInstancesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.list" call. +// Exactly one of *ListInstancesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListInstancesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesListCall) Do(opts ...googleapi.CallOption) (*ListInstancesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListInstancesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances of a version.", + // "flatPath": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.instances.list", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent Version resource. Example: apps/myapp/services/default/versions/v1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum results to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Continuation token for fetching the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances", + // "response": { + // "$ref": "ListInstancesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *AppsServicesVersionsInstancesListCall) Pages(ctx context.Context, f func(*ListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/vendor/google.golang.org/api/appengine/v1beta/appengine-api.json b/vendor/google.golang.org/api/appengine/v1beta/appengine-api.json new file mode 100644 index 000000000..5bc5cb12e --- /dev/null +++ b/vendor/google.golang.org/api/appengine/v1beta/appengine-api.json @@ -0,0 +1,2522 @@ +{ + "basePath": "", + "ownerDomain": "google.com", + "name": "appengine", + "batchPath": "batch", + "revision": "20170221", + "id": "appengine:v1beta", + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "title": "Google App Engine Admin API", + "discoveryVersion": "v1", + "ownerName": "Google", + "resources": { + "apps": { + "resources": { + "operations": { + "methods": { + "list": { + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.", + "httpMethod": "GET", + "parameterOrder": [ + "appsId" + ], + "response": { + "$ref": "ListOperationsResponse" + }, + "parameters": { + "appsId": { + "location": "path", + "description": "Part of `name`. The name of the operation collection.", + "required": true, + "type": "string" + }, + "pageToken": { + "description": "The standard list page token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "type": "integer", + "location": "query", + "description": "The standard list page size.", + "format": "int32" + }, + "filter": { + "location": "query", + "description": "The standard list filter.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1beta/apps/{appsId}/operations", + "id": "appengine.apps.operations.list", + "path": "v1beta/apps/{appsId}/operations" + }, + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "operationsId" + ], + "httpMethod": "GET", + "parameters": { + "appsId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. The name of the operation resource." + }, + "operationsId": { + "description": "Part of `name`. 
See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1beta/apps/{appsId}/operations/{operationsId}", + "path": "v1beta/apps/{appsId}/operations/{operationsId}", + "id": "appengine.apps.operations.get" + } + } + }, + "locations": { + "methods": { + "list": { + "flatPath": "v1beta/apps/{appsId}/locations", + "path": "v1beta/apps/{appsId}/locations", + "id": "appengine.apps.locations.list", + "description": "Lists information about the supported locations for this service.", + "response": { + "$ref": "ListLocationsResponse" + }, + "parameterOrder": [ + "appsId" + ], + "httpMethod": "GET", + "parameters": { + "appsId": { + "location": "path", + "description": "Part of `name`. The resource that owns the locations collection, if applicable.", + "required": true, + "type": "string" + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "The standard list page token." + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + }, + "filter": { + "location": "query", + "description": "The standard list filter.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "get": { + "response": { + "$ref": "Location" + }, + "parameterOrder": [ + "appsId", + "locationsId" + ], + "httpMethod": "GET", + "parameters": { + "appsId": { + "description": "Part of `name`. Resource name for the location.", + "required": true, + "type": "string", + "location": "path" + }, + "locationsId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. See documentation of `appsId`." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1beta/apps/{appsId}/locations/{locationsId}", + "path": "v1beta/apps/{appsId}/locations/{locationsId}", + "id": "appengine.apps.locations.get", + "description": "Get information about a location." + } + } + }, + "services": { + "methods": { + "delete": { + "id": "appengine.apps.services.delete", + "path": "v1beta/apps/{appsId}/services/{servicesId}", + "description": "Deletes the specified service and all enclosed versions.", + "httpMethod": "DELETE", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "servicesId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "appsId": { + "description": "Part of `name`. Name of the resource requested. 
Example: apps/myapp/services/default.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}" + }, + "list": { + "response": { + "$ref": "ListServicesResponse" + }, + "parameterOrder": [ + "appsId" + ], + "httpMethod": "GET", + "parameters": { + "pageSize": { + "description": "Maximum results to return per page.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "appsId": { + "location": "path", + "description": "Part of `parent`. Name of the parent Application resource. Example: apps/myapp.", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "Continuation token for fetching the next page of results.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1beta/apps/{appsId}/services", + "path": "v1beta/apps/{appsId}/services", + "id": "appengine.apps.services.list", + "description": "Lists all the services in the application." + }, + "get": { + "description": "Gets the current configuration of the specified service.", + "response": { + "$ref": "Service" + }, + "parameterOrder": [ + "appsId", + "servicesId" + ], + "httpMethod": "GET", + "parameters": { + "servicesId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. See documentation of `appsId`." + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}", + "path": "v1beta/apps/{appsId}/services/{servicesId}", + "id": "appengine.apps.services.get" + }, + "patch": { + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}", + "path": "v1beta/apps/{appsId}/services/{servicesId}", + "id": "appengine.apps.services.patch", + "description": "Updates the configuration of the specified service.", + "request": { + "$ref": "Service" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "migrateTraffic": { + "location": "query", + "description": "Set to true to gradually shift traffic to one or more versions that you specify. By default, traffic is shifted immediately. For gradual traffic migration, the target versions must be located within instances that are configured for both warmup requests (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#inboundservicetype) and automatic scaling (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#automaticscaling). You must specify the shardBy (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services#shardby) field in the Service resource. Gradual traffic migration is not supported in the App Engine flexible environment. 
For examples, see Migrating and Splitting Traffic (https://cloud.google.com/appengine/docs/admin-api/migrating-splitting-traffic).", + "type": "boolean" + }, + "updateMask": { + "location": "query", + "description": "Standard field mask for the set of fields to be updated.", + "format": "google-fieldmask", + "type": "string" + }, + "servicesId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "appsId": { + "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default.", + "required": true, + "type": "string", + "location": "path" + } + } + } + }, + "resources": { + "versions": { + "methods": { + "create": { + "httpMethod": "POST", + "parameterOrder": [ + "appsId", + "servicesId" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "servicesId": { + "description": "Part of `parent`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "appsId": { + "description": "Part of `parent`. Name of the parent resource to create this version under. Example: apps/myapp/services/default.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions", + "id": "appengine.apps.services.versions.create", + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions", + "request": { + "$ref": "Version" + }, + "description": "Deploys code and resource files to a new version." + }, + "delete": { + "description": "Deletes an existing Version resource.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "httpMethod": "DELETE", + "parameters": { + "versionsId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "id": "appengine.apps.services.versions.delete" + }, + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "appsId", + "servicesId" + ], + "response": { + "$ref": "ListVersionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "servicesId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `parent`. See documentation of `appsId`." + }, + "appsId": { + "description": "Part of `parent`. Name of the parent Service resource. 
Example: apps/myapp/services/default.", + "required": true, + "type": "string", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "Continuation token for fetching the next page of results.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Maximum results to return per page.", + "format": "int32", + "type": "integer" + }, + "view": { + "type": "string", + "location": "query", + "enum": [ + "BASIC", + "FULL" + ], + "description": "Controls the set of fields returned in the List response." + } + }, + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions", + "id": "appengine.apps.services.versions.list", + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions", + "description": "Lists the versions of a service." + }, + "get": { + "response": { + "$ref": "Version" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "httpMethod": "GET", + "parameters": { + "versionsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "view": { + "enum": [ + "BASIC", + "FULL" + ], + "description": "Controls the set of fields returned in the Get response.", + "type": "string", + "location": "query" + }, + "servicesId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. See documentation of `appsId`." + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "id": "appengine.apps.services.versions.get", + "description": "Gets the specified Version resource. By default, only a BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get the full resource." + }, + "patch": { + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + "id": "appengine.apps.services.versions.patch", + "description": "Updates the specified Version resource. 
You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.serving_status): For Version resources that use basic scaling, manual scaling, or run in the App Engine flexible environment.\ninstance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.instance_class): For Version resources that run in the App Engine standard environment.\nautomatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.\nautomatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.", + "request": { + "$ref": "Version" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default/versions/1.", + "required": true, + "type": "string" + }, + "versionsId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Standard field mask for the set of fields to be updated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}" + } + }, + "resources": { + "instances": { + "methods": { + "list": { + "response": { + "$ref": "ListInstancesResponse" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `parent`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "location": "path", + "description": "Part of `parent`. Name of the parent Version resource. Example: apps/myapp/services/default/versions/v1.", + "required": true, + "type": "string" + }, + "pageToken": { + "description": "Continuation token for fetching the next page of results.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Maximum results to return per page.", + "format": "int32", + "type": "integer" + }, + "versionsId": { + "description": "Part of `parent`. 
See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances", + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances", + "id": "appengine.apps.services.versions.instances.list", + "description": "Lists the instances of a version." + }, + "get": { + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + "id": "appengine.apps.services.versions.instances.get", + "description": "Gets instance information.", + "response": { + "$ref": "Instance" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId", + "instancesId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "servicesId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. See documentation of `appsId`." + }, + "appsId": { + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + "required": true, + "type": "string", + "location": "path" + }, + "instancesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "versionsId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}" + }, + "debug": { + "httpMethod": "POST", + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId", + "instancesId" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "servicesId": { + "location": "path", + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string" + }, + "appsId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1." + }, + "instancesId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. See documentation of `appsId`." + }, + "versionsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug", + "id": "appengine.apps.services.versions.instances.debug", + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug", + "request": { + "$ref": "DebugInstanceRequest" + }, + "description": "Enables debugging on a VM instance. This allows you to use the SSH command to connect to the virtual machine where the instance lives. While in \"debug mode\", the instance continues to serve live traffic. You should delete the instance when you are done debugging and then allow the system to take over and determine if another instance should be started.Only applicable for instances in App Engine flexible environment." 
+ }, + "delete": { + "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + "id": "appengine.apps.services.versions.instances.delete", + "description": "Stops a running instance.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId", + "servicesId", + "versionsId", + "instancesId" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "appsId": { + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + "required": true, + "type": "string", + "location": "path" + }, + "instancesId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "versionsId": { + "description": "Part of `name`. See documentation of `appsId`.", + "required": true, + "type": "string", + "location": "path" + }, + "servicesId": { + "required": true, + "type": "string", + "location": "path", + "description": "Part of `name`. See documentation of `appsId`." + } + } + } + } + } + } + } + } + } + }, + "methods": { + "get": { + "flatPath": "v1beta/apps/{appsId}", + "id": "appengine.apps.get", + "path": "v1beta/apps/{appsId}", + "description": "Gets information about an application.", + "httpMethod": "GET", + "parameterOrder": [ + "appsId" + ], + "response": { + "$ref": "Application" + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "appsId": { + "description": "Part of `name`. Name of the Application resource to get. Example: apps/myapp.", + "required": true, + "type": "string", + "location": "path" + } + } + }, + "patch": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "updateMask": { + "location": "query", + "description": "Standard field mask for the set of fields to be updated.", + "format": "google-fieldmask", + "type": "string" + }, + "appsId": { + "description": "Part of `name`. Name of the Application resource to update. Example: apps/myapp.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1beta/apps/{appsId}", + "path": "v1beta/apps/{appsId}", + "id": "appengine.apps.patch", + "description": "Updates the specified Application resource. You can update the following fields:\nauth_domain - Google authentication domain for controlling user access to the application.\ndefault_cookie_expiration - Cookie expiration policy for the application.", + "request": { + "$ref": "Application" + } + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": {}, + "flatPath": "v1beta/apps", + "id": "appengine.apps.create", + "path": "v1beta/apps", + "description": "Creates an App Engine application for a Google Cloud Platform project. 
Required fields:\nid - The ID of the target Cloud Platform project.\nlocation - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/python/console/).", + "request": { + "$ref": "Application" + } + }, + "repair": { + "description": "Recreates the required App Engine features for the specified App Engine application, for example a Cloud Storage bucket or App Engine service account. Use this method if you receive an error message about a missing feature, for example, Error retrieving the App Engine service account.", + "request": { + "$ref": "RepairApplicationRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "appsId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "appsId": { + "location": "path", + "description": "Part of `name`. Name of the application to repair. Example: apps/myapp", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta/apps/{appsId}:repair", + "path": "v1beta/apps/{appsId}:repair", + "id": "appengine.apps.repair" + } + } + } + }, + "parameters": { + "alt": { + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Pretty-print response." + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + } + }, + "schemas": { + "LocationMetadata": { + "type": "object", + "properties": { + "standardEnvironmentAvailable": { + "description": "App Engine Standard Environment is available in the given location.@OutputOnly", + "type": "boolean" + }, + "flexibleEnvironmentAvailable": { + "description": "App Engine Flexible Environment is available in the given location.@OutputOnly", + "type": "boolean" + } + }, + "id": "LocationMetadata", + "description": "Metadata for the given google.cloud.location.Location." + }, + "Service": { + "properties": { + "id": { + "description": "Relative name of the service within the application. Example: default.@OutputOnly", + "type": "string" + }, + "name": { + "description": "Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + }, + "split": { + "description": "Mapping that defines fractional HTTP traffic diversion to different versions within the service.", + "$ref": "TrafficSplit" + } + }, + "id": "Service", + "description": "A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service.", + "type": "object" + }, + "ListOperationsResponse": { + "id": "ListOperationsResponse", + "description": "The response message for Operations.ListOperations.", + "type": "object", + "properties": { + "nextPageToken": { + "type": "string", + "description": "The standard List next-page token." + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Operation" + } + } + } + }, + "OperationMetadata": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "operationType": { + "description": "Type of this operation. Deprecated, use method field instead. Example: \"create_version\".@OutputOnly", + "type": "string" + }, + "insertTime": { + "description": "Timestamp that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/modules/default.@OutputOnly", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "method": { + "description": "API method that initiated this operation. 
Example: google.appengine.v1beta4.Version.CreateVersion.@OutputOnly", + "type": "string" + }, + "endTime": { + "description": "Timestamp that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + } + }, + "id": "OperationMetadata" + }, + "OperationMetadataV1": { + "type": "object", + "properties": { + "ephemeralMessage": { + "description": "Ephemeral message that may change every time the operation is polled. @OutputOnly", + "type": "string" + }, + "method": { + "type": "string", + "description": "API method that initiated this operation. Example: google.appengine.v1.Versions.CreateVersion.@OutputOnly" + }, + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "warning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Durable messages that persist on every operation poll. @OutputOnly" + }, + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadataV1", + "description": "Metadata for the given google.longrunning.Operation." + }, + "ErrorHandler": { + "description": "Custom static error page to be served when an error occurs.", + "type": "object", + "properties": { + "errorCode": { + "description": "Error condition this handler applies to.", + "type": "string", + "enumDescriptions": [ + "Not specified. ERROR_CODE_DEFAULT is assumed.", + "All other error types.", + "Application has exceeded a resource quota.", + "Client blocked by the application's Denial of Service protection configuration.", + "Deadline reached before the application responds." + ], + "enum": [ + "ERROR_CODE_UNSPECIFIED", + "ERROR_CODE_DEFAULT", + "ERROR_CODE_OVER_QUOTA", + "ERROR_CODE_DOS_API_DENIAL", + "ERROR_CODE_TIMEOUT" + ] + }, + "mimeType": { + "description": "MIME type of file. Defaults to text/html.", + "type": "string" + }, + "staticFile": { + "description": "Static file content to be served for this error.", + "type": "string" + } + }, + "id": "ErrorHandler" + }, + "Network": { + "description": "Extra network settings. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "forwardedPorts": { + "description": "List of ports, or port pairs, to forward from the virtual machine to the application container.", + "type": "array", + "items": { + "type": "string" + } + }, + "instanceTag": { + "description": "Tag to apply to the VM instance during creation.", + "type": "string" + }, + "subnetworkName": { + "type": "string", + "description": "Google Cloud Platform sub-network where the virtual machines are created. 
Specify the short name, not the resource path.If a subnetwork name is specified, a network name will also be required unless it is for the default network.\nIf the network the VM instance is being created in is a Legacy network, then the IP address is allocated from the IPv4Range.\nIf the network the VM instance is being created in is an auto Subnet Mode Network, then only network name should be specified (not the subnetwork_name) and the IP address is created from the IPCidrRange of the subnetwork that exists in that zone for that network.\nIf the network the VM instance is being created in is a custom Subnet Mode Network, then the subnetwork_name must be specified and the IP address is created from the IPCidrRange of the subnetwork.If specified, the subnetwork must exist in the same region as the Flex app." + }, + "name": { + "description": "Google Cloud Platform network where the virtual machines are created. Specify the short name, not the resource path.Defaults to default.", + "type": "string" + } + }, + "id": "Network" + }, + "Application": { + "properties": { + "defaultHostname": { + "description": "Hostname used to reach this application, as resolved by App Engine.@OutputOnly", + "type": "string" + }, + "name": { + "type": "string", + "description": "Full path to the Application resource in the API. Example: apps/myapp.@OutputOnly" + }, + "iap": { + "$ref": "IdentityAwareProxy" + }, + "authDomain": { + "description": "Google Apps authentication domain that controls which users can access this application.Defaults to open access for any Google Account.", + "type": "string" + }, + "defaultCookieExpiration": { + "description": "Cookie expiration policy for this application.", + "format": "google-duration", + "type": "string" + }, + "id": { + "description": "Identifier of the Application resource. This identifier is equivalent to the project ID of the Google Cloud Platform project where you want to deploy your application. Example: myapp.", + "type": "string" + }, + "codeBucket": { + "description": "Google Cloud Storage bucket that can be used for storing files associated with this application. This bucket is associated with the application and can be used by the gcloud deployment commands.@OutputOnly", + "type": "string" + }, + "defaultBucket": { + "description": "Google Cloud Storage bucket that can be used by this application to store content.@OutputOnly", + "type": "string" + }, + "locationId": { + "description": "Location from which this application will be run. Application instances will run out of data centers in the chosen location, which is also where all of the application's end user content is stored.Defaults to us-central.Options are:us-central - Central USeurope-west - Western Europeus-east1 - Eastern US", + "type": "string" + }, + "dispatchRules": { + "description": "HTTP path dispatch rules for requests to the application that do not explicitly target a service or version. Rules are order-dependent.@OutputOnly", + "type": "array", + "items": { + "$ref": "UrlDispatchRule" + } + } + }, + "id": "Application", + "description": "An Application resource contains the top-level configuration of an App Engine application.", + "type": "object" + }, + "Instance": { + "type": "object", + "properties": { + "vmName": { + "type": "string", + "description": "Name of the virtual machine where this instance lives. 
Only applicable for instances in App Engine flexible environment.@OutputOnly" + }, + "qps": { + "description": "Average queries per second (QPS) over the last minute.@OutputOnly", + "format": "float", + "type": "number" + }, + "vmId": { + "description": "Virtual machine ID of this instance. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "name": { + "description": "Full path to the Instance resource in the API. Example: apps/myapp/services/default/versions/v1/instances/instance-1.@OutputOnly", + "type": "string" + }, + "vmZoneName": { + "description": "Zone where the virtual machine is located. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "averageLatency": { + "description": "Average latency (ms) over the last minute.@OutputOnly", + "format": "int32", + "type": "integer" + }, + "vmIp": { + "description": "The IP address of this instance. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "id": { + "description": "Relative name of the instance within the version. Example: instance-1.@OutputOnly", + "type": "string" + }, + "memoryUsage": { + "type": "string", + "description": "Total memory in use (bytes).@OutputOnly", + "format": "int64" + }, + "availability": { + "enumDescriptions": [ + "", + "", + "" + ], + "enum": [ + "UNSPECIFIED", + "RESIDENT", + "DYNAMIC" + ], + "description": "Availability of the instance.@OutputOnly", + "type": "string" + }, + "errors": { + "description": "Number of errors since this instance was started.@OutputOnly", + "format": "int32", + "type": "integer" + }, + "vmStatus": { + "description": "Status of the virtual machine where this instance lives. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "string" + }, + "startTime": { + "type": "string", + "description": "Time that this instance was started.@OutputOnly", + "format": "google-datetime" + }, + "vmDebugEnabled": { + "description": "Whether this instance is in debug mode. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "type": "boolean" + }, + "requests": { + "description": "Number of requests since this instance was started.@OutputOnly", + "format": "int32", + "type": "integer" + }, + "appEngineRelease": { + "description": "App Engine release this instance is running on.@OutputOnly", + "type": "string" + } + }, + "id": "Instance", + "description": "An Instance resource is the computing unit that App Engine uses to automatically scale an application." + }, + "LivenessCheck": { + "description": "Health checking configuration for VM instances. 
Unhealthy instances are killed and replaced with new instances.", + "type": "object", + "properties": { + "healthyThreshold": { + "type": "integer", + "description": "Number of consecutive successful checks required before considering the VM healthy.", + "format": "uint32" + }, + "checkInterval": { + "type": "string", + "description": "Interval between health checks.", + "format": "google-duration" + }, + "timeout": { + "description": "Time before the check is considered failed.", + "format": "google-duration", + "type": "string" + }, + "initialDelay": { + "type": "string", + "description": "The initial delay before starting to execute the checks.", + "format": "google-duration" + }, + "unhealthyThreshold": { + "description": "Number of consecutive failed checks required before considering the VM unhealthy.", + "format": "uint32", + "type": "integer" + }, + "path": { + "description": "The request path.", + "type": "string" + }, + "host": { + "description": "Host header to send when performing a HTTP Liveness check. Example: \"myapp.appspot.com\"", + "type": "string" + } + }, + "id": "LivenessCheck" + }, + "NetworkUtilization": { + "description": "Target scaling by network usage. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "targetReceivedPacketsPerSecond": { + "description": "Target packets received per second.", + "format": "int32", + "type": "integer" + }, + "targetSentBytesPerSecond": { + "description": "Target bytes sent per second.", + "format": "int32", + "type": "integer" + }, + "targetSentPacketsPerSecond": { + "description": "Target packets sent per second.", + "format": "int32", + "type": "integer" + }, + "targetReceivedBytesPerSecond": { + "description": "Target bytes received per second.", + "format": "int32", + "type": "integer" + } + }, + "id": "NetworkUtilization" + }, + "Location": { + "description": "A resource that represents Google Cloud Platform location.", + "type": "object", + "properties": { + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Cross-service attributes for the location. For example\n{\"cloud.googleapis.com/region\": \"us-east1\"}\n" + }, + "name": { + "description": "Resource name for the location, which may vary between implementations. For example: \"projects/example-project/locations/us-east1\"", + "type": "string" + }, + "locationId": { + "description": "The canonical id for this location. For example: \"us-east1\".", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata. For example the available capacity at the given location.", + "type": "object" + } + }, + "id": "Location" + }, + "HealthCheck": { + "description": "Health checking configuration for VM instances. Unhealthy instances are killed and replaced with new instances. Only applicable for instances in App Engine flexible environment.", + "type": "object", + "properties": { + "timeout": { + "description": "Time before the health check is considered failed.", + "format": "google-duration", + "type": "string" + }, + "unhealthyThreshold": { + "description": "Number of consecutive failed health checks required before removing traffic.", + "format": "uint32", + "type": "integer" + }, + "disableHealthCheck": { + "type": "boolean", + "description": "Whether to explicitly disable health checks for this instance." 
+ }, + "host": { + "description": "Host header to send when performing an HTTP health check. Example: \"myapp.appspot.com\"", + "type": "string" + }, + "healthyThreshold": { + "description": "Number of consecutive successful health checks required before receiving traffic.", + "format": "uint32", + "type": "integer" + }, + "restartThreshold": { + "description": "Number of consecutive failed health checks required before an instance is restarted.", + "format": "uint32", + "type": "integer" + }, + "checkInterval": { + "type": "string", + "description": "Interval between health checks.", + "format": "google-duration" + } + }, + "id": "HealthCheck" + }, + "ReadinessCheck": { + "description": "Readiness checking configuration for VM instances. Unhealthy instances are removed from traffic rotation.", + "type": "object", + "properties": { + "timeout": { + "description": "Time before the check is considered failed.", + "format": "google-duration", + "type": "string" + }, + "unhealthyThreshold": { + "description": "Number of consecutive failed checks required before removing traffic.", + "format": "uint32", + "type": "integer" + }, + "path": { + "type": "string", + "description": "The request path." + }, + "host": { + "description": "Host header to send when performing a HTTP Readiness check. Example: \"myapp.appspot.com\"", + "type": "string" + }, + "healthyThreshold": { + "description": "Number of consecutive successful checks required before receiving traffic.", + "format": "uint32", + "type": "integer" + }, + "checkInterval": { + "description": "Interval between health checks.", + "format": "google-duration", + "type": "string" + } + }, + "id": "ReadinessCheck" + }, + "DebugInstanceRequest": { + "description": "Request message for Instances.DebugInstance.", + "type": "object", + "properties": { + "sshKey": { + "description": "Public SSH key to add to the instance. Examples:\n[USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]\n[USERNAME]:ssh-rsa [KEY_VALUE] google-ssh {\"userName\":\"[USERNAME]\",\"expireOn\":\"[EXPIRE_TIME]\"}For more information, see Adding and Removing SSH Keys (https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys).", + "type": "string" + } + }, + "id": "DebugInstanceRequest" + }, + "OperationMetadataV1Beta5": { + "id": "OperationMetadataV1Beta5", + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "User who requested this operation.@OutputOnly" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + }, + "method": { + "description": "API method name that initiated this operation. 
Example: google.appengine.v1beta5.Version.CreateVersion.@OutputOnly", + "type": "string" + }, + "insertTime": { + "description": "Timestamp that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "Timestamp that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + } + } + }, + "Version": { + "id": "Version", + "description": "A Version resource is a specific set of source code and configuration files that are deployed into a service.", + "type": "object", + "properties": { + "libraries": { + "description": "Configuration for third-party Python runtime libraries that are required by the application.Only returned in GET requests if view=FULL is set.", + "type": "array", + "items": { + "$ref": "Library" + } + }, + "nobuildFilesRegex": { + "description": "Files that match this pattern will not be built into this version. Only applicable for Go runtimes.Only returned in GET requests if view=FULL is set.", + "type": "string" + }, + "basicScaling": { + "$ref": "BasicScaling", + "description": "A service with basic scaling will create an instance when the application receives a request. The instance will be turned down when the app becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity." + }, + "runtime": { + "description": "Desired runtime. Example: python27.", + "type": "string" + }, + "id": { + "description": "Relative name of the version within the service. Example: v1. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names: \"default\", \"latest\", and any name with the prefix \"ah-\".", + "type": "string" + }, + "createdBy": { + "description": "Email address of the user who created this version.@OutputOnly", + "type": "string" + }, + "envVariables": { + "description": "Environment variables available to the application.Only returned in GET requests if view=FULL is set.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "livenessCheck": { + "description": "Configures liveness health checking for VM instances. Unhealthy instances are stopped and replaced with new instancesOnly returned in GET requests if view=FULL is set.", + "$ref": "LivenessCheck" + }, + "network": { + "$ref": "Network", + "description": "Extra network settings. Only applicable for VM runtimes." + }, + "betaSettings": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata settings that are supplied to this version to enable beta runtime features.", + "type": "object" + }, + "env": { + "description": "App Engine execution environment for this version.Defaults to standard.", + "type": "string" + }, + "handlers": { + "type": "array", + "items": { + "$ref": "UrlMap" + }, + "description": "An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set." + }, + "automaticScaling": { + "$ref": "AutomaticScaling", + "description": "Automatic scaling is based on request rate, response latencies, and other application metrics." 
+ }, + "diskUsageBytes": { + "description": "Total size in bytes of all the files that are included in this version and curerntly hosted on the App Engine disk.@OutputOnly", + "format": "int64", + "type": "string" + }, + "healthCheck": { + "$ref": "HealthCheck", + "description": "Configures health checking for VM instances. Unhealthy instances are stopped and replaced with new instances. Only applicable for VM runtimes.Only returned in GET requests if view=FULL is set." + }, + "threadsafe": { + "description": "Whether multiple requests can be dispatched to this version at once.", + "type": "boolean" + }, + "readinessCheck": { + "description": "Configures readiness health checking for VM instances. Unhealthy instances are not put into the backend traffic rotation.Only returned in GET requests if view=FULL is set.", + "$ref": "ReadinessCheck" + }, + "manualScaling": { + "$ref": "ManualScaling", + "description": "A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time." + }, + "name": { + "description": "Full path to the Version resource in the API. Example: apps/myapp/services/default/versions/v1.@OutputOnly", + "type": "string" + }, + "apiConfig": { + "description": "Serving configuration for Google Cloud Endpoints (https://cloud.google.com/appengine/docs/python/endpoints/).Only returned in GET requests if view=FULL is set.", + "$ref": "ApiConfigHandler" + }, + "endpointsApiService": { + "description": "Cloud Endpoints configuration.If endpoints_api_service is set, the Cloud Endpoints Extensible Service Proxy will be provided to serve the API implemented by the app.", + "$ref": "EndpointsApiService" + }, + "versionUrl": { + "description": "Serving URL for this version. Example: \"https://myversion-dot-myservice-dot-myapp.appspot.com\"@OutputOnly", + "type": "string" + }, + "vm": { + "description": "Whether to deploy this version in a container on a virtual machine.", + "type": "boolean" + }, + "instanceClass": { + "description": "Instance class that is used to run this version. Valid values are:\nAutomaticScaling: F1, F2, F4, F4_1G\nManualScaling or BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 for AutomaticScaling and B1 for ManualScaling or BasicScaling.", + "type": "string" + }, + "servingStatus": { + "enumDescriptions": [ + "Not specified.", + "Currently serving. Instances are created according to the scaling settings of the version.", + "Disabled. No instances will be created and the scaling settings are ignored until the state of the version changes to SERVING." + ], + "enum": [ + "SERVING_STATUS_UNSPECIFIED", + "SERVING", + "STOPPED" + ], + "description": "Current serving status of this version. Only the versions with a SERVING status create instances and can be billed.SERVING_STATUS_UNSPECIFIED is an invalid value. Defaults to SERVING.", + "type": "string" + }, + "deployment": { + "description": "Code and application artifacts that make up this version.Only returned in GET requests if view=FULL is set.", + "$ref": "Deployment" + }, + "createTime": { + "type": "string", + "description": "Time that this version was created.@OutputOnly", + "format": "google-datetime" + }, + "resources": { + "$ref": "Resources", + "description": "Machine resources for this version. Only applicable for VM runtimes." 
+ }, + "inboundServices": { + "enumDescriptions": [ + "Not specified.", + "Allows an application to receive mail.", + "Allows an application to receive email-bound notifications.", + "Allows an application to receive error stanzas.", + "Allows an application to receive instant messages.", + "Allows an application to receive user subscription POSTs.", + "Allows an application to receive a user's chat presence.", + "Registers an application for notifications when a client connects or disconnects from a channel.", + "Enables warmup requests." + ], + "description": "Before an application can receive email or XMPP messages, the application must be configured to enable the service.", + "type": "array", + "items": { + "enum": [ + "INBOUND_SERVICE_UNSPECIFIED", + "INBOUND_SERVICE_MAIL", + "INBOUND_SERVICE_MAIL_BOUNCE", + "INBOUND_SERVICE_XMPP_ERROR", + "INBOUND_SERVICE_XMPP_MESSAGE", + "INBOUND_SERVICE_XMPP_SUBSCRIBE", + "INBOUND_SERVICE_XMPP_PRESENCE", + "INBOUND_SERVICE_CHANNEL_PRESENCE", + "INBOUND_SERVICE_WARMUP" + ], + "type": "string" + } + }, + "errorHandlers": { + "description": "Custom static error pages. Limited to 10KB per page.Only returned in GET requests if view=FULL is set.", + "type": "array", + "items": { + "$ref": "ErrorHandler" + } + }, + "defaultExpiration": { + "type": "string", + "description": "Duration that static files should be cached by web proxies and browsers. Only applicable if the corresponding StaticFilesHandler (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#staticfileshandler) does not specify its own expiration time.Only returned in GET requests if view=FULL is set.", + "format": "google-duration" + } + } + }, + "RepairApplicationRequest": { + "properties": {}, + "id": "RepairApplicationRequest", + "description": "Request message for 'Applications.RepairApplication'.", + "type": "object" + }, + "ScriptHandler": { + "properties": { + "scriptPath": { + "description": "Path to the script from the application root directory.", + "type": "string" + } + }, + "id": "ScriptHandler", + "description": "Executes a script to handle the request that matches the URL pattern.", + "type": "object" + }, + "FileInfo": { + "id": "FileInfo", + "description": "Single source file that is part of the version to be deployed. Each source file that is deployed must be specified separately.", + "type": "object", + "properties": { + "sha1Sum": { + "description": "The SHA1 hash of the file, in hex.", + "type": "string" + }, + "mimeType": { + "description": "The MIME type of the file.Defaults to the value from Google Cloud Storage.", + "type": "string" + }, + "sourceUrl": { + "description": "URL source to use to fetch this file. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com/\u003cbucket\u003e/\u003cobject\u003e'.", + "type": "string" + } + } + }, + "OperationMetadataExperimental": { + "type": "object", + "properties": { + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "target": { + "type": "string", + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/customDomains/example.com.@OutputOnly" + }, + "method": { + "description": "API method that initiated this operation. 
Example: google.appengine.experimental.CustomDomains.CreateCustomDomain.@OutputOnly", + "type": "string" + }, + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + } + }, + "id": "OperationMetadataExperimental", + "description": "Metadata for the given google.longrunning.Operation." + }, + "TrafficSplit": { + "description": "Traffic routing configuration for versions within a single service. Traffic splits define how traffic directed to the service is assigned to versions.", + "type": "object", + "properties": { + "shardBy": { + "enumDescriptions": [ + "Diversion method unspecified.", + "Diversion based on a specially named cookie, \"GOOGAPPUID.\" The cookie must be set by the application itself or no diversion will occur.", + "Diversion based on applying the modulus operation to a fingerprint of the IP address." + ], + "enum": [ + "UNSPECIFIED", + "COOKIE", + "IP" + ], + "description": "Mechanism used to determine which version a request is sent to. The traffic selection algorithm will be stable for either type until allocations are changed.", + "type": "string" + }, + "allocations": { + "description": "Mapping from version IDs within the service to fractional (0.000, 1] allocations of traffic for that version. Each version can be specified only once, but some versions in the service may not have any traffic allocation. Services that have traffic allocated cannot be deleted until either the service is deleted or their traffic allocation is removed. Allocations must sum to 1. Up to two decimal place precision is supported for IP-based splits and up to three decimal places is supported for cookie-based splits.", + "type": "object", + "additionalProperties": { + "format": "double", + "type": "number" + } + } + }, + "id": "TrafficSplit" + }, + "OperationMetadataV1Beta": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "method": { + "description": "API method that initiated this operation. Example: google.appengine.v1beta.Versions.CreateVersion.@OutputOnly", + "type": "string" + }, + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "warning": { + "description": "Durable messages that persist on every operation poll. @OutputOnly", + "type": "array", + "items": { + "type": "string" + } + }, + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + }, + "user": { + "description": "User who requested this operation.@OutputOnly", + "type": "string" + }, + "ephemeralMessage": { + "description": "Ephemeral message that may change every time the operation is polled. @OutputOnly", + "type": "string" + } + }, + "id": "OperationMetadataV1Beta" + }, + "ListServicesResponse": { + "type": "object", + "properties": { + "services": { + "type": "array", + "items": { + "$ref": "Service" + }, + "description": "The services belonging to the requested application." 
+ }, + "nextPageToken": { + "description": "Continuation token for fetching the next page of results.", + "type": "string" + } + }, + "id": "ListServicesResponse", + "description": "Response message for Services.ListServices." + }, + "Deployment": { + "id": "Deployment", + "description": "Code and application artifacts used to deploy a version to App Engine.", + "type": "object", + "properties": { + "files": { + "description": "Manifest of the files stored in Google Cloud Storage that are included as part of this version. All files must be readable using the credentials supplied with this call.", + "type": "object", + "additionalProperties": { + "$ref": "FileInfo" + } + }, + "zip": { + "$ref": "ZipInfo", + "description": "The zip file for this deployment, if this is a zip deployment." + }, + "container": { + "$ref": "ContainerInfo", + "description": "The Docker image for the container that runs the version. Only applicable for instances running in the App Engine flexible environment." + } + } + }, + "Resources": { + "description": "Machine resources for a version.", + "type": "object", + "properties": { + "cpu": { + "description": "Number of CPU cores needed.", + "format": "double", + "type": "number" + }, + "memoryGb": { + "description": "Memory (GB) needed.", + "format": "double", + "type": "number" + }, + "volumes": { + "description": "User specified volumes.", + "type": "array", + "items": { + "$ref": "Volume" + } + }, + "diskGb": { + "description": "Disk size (GB) needed.", + "format": "double", + "type": "number" + } + }, + "id": "Resources" + }, + "Volume": { + "description": "Volumes mounted within the app container. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "sizeGb": { + "description": "Volume size in gigabytes.", + "format": "double", + "type": "number" + }, + "name": { + "description": "Unique name for the volume.", + "type": "string" + }, + "volumeType": { + "type": "string", + "description": "Underlying volume type, e.g. 'tmpfs'." + } + }, + "id": "Volume" + }, + "ListInstancesResponse": { + "description": "Response message for Instances.ListInstances.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "Continuation token for fetching the next page of results.", + "type": "string" + }, + "instances": { + "description": "The instances belonging to the requested version.", + "type": "array", + "items": { + "$ref": "Instance" + } + } + }, + "id": "ListInstancesResponse" + }, + "OperationMetadataV1Alpha": { + "description": "Metadata for the given google.longrunning.Operation.", + "type": "object", + "properties": { + "insertTime": { + "description": "Time that this operation was created.@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "warning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Durable messages that persist on every operation poll. @OutputOnly" + }, + "user": { + "type": "string", + "description": "User who requested this operation.@OutputOnly" + }, + "target": { + "description": "Name of the resource that this operation is acting on. Example: apps/myapp/services/default.@OutputOnly", + "type": "string" + }, + "ephemeralMessage": { + "type": "string", + "description": "Ephemeral message that may change every time the operation is polled. @OutputOnly" + }, + "method": { + "description": "API method that initiated this operation. 
Example: google.appengine.v1alpha.Versions.CreateVersion.@OutputOnly", + "type": "string" + }, + "endTime": { + "description": "Time that this operation completed.@OutputOnly", + "format": "google-datetime", + "type": "string" + } + }, + "id": "OperationMetadataV1Alpha" + }, + "UrlDispatchRule": { + "description": "Rules to match an HTTP request and dispatch that request to a service.", + "type": "object", + "properties": { + "domain": { + "type": "string", + "description": "Domain name to match against. The wildcard \"*\" is supported if specified before a period: \"*.\".Defaults to matching all domains: \"*\"." + }, + "service": { + "description": "Resource ID of a service in this application that should serve the matched request. The service must already exist. Example: default.", + "type": "string" + }, + "path": { + "description": "Pathname within the host. Must start with a \"/\". A single \"*\" can be included at the end of the path. The sum of the lengths of the domain and path may not exceed 100 characters.", + "type": "string" + } + }, + "id": "UrlDispatchRule" + }, + "ListVersionsResponse": { + "description": "Response message for Versions.ListVersions.", + "type": "object", + "properties": { + "versions": { + "description": "The versions belonging to the requested service.", + "type": "array", + "items": { + "$ref": "Version" + } + }, + "nextPageToken": { + "description": "Continuation token for fetching the next page of results.", + "type": "string" + } + }, + "id": "ListVersionsResponse" + }, + "ApiEndpointHandler": { + "properties": { + "scriptPath": { + "description": "Path to the script from the application root directory.", + "type": "string" + } + }, + "id": "ApiEndpointHandler", + "description": "Uses Google Cloud Endpoints to handle requests.", + "type": "object" + }, + "ZipInfo": { + "description": "The zip file information for a zip deployment.", + "type": "object", + "properties": { + "sourceUrl": { + "description": "URL of the zip file to deploy from. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com/\u003cbucket\u003e/\u003cobject\u003e'.", + "type": "string" + }, + "filesCount": { + "description": "An estimate of the number of files in a zip for a zip deployment. If set, must be greater than or equal to the actual number of files. Used for optimizing performance; if not provided, deployment may be slow.", + "format": "int32", + "type": "integer" + } + }, + "id": "ZipInfo" + }, + "AutomaticScaling": { + "type": "object", + "properties": { + "diskUtilization": { + "description": "Target scaling by disk usage.", + "$ref": "DiskUtilization" + }, + "minPendingLatency": { + "description": "Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it.", + "format": "google-duration", + "type": "string" + }, + "requestUtilization": { + "$ref": "RequestUtilization", + "description": "Target scaling by request utilization." + }, + "maxIdleInstances": { + "description": "Maximum number of idle instances that should be maintained for this version.", + "format": "int32", + "type": "integer" + }, + "minIdleInstances": { + "description": "Minimum number of idle instances that should be maintained for this version. 
Only applicable for the default version of a service.", + "format": "int32", + "type": "integer" + }, + "maxTotalInstances": { + "description": "Maximum number of instances that should be started to handle requests.", + "format": "int32", + "type": "integer" + }, + "minTotalInstances": { + "description": "Minimum number of instances that should be maintained for this version.", + "format": "int32", + "type": "integer" + }, + "networkUtilization": { + "$ref": "NetworkUtilization", + "description": "Target scaling by network usage." + }, + "maxConcurrentRequests": { + "description": "Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance.Defaults to a runtime-specific value.", + "format": "int32", + "type": "integer" + }, + "coolDownPeriod": { + "description": "Amount of time that the Autoscaler (https://cloud.google.com/compute/docs/autoscaler/) should wait between changes to the number of virtual machines. Only applicable for VM runtimes.", + "format": "google-duration", + "type": "string" + }, + "maxPendingLatency": { + "description": "Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it.", + "format": "google-duration", + "type": "string" + }, + "cpuUtilization": { + "$ref": "CpuUtilization", + "description": "Target scaling by CPU usage." + } + }, + "id": "AutomaticScaling", + "description": "Automatic scaling is based on request rate, response latencies, and other application metrics." + }, + "Library": { + "description": "Third-party Python runtime library that is required by the application.", + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the library. Example: \"django\"." + }, + "version": { + "description": "Version of the library to select, or \"latest\".", + "type": "string" + } + }, + "id": "Library" + }, + "ListLocationsResponse": { + "description": "The response message for Locations.ListLocations.", + "type": "object", + "properties": { + "locations": { + "description": "A list of locations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Location" + } + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + } + }, + "id": "ListLocationsResponse" + }, + "ContainerInfo": { + "description": "Docker image that is used to create a container and start a VM instance for the version that you deploy. Only applicable for instances running in the App Engine flexible environment.", + "type": "object", + "properties": { + "image": { + "description": "URI to the hosted container image in Google Container Registry. The URI must be fully qualified and include a tag or digest. Examples: \"gcr.io/my-project/image:tag\" or \"gcr.io/my-project/image@digest\"", + "type": "string" + } + }, + "id": "ContainerInfo" + }, + "RequestUtilization": { + "description": "Target scaling by request utilization. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "targetRequestCountPerSecond": { + "description": "Target requests per second.", + "format": "int32", + "type": "integer" + }, + "targetConcurrentRequests": { + "description": "Target number of concurrent requests.", + "format": "int32", + "type": "integer" + } + }, + "id": "RequestUtilization" + }, + "UrlMap": { + "description": "URL pattern and description of how the URL should be handled. 
App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript.", + "type": "object", + "properties": { + "script": { + "description": "Executes a script to handle the request that matches this URL pattern.", + "$ref": "ScriptHandler" + }, + "urlRegex": { + "description": "URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path.", + "type": "string" + }, + "login": { + "description": "Level of login required to access this resource.", + "type": "string", + "enumDescriptions": [ + "Not specified. LOGIN_OPTIONAL is assumed.", + "Does not require that the user is signed in.", + "If the user is not signed in, the auth_fail_action is taken. In addition, if the user is not an administrator for the application, they are given an error message regardless of auth_fail_action. If the user is an administrator, the handler proceeds.", + "If the user has signed in, the handler proceeds normally. Otherwise, the auth_fail_action is taken." + ], + "enum": [ + "LOGIN_UNSPECIFIED", + "LOGIN_OPTIONAL", + "LOGIN_ADMIN", + "LOGIN_REQUIRED" + ] + }, + "apiEndpoint": { + "$ref": "ApiEndpointHandler", + "description": "Uses API Endpoints to handle requests." + }, + "staticFiles": { + "description": "Returns the contents of a file, such as an image, as the response.", + "$ref": "StaticFilesHandler" + }, + "redirectHttpResponseCode": { + "description": "30x code to use when performing redirects for the secure field. Defaults to 302.", + "type": "string", + "enumDescriptions": [ + "Not specified. 302 is assumed.", + "301 Moved Permanently code.", + "302 Moved Temporarily code.", + "303 See Other code.", + "307 Temporary Redirect code." + ], + "enum": [ + "REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED", + "REDIRECT_HTTP_RESPONSE_CODE_301", + "REDIRECT_HTTP_RESPONSE_CODE_302", + "REDIRECT_HTTP_RESPONSE_CODE_303", + "REDIRECT_HTTP_RESPONSE_CODE_307" + ] + }, + "securityLevel": { + "enumDescriptions": [ + "Not specified.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used, and respond accordingly.", + "Requests for a URL that match this handler that use HTTPS are automatically redirected to the HTTP equivalent URL.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used and respond accordingly.", + "Requests for a URL that match this handler that do not use HTTPS are automatically redirected to the HTTPS URL with the same path. Query parameters are reserved for the redirect." + ], + "enum": [ + "SECURE_UNSPECIFIED", + "SECURE_DEFAULT", + "SECURE_NEVER", + "SECURE_OPTIONAL", + "SECURE_ALWAYS" + ], + "description": "Security (HTTPS) enforcement for this URL.", + "type": "string" + }, + "authFailAction": { + "enumDescriptions": [ + "Not specified. AUTH_FAIL_ACTION_REDIRECT is assumed.", + "Redirects user to \"accounts.google.com\". The user is redirected back to the application URL after signing in or creating an account.", + "Rejects request with a 401 HTTP status code and an error message." 
+ ], + "enum": [ + "AUTH_FAIL_ACTION_UNSPECIFIED", + "AUTH_FAIL_ACTION_REDIRECT", + "AUTH_FAIL_ACTION_UNAUTHORIZED" + ], + "description": "Action to take when users access resources that require authentication. Defaults to redirect.", + "type": "string" + } + }, + "id": "UrlMap" + }, + "EndpointsApiService": { + "description": "Cloud Endpoints (https://cloud.google.com/endpoints) configuration. The Endpoints API Service provides tooling for serving Open API and gRPC endpoints via an NGINX proxy.The fields here refer to the name and configuration id of a \"service\" resource in the Service Management API (https://cloud.google.com/service-management/overview).", + "type": "object", + "properties": { + "name": { + "description": "Endpoints service name which is the name of the \"service\" resource in the Service Management API. For example \"myapi.endpoints.myproject.cloud.goog\"", + "type": "string" + }, + "configId": { + "type": "string", + "description": "Endpoints service configuration id as specified by the Service Management API. For example \"2016-09-19r1\"" + } + }, + "id": "EndpointsApiService" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "type": "object", + "properties": { + "done": { + "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", + "type": "boolean" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should have the format of operations/some/unique/name.", + "type": "string" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. 
Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + } + }, + "id": "Operation" + }, + "ApiConfigHandler": { + "description": "Google Cloud Endpoints (https://cloud.google.com/appengine/docs/python/endpoints/) configuration for API handlers.", + "type": "object", + "properties": { + "url": { + "description": "URL to serve the endpoint at.", + "type": "string" + }, + "securityLevel": { + "enum": [ + "SECURE_UNSPECIFIED", + "SECURE_DEFAULT", + "SECURE_NEVER", + "SECURE_OPTIONAL", + "SECURE_ALWAYS" + ], + "description": "Security (HTTPS) enforcement for this URL.", + "type": "string", + "enumDescriptions": [ + "Not specified.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used, and respond accordingly.", + "Requests for a URL that match this handler that use HTTPS are automatically redirected to the HTTP equivalent URL.", + "Both HTTP and HTTPS requests with URLs that match the handler succeed without redirects. The application can examine the request to determine which protocol was used and respond accordingly.", + "Requests for a URL that match this handler that do not use HTTPS are automatically redirected to the HTTPS URL with the same path. Query parameters are reserved for the redirect." + ] + }, + "authFailAction": { + "enumDescriptions": [ + "Not specified. AUTH_FAIL_ACTION_REDIRECT is assumed.", + "Redirects user to \"accounts.google.com\". The user is redirected back to the application URL after signing in or creating an account.", + "Rejects request with a 401 HTTP status code and an error message." + ], + "enum": [ + "AUTH_FAIL_ACTION_UNSPECIFIED", + "AUTH_FAIL_ACTION_REDIRECT", + "AUTH_FAIL_ACTION_UNAUTHORIZED" + ], + "description": "Action to take when users access resources that require authentication. Defaults to redirect.", + "type": "string" + }, + "script": { + "description": "Path to the script from the application root directory.", + "type": "string" + }, + "login": { + "enumDescriptions": [ + "Not specified. LOGIN_OPTIONAL is assumed.", + "Does not require that the user is signed in.", + "If the user is not signed in, the auth_fail_action is taken. In addition, if the user is not an administrator for the application, they are given an error message regardless of auth_fail_action. If the user is an administrator, the handler proceeds.", + "If the user has signed in, the handler proceeds normally. Otherwise, the auth_fail_action is taken." + ], + "enum": [ + "LOGIN_UNSPECIFIED", + "LOGIN_OPTIONAL", + "LOGIN_ADMIN", + "LOGIN_REQUIRED" + ], + "description": "Level of login required to access this resource. Defaults to optional.", + "type": "string" + } + }, + "id": "ApiConfigHandler" + }, + "StaticFilesHandler": { + "type": "object", + "properties": { + "expiration": { + "description": "Time a static file served by this handler should be cached by web proxies and browsers.", + "format": "google-duration", + "type": "string" + }, + "httpHeaders": { + "additionalProperties": { + "type": "string" + }, + "description": "HTTP headers to use for all responses from these URLs.", + "type": "object" + }, + "applicationReadable": { + "description": "Whether files should also be uploaded as code data. By default, files declared in static file handlers are uploaded as static data and are only served to end users; they cannot be read by the application. 
If enabled, uploads are charged against both your code and static data storage resource quotas.", + "type": "boolean" + }, + "uploadPathRegex": { + "description": "Regular expression that matches the file paths for all files that should be referenced by this handler.", + "type": "string" + }, + "path": { + "type": "string", + "description": "Path to the static files matched by the URL pattern, from the application root directory. The path can refer to text matched in groupings in the URL pattern." + }, + "mimeType": { + "description": "MIME type used to serve all files served by this handler.Defaults to file-specific MIME types, which are derived from each file's filename extension.", + "type": "string" + }, + "requireMatchingFile": { + "description": "Whether this handler should match the request if the file referenced by the handler does not exist.", + "type": "boolean" + } + }, + "id": "StaticFilesHandler", + "description": "Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them." + }, + "DiskUtilization": { + "id": "DiskUtilization", + "description": "Target scaling by disk usage. Only applicable for VM runtimes.", + "type": "object", + "properties": { + "targetWriteBytesPerSecond": { + "description": "Target bytes written per second.", + "format": "int32", + "type": "integer" + }, + "targetReadBytesPerSecond": { + "description": "Target bytes read per second.", + "format": "int32", + "type": "integer" + }, + "targetReadOpsPerSecond": { + "description": "Target ops read per seconds.", + "format": "int32", + "type": "integer" + }, + "targetWriteOpsPerSecond": { + "description": "Target ops written per second.", + "format": "int32", + "type": "integer" + } + } + }, + "BasicScaling": { + "description": "A service with basic scaling will create an instance when the application receives a request. The instance will be turned down when the app becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.", + "type": "object", + "properties": { + "maxInstances": { + "description": "Maximum number of instances to create for this version.", + "format": "int32", + "type": "integer" + }, + "idleTimeout": { + "type": "string", + "description": "Duration of time after the last request that an instance must wait before the instance is shut down.", + "format": "google-duration" + } + }, + "id": "BasicScaling" + }, + "CpuUtilization": { + "description": "Target scaling by CPU usage.", + "type": "object", + "properties": { + "aggregationWindowLength": { + "description": "Period of time over which CPU utilization is calculated.", + "format": "google-duration", + "type": "string" + }, + "targetUtilization": { + "description": "Target CPU utilization ratio to maintain when scaling. Must be between 0 and 1.", + "format": "double", + "type": "number" + } + }, + "id": "CpuUtilization" + }, + "IdentityAwareProxy": { + "description": "Identity-Aware Proxy", + "type": "object", + "properties": { + "oauth2ClientSecret": { + "description": "OAuth2 client secret to use for the authentication flow.For security reasons, this value cannot be retrieved via the API. 
Instead, the SHA-256 hash of the value is returned in the oauth2_client_secret_sha256 field.@InputOnly", + "type": "string" + }, + "oauth2ClientId": { + "description": "OAuth2 client ID to use for the authentication flow.", + "type": "string" + }, + "oauth2ClientSecretSha256": { + "description": "Hex-encoded SHA-256 hash of the client secret.@OutputOnly", + "type": "string" + }, + "enabled": { + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests.If true, the oauth2_client_id and oauth2_client_secret fields must be non-empty.", + "type": "boolean" + } + }, + "id": "IdentityAwareProxy" + }, + "Status": { + "id": "Status", + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc which can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting purpose.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { + "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + } + } + } + }, + "ManualScaling": { + "type": "object", + "properties": { + "instances": { + "description": "Number of instances to assign to the service at the start. This number can later be altered by using the Modules API (https://cloud.google.com/appengine/docs/python/modules/functions) set_num_instances() function.", + "format": "int32", + "type": "integer" + } + }, + "id": "ManualScaling", + "description": "A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time." + } + }, + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "protocol": "rest", + "version": "v1beta", + "baseUrl": "https://appengine.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/appengine.admin": { + "description": "View and manage your applications deployed on Google App Engine" + } + } + } + }, + "kind": "discovery#restDescription", + "description": "Provisions and manages App Engine applications.", + "servicePath": "", + "rootUrl": "https://appengine.googleapis.com/" +} diff --git a/vendor/google.golang.org/api/appengine/v1beta/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1beta/appengine-gen.go new file mode 100644 index 000000000..fa0eed0d9 --- /dev/null +++ b/vendor/google.golang.org/api/appengine/v1beta/appengine-gen.go @@ -0,0 +1,6151 @@ +// Package appengine provides access to the Google App Engine Admin API. +// +// See https://cloud.google.com/appengine/docs/admin-api/ +// +// Usage example: +// +// import "google.golang.org/api/appengine/v1beta" +// ... +// appengineService, err := appengine.New(oauthHttpClient) +package appengine // import "google.golang.org/api/appengine/v1beta" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "appengine:v1beta" +const apiName = "appengine" +const apiVersion = "v1beta" +const basePath = "https://appengine.googleapis.com/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your applications deployed on Google App Engine + AppengineAdminScope = "https://www.googleapis.com/auth/appengine.admin" + + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" +) + +func New(client *http.Client) (*APIService, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &APIService{client: client, BasePath: basePath} + s.Apps = NewAppsService(s) + return s, nil +} + +type APIService struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Apps *AppsService +} + +func (s *APIService) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewAppsService(s *APIService) *AppsService { + rs := &AppsService{s: s} + rs.Locations = NewAppsLocationsService(s) + rs.Operations = NewAppsOperationsService(s) + rs.Services = NewAppsServicesService(s) + return rs +} + +type AppsService struct { + s *APIService + + Locations *AppsLocationsService + + Operations *AppsOperationsService + + Services *AppsServicesService +} + +func NewAppsLocationsService(s *APIService) *AppsLocationsService { + rs := &AppsLocationsService{s: s} + return rs +} + +type AppsLocationsService struct { + s *APIService +} + +func NewAppsOperationsService(s *APIService) *AppsOperationsService { + rs := &AppsOperationsService{s: s} + return rs +} + +type AppsOperationsService struct { + s *APIService +} + +func NewAppsServicesService(s *APIService) *AppsServicesService { + rs := &AppsServicesService{s: s} + rs.Versions = NewAppsServicesVersionsService(s) + return rs +} + +type AppsServicesService struct { + s *APIService + + Versions *AppsServicesVersionsService +} + +func NewAppsServicesVersionsService(s *APIService) *AppsServicesVersionsService { + rs := &AppsServicesVersionsService{s: s} + rs.Instances = NewAppsServicesVersionsInstancesService(s) + return rs +} + +type AppsServicesVersionsService struct { + s *APIService + + Instances *AppsServicesVersionsInstancesService +} + +func NewAppsServicesVersionsInstancesService(s *APIService) *AppsServicesVersionsInstancesService { + rs := &AppsServicesVersionsInstancesService{s: s} + return rs +} + +type AppsServicesVersionsInstancesService struct { + s *APIService +} + +// ApiConfigHandler: Google Cloud Endpoints +// (https://cloud.google.com/appengine/docs/python/endpoints/) +// configuration for API handlers. +type ApiConfigHandler struct { + // AuthFailAction: Action to take when users access resources that + // require authentication. Defaults to redirect. + // + // Possible values: + // "AUTH_FAIL_ACTION_UNSPECIFIED" - Not specified. + // AUTH_FAIL_ACTION_REDIRECT is assumed. + // "AUTH_FAIL_ACTION_REDIRECT" - Redirects user to + // "accounts.google.com". The user is redirected back to the application + // URL after signing in or creating an account. + // "AUTH_FAIL_ACTION_UNAUTHORIZED" - Rejects request with a 401 HTTP + // status code and an error message. 
+ AuthFailAction string `json:"authFailAction,omitempty"` + + // Login: Level of login required to access this resource. Defaults to + // optional. + // + // Possible values: + // "LOGIN_UNSPECIFIED" - Not specified. LOGIN_OPTIONAL is assumed. + // "LOGIN_OPTIONAL" - Does not require that the user is signed in. + // "LOGIN_ADMIN" - If the user is not signed in, the auth_fail_action + // is taken. In addition, if the user is not an administrator for the + // application, they are given an error message regardless of + // auth_fail_action. If the user is an administrator, the handler + // proceeds. + // "LOGIN_REQUIRED" - If the user has signed in, the handler proceeds + // normally. Otherwise, the auth_fail_action is taken. + Login string `json:"login,omitempty"` + + // Script: Path to the script from the application root directory. + Script string `json:"script,omitempty"` + + // SecurityLevel: Security (HTTPS) enforcement for this URL. + // + // Possible values: + // "SECURE_UNSPECIFIED" - Not specified. + // "SECURE_DEFAULT" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used, and respond + // accordingly. + // "SECURE_NEVER" - Requests for a URL that match this handler that + // use HTTPS are automatically redirected to the HTTP equivalent URL. + // "SECURE_OPTIONAL" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used and respond + // accordingly. + // "SECURE_ALWAYS" - Requests for a URL that match this handler that + // do not use HTTPS are automatically redirected to the HTTPS URL with + // the same path. Query parameters are reserved for the redirect. + SecurityLevel string `json:"securityLevel,omitempty"` + + // Url: URL to serve the endpoint at. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuthFailAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuthFailAction") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ApiConfigHandler) MarshalJSON() ([]byte, error) { + type noMethod ApiConfigHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ApiEndpointHandler: Uses Google Cloud Endpoints to handle requests. +type ApiEndpointHandler struct { + // ScriptPath: Path to the script from the application root directory. + ScriptPath string `json:"scriptPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ScriptPath") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ScriptPath") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ApiEndpointHandler) MarshalJSON() ([]byte, error) { + type noMethod ApiEndpointHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Application: An Application resource contains the top-level +// configuration of an App Engine application. +type Application struct { + // AuthDomain: Google Apps authentication domain that controls which + // users can access this application.Defaults to open access for any + // Google Account. + AuthDomain string `json:"authDomain,omitempty"` + + // CodeBucket: Google Cloud Storage bucket that can be used for storing + // files associated with this application. This bucket is associated + // with the application and can be used by the gcloud deployment + // commands.@OutputOnly + CodeBucket string `json:"codeBucket,omitempty"` + + // DefaultBucket: Google Cloud Storage bucket that can be used by this + // application to store content.@OutputOnly + DefaultBucket string `json:"defaultBucket,omitempty"` + + // DefaultCookieExpiration: Cookie expiration policy for this + // application. + DefaultCookieExpiration string `json:"defaultCookieExpiration,omitempty"` + + // DefaultHostname: Hostname used to reach this application, as resolved + // by App Engine.@OutputOnly + DefaultHostname string `json:"defaultHostname,omitempty"` + + // DispatchRules: HTTP path dispatch rules for requests to the + // application that do not explicitly target a service or version. Rules + // are order-dependent.@OutputOnly + DispatchRules []*UrlDispatchRule `json:"dispatchRules,omitempty"` + + Iap *IdentityAwareProxy `json:"iap,omitempty"` + + // Id: Identifier of the Application resource. This identifier is + // equivalent to the project ID of the Google Cloud Platform project + // where you want to deploy your application. Example: myapp. + Id string `json:"id,omitempty"` + + // LocationId: Location from which this application will be run. + // Application instances will run out of data centers in the chosen + // location, which is also where all of the application's end user + // content is stored.Defaults to us-central.Options are:us-central - + // Central USeurope-west - Western Europeus-east1 - Eastern US + LocationId string `json:"locationId,omitempty"` + + // Name: Full path to the Application resource in the API. Example: + // apps/myapp.@OutputOnly + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuthDomain") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuthDomain") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Application) MarshalJSON() ([]byte, error) { + type noMethod Application + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AutomaticScaling: Automatic scaling is based on request rate, +// response latencies, and other application metrics. +type AutomaticScaling struct { + // CoolDownPeriod: Amount of time that the Autoscaler + // (https://cloud.google.com/compute/docs/autoscaler/) should wait + // between changes to the number of virtual machines. Only applicable + // for VM runtimes. + CoolDownPeriod string `json:"coolDownPeriod,omitempty"` + + // CpuUtilization: Target scaling by CPU usage. + CpuUtilization *CpuUtilization `json:"cpuUtilization,omitempty"` + + // DiskUtilization: Target scaling by disk usage. + DiskUtilization *DiskUtilization `json:"diskUtilization,omitempty"` + + // MaxConcurrentRequests: Number of concurrent requests an automatic + // scaling instance can accept before the scheduler spawns a new + // instance.Defaults to a runtime-specific value. + MaxConcurrentRequests int64 `json:"maxConcurrentRequests,omitempty"` + + // MaxIdleInstances: Maximum number of idle instances that should be + // maintained for this version. + MaxIdleInstances int64 `json:"maxIdleInstances,omitempty"` + + // MaxPendingLatency: Maximum amount of time that a request should wait + // in the pending queue before starting a new instance to handle it. + MaxPendingLatency string `json:"maxPendingLatency,omitempty"` + + // MaxTotalInstances: Maximum number of instances that should be started + // to handle requests. + MaxTotalInstances int64 `json:"maxTotalInstances,omitempty"` + + // MinIdleInstances: Minimum number of idle instances that should be + // maintained for this version. Only applicable for the default version + // of a service. + MinIdleInstances int64 `json:"minIdleInstances,omitempty"` + + // MinPendingLatency: Minimum amount of time a request should wait in + // the pending queue before starting a new instance to handle it. + MinPendingLatency string `json:"minPendingLatency,omitempty"` + + // MinTotalInstances: Minimum number of instances that should be + // maintained for this version. + MinTotalInstances int64 `json:"minTotalInstances,omitempty"` + + // NetworkUtilization: Target scaling by network usage. + NetworkUtilization *NetworkUtilization `json:"networkUtilization,omitempty"` + + // RequestUtilization: Target scaling by request utilization. + RequestUtilization *RequestUtilization `json:"requestUtilization,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CoolDownPeriod") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CoolDownPeriod") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AutomaticScaling) MarshalJSON() ([]byte, error) { + type noMethod AutomaticScaling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BasicScaling: A service with basic scaling will create an instance +// when the application receives a request. The instance will be turned +// down when the app becomes idle. Basic scaling is ideal for work that +// is intermittent or driven by user activity. +type BasicScaling struct { + // IdleTimeout: Duration of time after the last request that an instance + // must wait before the instance is shut down. + IdleTimeout string `json:"idleTimeout,omitempty"` + + // MaxInstances: Maximum number of instances to create for this version. + MaxInstances int64 `json:"maxInstances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IdleTimeout") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IdleTimeout") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BasicScaling) MarshalJSON() ([]byte, error) { + type noMethod BasicScaling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ContainerInfo: Docker image that is used to create a container and +// start a VM instance for the version that you deploy. Only applicable +// for instances running in the App Engine flexible environment. +type ContainerInfo struct { + // Image: URI to the hosted container image in Google Container + // Registry. The URI must be fully qualified and include a tag or + // digest. Examples: "gcr.io/my-project/image:tag" or + // "gcr.io/my-project/image@digest" + Image string `json:"image,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Image") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Image") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ContainerInfo) MarshalJSON() ([]byte, error) { + type noMethod ContainerInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CpuUtilization: Target scaling by CPU usage. +type CpuUtilization struct { + // AggregationWindowLength: Period of time over which CPU utilization is + // calculated. + AggregationWindowLength string `json:"aggregationWindowLength,omitempty"` + + // TargetUtilization: Target CPU utilization ratio to maintain when + // scaling. Must be between 0 and 1. + TargetUtilization float64 `json:"targetUtilization,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AggregationWindowLength") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AggregationWindowLength") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CpuUtilization) MarshalJSON() ([]byte, error) { + type noMethod CpuUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *CpuUtilization) UnmarshalJSON(data []byte) error { + type noMethod CpuUtilization + var s1 struct { + TargetUtilization gensupport.JSONFloat64 `json:"targetUtilization"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.TargetUtilization = float64(s1.TargetUtilization) + return nil +} + +// DebugInstanceRequest: Request message for Instances.DebugInstance. +type DebugInstanceRequest struct { + // SshKey: Public SSH key to add to the instance. + // Examples: + // [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] + // [USERNAME]:ssh-rsa [KEY_VALUE] google-ssh + // {"userName":"[USERNAME]","expireOn":"[EXPIRE_TIME]"}For more + // information, see Adding and Removing SSH Keys + // (https://cloud.google.com/compute/docs/instances/adding-removing-ssh-k + // eys). + SshKey string `json:"sshKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SshKey") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SshKey") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DebugInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod DebugInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Deployment: Code and application artifacts used to deploy a version +// to App Engine. +type Deployment struct { + // Container: The Docker image for the container that runs the version. + // Only applicable for instances running in the App Engine flexible + // environment. + Container *ContainerInfo `json:"container,omitempty"` + + // Files: Manifest of the files stored in Google Cloud Storage that are + // included as part of this version. All files must be readable using + // the credentials supplied with this call. + Files map[string]FileInfo `json:"files,omitempty"` + + // Zip: The zip file for this deployment, if this is a zip deployment. + Zip *ZipInfo `json:"zip,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Container") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Container") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Deployment) MarshalJSON() ([]byte, error) { + type noMethod Deployment + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskUtilization: Target scaling by disk usage. Only applicable for VM +// runtimes. +type DiskUtilization struct { + // TargetReadBytesPerSecond: Target bytes read per second. + TargetReadBytesPerSecond int64 `json:"targetReadBytesPerSecond,omitempty"` + + // TargetReadOpsPerSecond: Target ops read per seconds. + TargetReadOpsPerSecond int64 `json:"targetReadOpsPerSecond,omitempty"` + + // TargetWriteBytesPerSecond: Target bytes written per second. + TargetWriteBytesPerSecond int64 `json:"targetWriteBytesPerSecond,omitempty"` + + // TargetWriteOpsPerSecond: Target ops written per second. + TargetWriteOpsPerSecond int64 `json:"targetWriteOpsPerSecond,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "TargetReadBytesPerSecond") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. 
This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetReadBytesPerSecond") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskUtilization) MarshalJSON() ([]byte, error) { + type noMethod DiskUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// EndpointsApiService: Cloud Endpoints +// (https://cloud.google.com/endpoints) configuration. The Endpoints API +// Service provides tooling for serving Open API and gRPC endpoints via +// an NGINX proxy.The fields here refer to the name and configuration id +// of a "service" resource in the Service Management API +// (https://cloud.google.com/service-management/overview). +type EndpointsApiService struct { + // ConfigId: Endpoints service configuration id as specified by the + // Service Management API. For example "2016-09-19r1" + ConfigId string `json:"configId,omitempty"` + + // Name: Endpoints service name which is the name of the "service" + // resource in the Service Management API. For example + // "myapi.endpoints.myproject.cloud.goog" + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConfigId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConfigId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *EndpointsApiService) MarshalJSON() ([]byte, error) { + type noMethod EndpointsApiService + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ErrorHandler: Custom static error page to be served when an error +// occurs. +type ErrorHandler struct { + // ErrorCode: Error condition this handler applies to. + // + // Possible values: + // "ERROR_CODE_UNSPECIFIED" - Not specified. ERROR_CODE_DEFAULT is + // assumed. + // "ERROR_CODE_DEFAULT" - All other error types. + // "ERROR_CODE_OVER_QUOTA" - Application has exceeded a resource + // quota. + // "ERROR_CODE_DOS_API_DENIAL" - Client blocked by the application's + // Denial of Service protection configuration. + // "ERROR_CODE_TIMEOUT" - Deadline reached before the application + // responds. + ErrorCode string `json:"errorCode,omitempty"` + + // MimeType: MIME type of file. Defaults to text/html. + MimeType string `json:"mimeType,omitempty"` + + // StaticFile: Static file content to be served for this error. 
+ StaticFile string `json:"staticFile,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorCode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorCode") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ErrorHandler) MarshalJSON() ([]byte, error) { + type noMethod ErrorHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FileInfo: Single source file that is part of the version to be +// deployed. Each source file that is deployed must be specified +// separately. +type FileInfo struct { + // MimeType: The MIME type of the file.Defaults to the value from Google + // Cloud Storage. + MimeType string `json:"mimeType,omitempty"` + + // Sha1Sum: The SHA1 hash of the file, in hex. + Sha1Sum string `json:"sha1Sum,omitempty"` + + // SourceUrl: URL source to use to fetch this file. Must be a URL to a + // resource in Google Cloud Storage in the form + // 'http(s)://storage.googleapis.com//'. + SourceUrl string `json:"sourceUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MimeType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MimeType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FileInfo) MarshalJSON() ([]byte, error) { + type noMethod FileInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthCheck: Health checking configuration for VM instances. +// Unhealthy instances are killed and replaced with new instances. Only +// applicable for instances in App Engine flexible environment. +type HealthCheck struct { + // CheckInterval: Interval between health checks. + CheckInterval string `json:"checkInterval,omitempty"` + + // DisableHealthCheck: Whether to explicitly disable health checks for + // this instance. + DisableHealthCheck bool `json:"disableHealthCheck,omitempty"` + + // HealthyThreshold: Number of consecutive successful health checks + // required before receiving traffic. 
+ HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: Host header to send when performing an HTTP health check. + // Example: "myapp.appspot.com" + Host string `json:"host,omitempty"` + + // RestartThreshold: Number of consecutive failed health checks required + // before an instance is restarted. + RestartThreshold int64 `json:"restartThreshold,omitempty"` + + // Timeout: Time before the health check is considered failed. + Timeout string `json:"timeout,omitempty"` + + // UnhealthyThreshold: Number of consecutive failed health checks + // required before removing traffic. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CheckInterval") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckInterval") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheck) MarshalJSON() ([]byte, error) { + type noMethod HealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// IdentityAwareProxy: Identity-Aware Proxy +type IdentityAwareProxy struct { + // Enabled: Whether the serving infrastructure will authenticate and + // authorize all incoming requests.If true, the oauth2_client_id and + // oauth2_client_secret fields must be non-empty. + Enabled bool `json:"enabled,omitempty"` + + // Oauth2ClientId: OAuth2 client ID to use for the authentication flow. + Oauth2ClientId string `json:"oauth2ClientId,omitempty"` + + // Oauth2ClientSecret: OAuth2 client secret to use for the + // authentication flow.For security reasons, this value cannot be + // retrieved via the API. Instead, the SHA-256 hash of the value is + // returned in the oauth2_client_secret_sha256 field.@InputOnly + Oauth2ClientSecret string `json:"oauth2ClientSecret,omitempty"` + + // Oauth2ClientSecretSha256: Hex-encoded SHA-256 hash of the client + // secret.@OutputOnly + Oauth2ClientSecretSha256 string `json:"oauth2ClientSecretSha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *IdentityAwareProxy) MarshalJSON() ([]byte, error) { + type noMethod IdentityAwareProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Instance: An Instance resource is the computing unit that App Engine +// uses to automatically scale an application. +type Instance struct { + // AppEngineRelease: App Engine release this instance is running + // on.@OutputOnly + AppEngineRelease string `json:"appEngineRelease,omitempty"` + + // Availability: Availability of the instance.@OutputOnly + // + // Possible values: + // "UNSPECIFIED" + // "RESIDENT" + // "DYNAMIC" + Availability string `json:"availability,omitempty"` + + // AverageLatency: Average latency (ms) over the last minute.@OutputOnly + AverageLatency int64 `json:"averageLatency,omitempty"` + + // Errors: Number of errors since this instance was started.@OutputOnly + Errors int64 `json:"errors,omitempty"` + + // Id: Relative name of the instance within the version. Example: + // instance-1.@OutputOnly + Id string `json:"id,omitempty"` + + // MemoryUsage: Total memory in use (bytes).@OutputOnly + MemoryUsage int64 `json:"memoryUsage,omitempty,string"` + + // Name: Full path to the Instance resource in the API. Example: + // apps/myapp/services/default/versions/v1/instances/instance-1.@OutputOn + // ly + Name string `json:"name,omitempty"` + + // Qps: Average queries per second (QPS) over the last + // minute.@OutputOnly + Qps float64 `json:"qps,omitempty"` + + // Requests: Number of requests since this instance was + // started.@OutputOnly + Requests int64 `json:"requests,omitempty"` + + // StartTime: Time that this instance was started.@OutputOnly + StartTime string `json:"startTime,omitempty"` + + // VmDebugEnabled: Whether this instance is in debug mode. Only + // applicable for instances in App Engine flexible + // environment.@OutputOnly + VmDebugEnabled bool `json:"vmDebugEnabled,omitempty"` + + // VmId: Virtual machine ID of this instance. Only applicable for + // instances in App Engine flexible environment.@OutputOnly + VmId string `json:"vmId,omitempty"` + + // VmIp: The IP address of this instance. Only applicable for instances + // in App Engine flexible environment.@OutputOnly + VmIp string `json:"vmIp,omitempty"` + + // VmName: Name of the virtual machine where this instance lives. Only + // applicable for instances in App Engine flexible + // environment.@OutputOnly + VmName string `json:"vmName,omitempty"` + + // VmStatus: Status of the virtual machine where this instance lives. + // Only applicable for instances in App Engine flexible + // environment.@OutputOnly + VmStatus string `json:"vmStatus,omitempty"` + + // VmZoneName: Zone where the virtual machine is located. Only + // applicable for instances in App Engine flexible + // environment.@OutputOnly + VmZoneName string `json:"vmZoneName,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AppEngineRelease") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AppEngineRelease") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Instance) MarshalJSON() ([]byte, error) { + type noMethod Instance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Instance) UnmarshalJSON(data []byte) error { + type noMethod Instance + var s1 struct { + Qps gensupport.JSONFloat64 `json:"qps"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Qps = float64(s1.Qps) + return nil +} + +// Library: Third-party Python runtime library that is required by the +// application. +type Library struct { + // Name: Name of the library. Example: "django". + Name string `json:"name,omitempty"` + + // Version: Version of the library to select, or "latest". + Version string `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Library) MarshalJSON() ([]byte, error) { + type noMethod Library + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListInstancesResponse: Response message for Instances.ListInstances. +type ListInstancesResponse struct { + // Instances: The instances belonging to the requested version. + Instances []*Instance `json:"instances,omitempty"` + + // NextPageToken: Continuation token for fetching the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListInstancesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListLocationsResponse: The response message for +// Locations.ListLocations. +type ListLocationsResponse struct { + // Locations: A list of locations that matches the specified filter in + // the request. + Locations []*Location `json:"locations,omitempty"` + + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Locations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListLocationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListOperationsResponse: The response message for +// Operations.ListOperations. +type ListOperationsResponse struct { + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Operations: A list of operations that matches the specified filter in + // the request. + Operations []*Operation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListOperationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListServicesResponse: Response message for Services.ListServices. +type ListServicesResponse struct { + // NextPageToken: Continuation token for fetching the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Services: The services belonging to the requested application. + Services []*Service `json:"services,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListServicesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListVersionsResponse: Response message for Versions.ListVersions. +type ListVersionsResponse struct { + // NextPageToken: Continuation token for fetching the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Versions: The versions belonging to the requested service. + Versions []*Version `json:"versions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListVersionsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListVersionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LivenessCheck: Health checking configuration for VM instances. 
+// Unhealthy instances are killed and replaced with new instances. +type LivenessCheck struct { + // CheckInterval: Interval between health checks. + CheckInterval string `json:"checkInterval,omitempty"` + + // HealthyThreshold: Number of consecutive successful checks required + // before considering the VM healthy. + HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: Host header to send when performing a HTTP Liveness check. + // Example: "myapp.appspot.com" + Host string `json:"host,omitempty"` + + // InitialDelay: The initial delay before starting to execute the + // checks. + InitialDelay string `json:"initialDelay,omitempty"` + + // Path: The request path. + Path string `json:"path,omitempty"` + + // Timeout: Time before the check is considered failed. + Timeout string `json:"timeout,omitempty"` + + // UnhealthyThreshold: Number of consecutive failed checks required + // before considering the VM unhealthy. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CheckInterval") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckInterval") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LivenessCheck) MarshalJSON() ([]byte, error) { + type noMethod LivenessCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Location: A resource that represents Google Cloud Platform location. +type Location struct { + // Labels: Cross-service attributes for the location. For + // example + // {"cloud.googleapis.com/region": "us-east1"} + // + Labels map[string]string `json:"labels,omitempty"` + + // LocationId: The canonical id for this location. For example: + // "us-east1". + LocationId string `json:"locationId,omitempty"` + + // Metadata: Service-specific metadata. For example the available + // capacity at the given location. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: Resource name for the location, which may vary between + // implementations. For example: + // "projects/example-project/locations/us-east1" + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Labels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Labels") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Location) MarshalJSON() ([]byte, error) { + type noMethod Location + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LocationMetadata: Metadata for the given +// google.cloud.location.Location. +type LocationMetadata struct { + // FlexibleEnvironmentAvailable: App Engine Flexible Environment is + // available in the given location.@OutputOnly + FlexibleEnvironmentAvailable bool `json:"flexibleEnvironmentAvailable,omitempty"` + + // StandardEnvironmentAvailable: App Engine Standard Environment is + // available in the given location.@OutputOnly + StandardEnvironmentAvailable bool `json:"standardEnvironmentAvailable,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "FlexibleEnvironmentAvailable") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "FlexibleEnvironmentAvailable") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocationMetadata) MarshalJSON() ([]byte, error) { + type noMethod LocationMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ManualScaling: A service with manual scaling runs continuously, +// allowing you to perform complex initialization and rely on the state +// of its memory over time. +type ManualScaling struct { + // Instances: Number of instances to assign to the service at the start. + // This number can later be altered by using the Modules API + // (https://cloud.google.com/appengine/docs/python/modules/functions) + // set_num_instances() function. + Instances int64 `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManualScaling) MarshalJSON() ([]byte, error) { + type noMethod ManualScaling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Network: Extra network settings. Only applicable for VM runtimes. +type Network struct { + // ForwardedPorts: List of ports, or port pairs, to forward from the + // virtual machine to the application container. + ForwardedPorts []string `json:"forwardedPorts,omitempty"` + + // InstanceTag: Tag to apply to the VM instance during creation. + InstanceTag string `json:"instanceTag,omitempty"` + + // Name: Google Cloud Platform network where the virtual machines are + // created. Specify the short name, not the resource path.Defaults to + // default. + Name string `json:"name,omitempty"` + + // SubnetworkName: Google Cloud Platform sub-network where the virtual + // machines are created. Specify the short name, not the resource + // path.If a subnetwork name is specified, a network name will also be + // required unless it is for the default network. + // If the network the VM instance is being created in is a Legacy + // network, then the IP address is allocated from the IPv4Range. + // If the network the VM instance is being created in is an auto Subnet + // Mode Network, then only network name should be specified (not the + // subnetwork_name) and the IP address is created from the IPCidrRange + // of the subnetwork that exists in that zone for that network. + // If the network the VM instance is being created in is a custom Subnet + // Mode Network, then the subnetwork_name must be specified and the IP + // address is created from the IPCidrRange of the subnetwork.If + // specified, the subnetwork must exist in the same region as the Flex + // app. + SubnetworkName string `json:"subnetworkName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ForwardedPorts") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ForwardedPorts") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Network) MarshalJSON() ([]byte, error) { + type noMethod Network + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkUtilization: Target scaling by network usage. Only applicable +// for VM runtimes. +type NetworkUtilization struct { + // TargetReceivedBytesPerSecond: Target bytes received per second. + TargetReceivedBytesPerSecond int64 `json:"targetReceivedBytesPerSecond,omitempty"` + + // TargetReceivedPacketsPerSecond: Target packets received per second. 
+ TargetReceivedPacketsPerSecond int64 `json:"targetReceivedPacketsPerSecond,omitempty"` + + // TargetSentBytesPerSecond: Target bytes sent per second. + TargetSentBytesPerSecond int64 `json:"targetSentBytesPerSecond,omitempty"` + + // TargetSentPacketsPerSecond: Target packets sent per second. + TargetSentPacketsPerSecond int64 `json:"targetSentPacketsPerSecond,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "TargetReceivedBytesPerSecond") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "TargetReceivedBytesPerSecond") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkUtilization) MarshalJSON() ([]byte, error) { + type noMethod NetworkUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Operation: This resource represents a long-running operation that is +// the result of a network API call. +type Operation struct { + // Done: If the value is false, it means the operation is still in + // progress. If true, the operation is completed, and either error or + // response is available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *Status `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that originally returns it. If you use the default HTTP + // mapping, the name should have the format of + // operations/some/unique/name. + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as Delete, the + // response is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other + // methods, the response should have the type XxxResponse, where Xxx is + // the original method name. For example, if the original method name is + // TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadata: Metadata for the given +// google.longrunning.Operation. +type OperationMetadata struct { + // EndTime: Timestamp that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // InsertTime: Timestamp that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1beta4.Version.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // OperationType: Type of this operation. Deprecated, use method field + // instead. Example: "create_version".@OutputOnly + OperationType string `json:"operationType,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/modules/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadata) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataExperimental: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataExperimental struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. 
Example: + // google.appengine.experimental.CustomDomains.CreateCustomDomain.@Output + // Only + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/customDomains/example.com.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataExperimental) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataExperimental + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1 struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // EphemeralMessage: Ephemeral message that may change every time the + // operation is polled. @OutputOnly + EphemeralMessage string `json:"ephemeralMessage,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1.Versions.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // Warning: Durable messages that persist on every operation poll. + // @OutputOnly + Warning []string `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1 + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1Alpha: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1Alpha struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // EphemeralMessage: Ephemeral message that may change every time the + // operation is polled. @OutputOnly + EphemeralMessage string `json:"ephemeralMessage,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1alpha.Versions.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // Warning: Durable messages that persist on every operation poll. + // @OutputOnly + Warning []string `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1Alpha) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1Alpha + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1Beta: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1Beta struct { + // EndTime: Time that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // EphemeralMessage: Ephemeral message that may change every time the + // operation is polled. @OutputOnly + EphemeralMessage string `json:"ephemeralMessage,omitempty"` + + // InsertTime: Time that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method that initiated this operation. Example: + // google.appengine.v1beta.Versions.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // Warning: Durable messages that persist on every operation poll. 
+ // @OutputOnly + Warning []string `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1Beta) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1Beta + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadataV1Beta5: Metadata for the given +// google.longrunning.Operation. +type OperationMetadataV1Beta5 struct { + // EndTime: Timestamp that this operation completed.@OutputOnly + EndTime string `json:"endTime,omitempty"` + + // InsertTime: Timestamp that this operation was created.@OutputOnly + InsertTime string `json:"insertTime,omitempty"` + + // Method: API method name that initiated this operation. Example: + // google.appengine.v1beta5.Version.CreateVersion.@OutputOnly + Method string `json:"method,omitempty"` + + // Target: Name of the resource that this operation is acting on. + // Example: apps/myapp/services/default.@OutputOnly + Target string `json:"target,omitempty"` + + // User: User who requested this operation.@OutputOnly + User string `json:"user,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadataV1Beta5) MarshalJSON() ([]byte, error) { + type noMethod OperationMetadataV1Beta5 + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReadinessCheck: Readiness checking configuration for VM instances. +// Unhealthy instances are removed from traffic rotation. +type ReadinessCheck struct { + // CheckInterval: Interval between health checks. + CheckInterval string `json:"checkInterval,omitempty"` + + // HealthyThreshold: Number of consecutive successful checks required + // before receiving traffic. 
+ HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: Host header to send when performing a HTTP Readiness check. + // Example: "myapp.appspot.com" + Host string `json:"host,omitempty"` + + // Path: The request path. + Path string `json:"path,omitempty"` + + // Timeout: Time before the check is considered failed. + Timeout string `json:"timeout,omitempty"` + + // UnhealthyThreshold: Number of consecutive failed checks required + // before removing traffic. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CheckInterval") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckInterval") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReadinessCheck) MarshalJSON() ([]byte, error) { + type noMethod ReadinessCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RepairApplicationRequest: Request message for +// 'Applications.RepairApplication'. +type RepairApplicationRequest struct { +} + +// RequestUtilization: Target scaling by request utilization. Only +// applicable for VM runtimes. +type RequestUtilization struct { + // TargetConcurrentRequests: Target number of concurrent requests. + TargetConcurrentRequests int64 `json:"targetConcurrentRequests,omitempty"` + + // TargetRequestCountPerSecond: Target requests per second. + TargetRequestCountPerSecond int64 `json:"targetRequestCountPerSecond,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "TargetConcurrentRequests") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetConcurrentRequests") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RequestUtilization) MarshalJSON() ([]byte, error) { + type noMethod RequestUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Resources: Machine resources for a version. +type Resources struct { + // Cpu: Number of CPU cores needed. 
+ Cpu float64 `json:"cpu,omitempty"` + + // DiskGb: Disk size (GB) needed. + DiskGb float64 `json:"diskGb,omitempty"` + + // MemoryGb: Memory (GB) needed. + MemoryGb float64 `json:"memoryGb,omitempty"` + + // Volumes: User specified volumes. + Volumes []*Volume `json:"volumes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Cpu") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Cpu") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Resources) MarshalJSON() ([]byte, error) { + type noMethod Resources + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Resources) UnmarshalJSON(data []byte) error { + type noMethod Resources + var s1 struct { + Cpu gensupport.JSONFloat64 `json:"cpu"` + DiskGb gensupport.JSONFloat64 `json:"diskGb"` + MemoryGb gensupport.JSONFloat64 `json:"memoryGb"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Cpu = float64(s1.Cpu) + s.DiskGb = float64(s1.DiskGb) + s.MemoryGb = float64(s1.MemoryGb) + return nil +} + +// ScriptHandler: Executes a script to handle the request that matches +// the URL pattern. +type ScriptHandler struct { + // ScriptPath: Path to the script from the application root directory. + ScriptPath string `json:"scriptPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ScriptPath") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ScriptPath") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ScriptHandler) MarshalJSON() ([]byte, error) { + type noMethod ScriptHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Service: A Service resource is a logical component of an application +// that can share state and communicate in a secure fashion with other +// services. For example, an application that handles customer requests +// might include separate services to handle tasks such as backend data +// analysis or API requests from mobile devices. 
Each service has a +// collection of versions that define a specific set of code used to +// implement the functionality of that service. +type Service struct { + // Id: Relative name of the service within the application. Example: + // default.@OutputOnly + Id string `json:"id,omitempty"` + + // Name: Full path to the Service resource in the API. Example: + // apps/myapp/services/default.@OutputOnly + Name string `json:"name,omitempty"` + + // Split: Mapping that defines fractional HTTP traffic diversion to + // different versions within the service. + Split *TrafficSplit `json:"split,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Service) MarshalJSON() ([]byte, error) { + type noMethod Service + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StaticFilesHandler: Files served directly to the user for a given +// URL, such as images, CSS stylesheets, or JavaScript source files. +// Static file handlers describe which files in the application +// directory are static files, and which URLs serve them. +type StaticFilesHandler struct { + // ApplicationReadable: Whether files should also be uploaded as code + // data. By default, files declared in static file handlers are uploaded + // as static data and are only served to end users; they cannot be read + // by the application. If enabled, uploads are charged against both your + // code and static data storage resource quotas. + ApplicationReadable bool `json:"applicationReadable,omitempty"` + + // Expiration: Time a static file served by this handler should be + // cached by web proxies and browsers. + Expiration string `json:"expiration,omitempty"` + + // HttpHeaders: HTTP headers to use for all responses from these URLs. + HttpHeaders map[string]string `json:"httpHeaders,omitempty"` + + // MimeType: MIME type used to serve all files served by this + // handler.Defaults to file-specific MIME types, which are derived from + // each file's filename extension. + MimeType string `json:"mimeType,omitempty"` + + // Path: Path to the static files matched by the URL pattern, from the + // application root directory. The path can refer to text matched in + // groupings in the URL pattern. + Path string `json:"path,omitempty"` + + // RequireMatchingFile: Whether this handler should match the request if + // the file referenced by the handler does not exist. 
+ RequireMatchingFile bool `json:"requireMatchingFile,omitempty"` + + // UploadPathRegex: Regular expression that matches the file paths for + // all files that should be referenced by this handler. + UploadPathRegex string `json:"uploadPathRegex,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ApplicationReadable") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ApplicationReadable") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *StaticFilesHandler) MarshalJSON() ([]byte, error) { + type noMethod StaticFilesHandler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The Status type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by gRPC (https://github.com/grpc). The error +// model is designed to be: +// Simple to use and understand for most users +// Flexible enough to meet unexpected needsOverviewThe Status message +// contains three pieces of data: error code, error message, and error +// details. The error code should be an enum value of google.rpc.Code, +// but it may accept additional error codes if needed. The error message +// should be a developer-facing English message that helps developers +// understand and resolve the error. If a localized user-facing error +// message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of +// error detail types in the package google.rpc which can be used for +// common error conditions.Language mappingThe Status message is the +// logical representation of the error model, but it is not necessarily +// the actual wire format. When the Status message is exposed in +// different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions in Java, but more likely mapped to some error codes in +// C.Other usesThe error model and the Status message can be used in a +// variety of environments, either with or without APIs, to provide a +// consistent developer experience across different environments.Example +// uses of this error model include: +// Partial errors. If a service needs to return partial errors to the +// client, it may embed the Status in the normal response to indicate +// the partial errors. +// Workflow errors. A typical workflow has multiple steps. Each step may +// have a Status message for error reporting purpose. +// Batch operations. If a client uses batch request and batch response, +// the Status message should be used directly inside batch response, one +// for each error sub-response. 
+// Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the Status message. +// Logging. If some API errors are stored in logs, the message Status +// could be used directly after any stripping needed for +// security/privacy reasons. +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There will + // be a common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type noMethod Status + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TrafficSplit: Traffic routing configuration for versions within a +// single service. Traffic splits define how traffic directed to the +// service is assigned to versions. +type TrafficSplit struct { + // Allocations: Mapping from version IDs within the service to + // fractional (0.000, 1] allocations of traffic for that version. Each + // version can be specified only once, but some versions in the service + // may not have any traffic allocation. Services that have traffic + // allocated cannot be deleted until either the service is deleted or + // their traffic allocation is removed. Allocations must sum to 1. Up to + // two decimal place precision is supported for IP-based splits and up + // to three decimal places is supported for cookie-based splits. + Allocations map[string]float64 `json:"allocations,omitempty"` + + // ShardBy: Mechanism used to determine which version a request is sent + // to. The traffic selection algorithm will be stable for either type + // until allocations are changed. + // + // Possible values: + // "UNSPECIFIED" - Diversion method unspecified. + // "COOKIE" - Diversion based on a specially named cookie, + // "GOOGAPPUID." The cookie must be set by the application itself or no + // diversion will occur. + // "IP" - Diversion based on applying the modulus operation to a + // fingerprint of the IP address. + ShardBy string `json:"shardBy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Allocations") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Allocations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TrafficSplit) MarshalJSON() ([]byte, error) { + type noMethod TrafficSplit + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlDispatchRule: Rules to match an HTTP request and dispatch that +// request to a service. +type UrlDispatchRule struct { + // Domain: Domain name to match against. The wildcard "*" is supported + // if specified before a period: "*.".Defaults to matching all domains: + // "*". + Domain string `json:"domain,omitempty"` + + // Path: Pathname within the host. Must start with a "/". A single "*" + // can be included at the end of the path. The sum of the lengths of the + // domain and path may not exceed 100 characters. + Path string `json:"path,omitempty"` + + // Service: Resource ID of a service in this application that should + // serve the matched request. The service must already exist. Example: + // default. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Domain") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Domain") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlDispatchRule) MarshalJSON() ([]byte, error) { + type noMethod UrlDispatchRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMap: URL pattern and description of how the URL should be handled. +// App Engine can handle URLs by executing application code or by +// serving static files uploaded with the version, such as images, CSS, +// or JavaScript. +type UrlMap struct { + // ApiEndpoint: Uses API Endpoints to handle requests. + ApiEndpoint *ApiEndpointHandler `json:"apiEndpoint,omitempty"` + + // AuthFailAction: Action to take when users access resources that + // require authentication. Defaults to redirect. + // + // Possible values: + // "AUTH_FAIL_ACTION_UNSPECIFIED" - Not specified. + // AUTH_FAIL_ACTION_REDIRECT is assumed. 
+ // "AUTH_FAIL_ACTION_REDIRECT" - Redirects user to + // "accounts.google.com". The user is redirected back to the application + // URL after signing in or creating an account. + // "AUTH_FAIL_ACTION_UNAUTHORIZED" - Rejects request with a 401 HTTP + // status code and an error message. + AuthFailAction string `json:"authFailAction,omitempty"` + + // Login: Level of login required to access this resource. + // + // Possible values: + // "LOGIN_UNSPECIFIED" - Not specified. LOGIN_OPTIONAL is assumed. + // "LOGIN_OPTIONAL" - Does not require that the user is signed in. + // "LOGIN_ADMIN" - If the user is not signed in, the auth_fail_action + // is taken. In addition, if the user is not an administrator for the + // application, they are given an error message regardless of + // auth_fail_action. If the user is an administrator, the handler + // proceeds. + // "LOGIN_REQUIRED" - If the user has signed in, the handler proceeds + // normally. Otherwise, the auth_fail_action is taken. + Login string `json:"login,omitempty"` + + // RedirectHttpResponseCode: 30x code to use when performing redirects + // for the secure field. Defaults to 302. + // + // Possible values: + // "REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED" - Not specified. 302 is + // assumed. + // "REDIRECT_HTTP_RESPONSE_CODE_301" - 301 Moved Permanently code. + // "REDIRECT_HTTP_RESPONSE_CODE_302" - 302 Moved Temporarily code. + // "REDIRECT_HTTP_RESPONSE_CODE_303" - 303 See Other code. + // "REDIRECT_HTTP_RESPONSE_CODE_307" - 307 Temporary Redirect code. + RedirectHttpResponseCode string `json:"redirectHttpResponseCode,omitempty"` + + // Script: Executes a script to handle the request that matches this URL + // pattern. + Script *ScriptHandler `json:"script,omitempty"` + + // SecurityLevel: Security (HTTPS) enforcement for this URL. + // + // Possible values: + // "SECURE_UNSPECIFIED" - Not specified. + // "SECURE_DEFAULT" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used, and respond + // accordingly. + // "SECURE_NEVER" - Requests for a URL that match this handler that + // use HTTPS are automatically redirected to the HTTP equivalent URL. + // "SECURE_OPTIONAL" - Both HTTP and HTTPS requests with URLs that + // match the handler succeed without redirects. The application can + // examine the request to determine which protocol was used and respond + // accordingly. + // "SECURE_ALWAYS" - Requests for a URL that match this handler that + // do not use HTTPS are automatically redirected to the HTTPS URL with + // the same path. Query parameters are reserved for the redirect. + SecurityLevel string `json:"securityLevel,omitempty"` + + // StaticFiles: Returns the contents of a file, such as an image, as the + // response. + StaticFiles *StaticFilesHandler `json:"staticFiles,omitempty"` + + // UrlRegex: URL prefix. Uses regular expression syntax, which means + // regexp special characters must be escaped, but should not contain + // groupings. All URLs that begin with this prefix are handled by this + // handler, using the portion of the URL after the prefix as part of the + // file path. + UrlRegex string `json:"urlRegex,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ApiEndpoint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "ApiEndpoint") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *UrlMap) MarshalJSON() ([]byte, error) {
+ type noMethod UrlMap
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// Version: A Version resource is a specific set of source code and
+// configuration files that are deployed into a service.
+type Version struct {
+ // ApiConfig: Serving configuration for Google Cloud Endpoints
+ // (https://cloud.google.com/appengine/docs/python/endpoints/).Only
+ // returned in GET requests if view=FULL is set.
+ ApiConfig *ApiConfigHandler `json:"apiConfig,omitempty"`
+
+ // AutomaticScaling: Automatic scaling is based on request rate,
+ // response latencies, and other application metrics.
+ AutomaticScaling *AutomaticScaling `json:"automaticScaling,omitempty"`
+
+ // BasicScaling: A service with basic scaling will create an instance
+ // when the application receives a request. The instance will be turned
+ // down when the app becomes idle. Basic scaling is ideal for work that
+ // is intermittent or driven by user activity.
+ BasicScaling *BasicScaling `json:"basicScaling,omitempty"`
+
+ // BetaSettings: Metadata settings that are supplied to this version to
+ // enable beta runtime features.
+ BetaSettings map[string]string `json:"betaSettings,omitempty"`
+
+ // CreateTime: Time that this version was created.@OutputOnly
+ CreateTime string `json:"createTime,omitempty"`
+
+ // CreatedBy: Email address of the user who created this
+ // version.@OutputOnly
+ CreatedBy string `json:"createdBy,omitempty"`
+
+ // DefaultExpiration: Duration that static files should be cached by web
+ // proxies and browsers. Only applicable if the corresponding
+ // StaticFilesHandler
+ // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be
+ // ta/apps.services.versions#staticfileshandler) does not specify its
+ // own expiration time.Only returned in GET requests if view=FULL is
+ // set.
+ DefaultExpiration string `json:"defaultExpiration,omitempty"`
+
+ // Deployment: Code and application artifacts that make up this
+ // version.Only returned in GET requests if view=FULL is set.
+ Deployment *Deployment `json:"deployment,omitempty"`
+
+ // DiskUsageBytes: Total size in bytes of all the files that are
+ // included in this version and currently hosted on the App Engine
+ // disk.@OutputOnly
+ DiskUsageBytes int64 `json:"diskUsageBytes,omitempty,string"`
+
+ // EndpointsApiService: Cloud Endpoints configuration.If
+ // endpoints_api_service is set, the Cloud Endpoints Extensible Service
+ // Proxy will be provided to serve the API implemented by the app.
+ EndpointsApiService *EndpointsApiService `json:"endpointsApiService,omitempty"`
+
+ // Env: App Engine execution environment for this version.Defaults to
+ // standard.
+ Env string `json:"env,omitempty"` + + // EnvVariables: Environment variables available to the application.Only + // returned in GET requests if view=FULL is set. + EnvVariables map[string]string `json:"envVariables,omitempty"` + + // ErrorHandlers: Custom static error pages. Limited to 10KB per + // page.Only returned in GET requests if view=FULL is set. + ErrorHandlers []*ErrorHandler `json:"errorHandlers,omitempty"` + + // Handlers: An ordered list of URL-matching patterns that should be + // applied to incoming requests. The first matching URL handles the + // request and other request handlers are not attempted.Only returned in + // GET requests if view=FULL is set. + Handlers []*UrlMap `json:"handlers,omitempty"` + + // HealthCheck: Configures health checking for VM instances. Unhealthy + // instances are stopped and replaced with new instances. Only + // applicable for VM runtimes.Only returned in GET requests if view=FULL + // is set. + HealthCheck *HealthCheck `json:"healthCheck,omitempty"` + + // Id: Relative name of the version within the service. Example: v1. + // Version names can contain only lowercase letters, numbers, or + // hyphens. Reserved names: "default", "latest", and any name with the + // prefix "ah-". + Id string `json:"id,omitempty"` + + // InboundServices: Before an application can receive email or XMPP + // messages, the application must be configured to enable the service. + // + // Possible values: + // "INBOUND_SERVICE_UNSPECIFIED" - Not specified. + // "INBOUND_SERVICE_MAIL" - Allows an application to receive mail. + // "INBOUND_SERVICE_MAIL_BOUNCE" - Allows an application to receive + // email-bound notifications. + // "INBOUND_SERVICE_XMPP_ERROR" - Allows an application to receive + // error stanzas. + // "INBOUND_SERVICE_XMPP_MESSAGE" - Allows an application to receive + // instant messages. + // "INBOUND_SERVICE_XMPP_SUBSCRIBE" - Allows an application to receive + // user subscription POSTs. + // "INBOUND_SERVICE_XMPP_PRESENCE" - Allows an application to receive + // a user's chat presence. + // "INBOUND_SERVICE_CHANNEL_PRESENCE" - Registers an application for + // notifications when a client connects or disconnects from a channel. + // "INBOUND_SERVICE_WARMUP" - Enables warmup requests. + InboundServices []string `json:"inboundServices,omitempty"` + + // InstanceClass: Instance class that is used to run this version. Valid + // values are: + // AutomaticScaling: F1, F2, F4, F4_1G + // ManualScaling or BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 + // for AutomaticScaling and B1 for ManualScaling or BasicScaling. + InstanceClass string `json:"instanceClass,omitempty"` + + // Libraries: Configuration for third-party Python runtime libraries + // that are required by the application.Only returned in GET requests if + // view=FULL is set. + Libraries []*Library `json:"libraries,omitempty"` + + // LivenessCheck: Configures liveness health checking for VM instances. + // Unhealthy instances are stopped and replaced with new instancesOnly + // returned in GET requests if view=FULL is set. + LivenessCheck *LivenessCheck `json:"livenessCheck,omitempty"` + + // ManualScaling: A service with manual scaling runs continuously, + // allowing you to perform complex initialization and rely on the state + // of its memory over time. + ManualScaling *ManualScaling `json:"manualScaling,omitempty"` + + // Name: Full path to the Version resource in the API. 
Example: + // apps/myapp/services/default/versions/v1.@OutputOnly + Name string `json:"name,omitempty"` + + // Network: Extra network settings. Only applicable for VM runtimes. + Network *Network `json:"network,omitempty"` + + // NobuildFilesRegex: Files that match this pattern will not be built + // into this version. Only applicable for Go runtimes.Only returned in + // GET requests if view=FULL is set. + NobuildFilesRegex string `json:"nobuildFilesRegex,omitempty"` + + // ReadinessCheck: Configures readiness health checking for VM + // instances. Unhealthy instances are not put into the backend traffic + // rotation.Only returned in GET requests if view=FULL is set. + ReadinessCheck *ReadinessCheck `json:"readinessCheck,omitempty"` + + // Resources: Machine resources for this version. Only applicable for VM + // runtimes. + Resources *Resources `json:"resources,omitempty"` + + // Runtime: Desired runtime. Example: python27. + Runtime string `json:"runtime,omitempty"` + + // ServingStatus: Current serving status of this version. Only the + // versions with a SERVING status create instances and can be + // billed.SERVING_STATUS_UNSPECIFIED is an invalid value. Defaults to + // SERVING. + // + // Possible values: + // "SERVING_STATUS_UNSPECIFIED" - Not specified. + // "SERVING" - Currently serving. Instances are created according to + // the scaling settings of the version. + // "STOPPED" - Disabled. No instances will be created and the scaling + // settings are ignored until the state of the version changes to + // SERVING. + ServingStatus string `json:"servingStatus,omitempty"` + + // Threadsafe: Whether multiple requests can be dispatched to this + // version at once. + Threadsafe bool `json:"threadsafe,omitempty"` + + // VersionUrl: Serving URL for this version. Example: + // "https://myversion-dot-myservice-dot-myapp.appspot.com"@OutputOnly + VersionUrl string `json:"versionUrl,omitempty"` + + // Vm: Whether to deploy this version in a container on a virtual + // machine. + Vm bool `json:"vm,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ApiConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ApiConfig") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Version) MarshalJSON() ([]byte, error) { + type noMethod Version + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Volume: Volumes mounted within the app container. Only applicable for +// VM runtimes. +type Volume struct { + // Name: Unique name for the volume. + Name string `json:"name,omitempty"` + + // SizeGb: Volume size in gigabytes. 
+ SizeGb float64 `json:"sizeGb,omitempty"` + + // VolumeType: Underlying volume type, e.g. 'tmpfs'. + VolumeType string `json:"volumeType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Volume) MarshalJSON() ([]byte, error) { + type noMethod Volume + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Volume) UnmarshalJSON(data []byte) error { + type noMethod Volume + var s1 struct { + SizeGb gensupport.JSONFloat64 `json:"sizeGb"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.SizeGb = float64(s1.SizeGb) + return nil +} + +// ZipInfo: The zip file information for a zip deployment. +type ZipInfo struct { + // FilesCount: An estimate of the number of files in a zip for a zip + // deployment. If set, must be greater than or equal to the actual + // number of files. Used for optimizing performance; if not provided, + // deployment may be slow. + FilesCount int64 `json:"filesCount,omitempty"` + + // SourceUrl: URL of the zip file to deploy from. Must be a URL to a + // resource in Google Cloud Storage in the form + // 'http(s)://storage.googleapis.com//'. + SourceUrl string `json:"sourceUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FilesCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FilesCount") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZipInfo) MarshalJSON() ([]byte, error) { + type noMethod ZipInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "appengine.apps.create": + +type AppsCreateCall struct { + s *APIService + application *Application + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an App Engine application for a Google Cloud Platform +// project. 
Required fields: +// id - The ID of the target Cloud Platform project. +// location - The region +// (https://cloud.google.com/appengine/docs/locations) where you want +// the App Engine application located.For more information about App +// Engine applications, see Managing Projects, Applications, and Billing +// (https://cloud.google.com/appengine/docs/python/console/). +func (r *AppsService) Create(application *Application) *AppsCreateCall { + c := &AppsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.application = application + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsCreateCall) Fields(s ...googleapi.Field) *AppsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsCreateCall) Context(ctx context.Context) *AppsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an App Engine application for a Google Cloud Platform project. 
Required fields:\nid - The ID of the target Cloud Platform project.\nlocation - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/python/console/).",
+ // "flatPath": "v1beta/apps",
+ // "httpMethod": "POST",
+ // "id": "appengine.apps.create",
+ // "parameterOrder": [],
+ // "parameters": {},
+ // "path": "v1beta/apps",
+ // "request": {
+ // "$ref": "Application"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "appengine.apps.get":
+
+type AppsGetCall struct {
+ s *APIService
+ appsId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Get: Gets information about an application.
+func (r *AppsService) Get(appsId string) *AppsGetCall {
+ c := &AppsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.appsId = appsId
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AppsGetCall) Fields(s ...googleapi.Field) *AppsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AppsGetCall) IfNoneMatch(entityTag string) *AppsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AppsGetCall) Context(ctx context.Context) *AppsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *AppsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "appsId": c.appsId,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "appengine.apps.get" call.
+// Exactly one of *Application or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Application.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *AppsGetCall) Do(opts ...googleapi.CallOption) (*Application, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Application{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about an application.", + // "flatPath": "v1beta/apps/{appsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.get", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the Application resource to get. Example: apps/myapp.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}", + // "response": { + // "$ref": "Application" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.patch": + +type AppsPatchCall struct { + s *APIService + appsId string + application *Application + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified Application resource. You can update the +// following fields: +// auth_domain - Google authentication domain for controlling user +// access to the application. +// default_cookie_expiration - Cookie expiration policy for the +// application. +func (r *AppsService) Patch(appsId string, application *Application) *AppsPatchCall { + c := &AppsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.application = application + return c +} + +// UpdateMask sets the optional parameter "updateMask": Standard field +// mask for the set of fields to be updated. +func (c *AppsPatchCall) UpdateMask(updateMask string) *AppsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsPatchCall) Fields(s ...googleapi.Field) *AppsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsPatchCall) Context(ctx context.Context) *AppsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified Application resource. You can update the following fields:\nauth_domain - Google authentication domain for controlling user access to the application.\ndefault_cookie_expiration - Cookie expiration policy for the application.", + // "flatPath": "v1beta/apps/{appsId}", + // "httpMethod": "PATCH", + // "id": "appengine.apps.patch", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the Application resource to update. 
Example: apps/myapp.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Standard field mask for the set of fields to be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}", + // "request": { + // "$ref": "Application" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.repair": + +type AppsRepairCall struct { + s *APIService + appsId string + repairapplicationrequest *RepairApplicationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Repair: Recreates the required App Engine features for the specified +// App Engine application, for example a Cloud Storage bucket or App +// Engine service account. Use this method if you receive an error +// message about a missing feature, for example, Error retrieving the +// App Engine service account. +func (r *AppsService) Repair(appsId string, repairapplicationrequest *RepairApplicationRequest) *AppsRepairCall { + c := &AppsRepairCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.repairapplicationrequest = repairapplicationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsRepairCall) Fields(s ...googleapi.Field) *AppsRepairCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsRepairCall) Context(ctx context.Context) *AppsRepairCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsRepairCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsRepairCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.repairapplicationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}:repair") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.repair" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsRepairCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Recreates the required App Engine features for the specified App Engine application, for example a Cloud Storage bucket or App Engine service account. Use this method if you receive an error message about a missing feature, for example, Error retrieving the App Engine service account.", + // "flatPath": "v1beta/apps/{appsId}:repair", + // "httpMethod": "POST", + // "id": "appengine.apps.repair", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the application to repair. Example: apps/myapp", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}:repair", + // "request": { + // "$ref": "RepairApplicationRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.locations.get": + +type AppsLocationsGetCall struct { + s *APIService + appsId string + locationsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get information about a location. +func (r *AppsLocationsService) Get(appsId string, locationsId string) *AppsLocationsGetCall { + c := &AppsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.locationsId = locationsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsLocationsGetCall) Fields(s ...googleapi.Field) *AppsLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsLocationsGetCall) IfNoneMatch(entityTag string) *AppsLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsLocationsGetCall) Context(ctx context.Context) *AppsLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/locations/{locationsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "locationsId": c.locationsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.locations.get" call. +// Exactly one of *Location or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Location.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Location{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get information about a location.", + // "flatPath": "v1beta/apps/{appsId}/locations/{locationsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.locations.get", + // "parameterOrder": [ + // "appsId", + // "locationsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Resource name for the location.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "locationsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/locations/{locationsId}", + // "response": { + // "$ref": "Location" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.locations.list": + +type AppsLocationsListCall struct { + s *APIService + appsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists information about the supported locations for this +// service. 
+func (r *AppsLocationsService) List(appsId string) *AppsLocationsListCall { + c := &AppsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. +func (c *AppsLocationsListCall) Filter(filter string) *AppsLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *AppsLocationsListCall) PageSize(pageSize int64) *AppsLocationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *AppsLocationsListCall) PageToken(pageToken string) *AppsLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsLocationsListCall) Fields(s ...googleapi.Field) *AppsLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsLocationsListCall) IfNoneMatch(entityTag string) *AppsLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsLocationsListCall) Context(ctx context.Context) *AppsLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/locations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.locations.list" call. +// Exactly one of *ListLocationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLocationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListLocationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists information about the supported locations for this service.", + // "flatPath": "v1beta/apps/{appsId}/locations", + // "httpMethod": "GET", + // "id": "appengine.apps.locations.list", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. The resource that owns the locations collection, if applicable.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/locations", + // "response": { + // "$ref": "ListLocationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.operations.get": + +type AppsOperationsGetCall struct { + s *APIService + appsId string + operationsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. +func (r *AppsOperationsService) Get(appsId string, operationsId string) *AppsOperationsGetCall { + c := &AppsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.operationsId = operationsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsOperationsGetCall) Fields(s ...googleapi.Field) *AppsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsOperationsGetCall) IfNoneMatch(entityTag string) *AppsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsOperationsGetCall) Context(ctx context.Context) *AppsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/operations/{operationsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "operationsId": c.operationsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1beta/apps/{appsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.operations.get", + // "parameterOrder": [ + // "appsId", + // "operationsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. The name of the operation resource.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "operationsId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/operations/{operationsId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.operations.list": + +type AppsOperationsListCall struct { + s *APIService + appsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the +// request. If the server doesn't support this method, it returns +// UNIMPLEMENTED.NOTE: the name binding below allows API services to +// override the binding to use different resource name schemes, such as +// users/*/operations. +func (r *AppsOperationsService) List(appsId string) *AppsOperationsListCall { + c := &AppsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. +func (c *AppsOperationsListCall) Filter(filter string) *AppsOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *AppsOperationsListCall) PageSize(pageSize int64) *AppsOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *AppsOperationsListCall) PageToken(pageToken string) *AppsOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsOperationsListCall) Fields(s ...googleapi.Field) *AppsOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsOperationsListCall) IfNoneMatch(entityTag string) *AppsOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsOperationsListCall) Context(ctx context.Context) *AppsOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.", + // "flatPath": "v1beta/apps/{appsId}/operations", + // "httpMethod": "GET", + // "id": "appengine.apps.operations.list", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. 
The name of the operation collection.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/operations", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.services.delete": + +type AppsServicesDeleteCall struct { + s *APIService + appsId string + servicesId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified service and all enclosed versions. +func (r *AppsServicesService) Delete(appsId string, servicesId string) *AppsServicesDeleteCall { + c := &AppsServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesDeleteCall) Fields(s ...googleapi.Field) *AppsServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesDeleteCall) Context(ctx context.Context) *AppsServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified service and all enclosed versions.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}", + // "httpMethod": "DELETE", + // "id": "appengine.apps.services.delete", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.get": + +type AppsServicesGetCall struct { + s *APIService + appsId string + servicesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the current configuration of the specified service. +func (r *AppsServicesService) Get(appsId string, servicesId string) *AppsServicesGetCall { + c := &AppsServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesGetCall) Fields(s ...googleapi.Field) *AppsServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *AppsServicesGetCall) IfNoneMatch(entityTag string) *AppsServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesGetCall) Context(ctx context.Context) *AppsServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.get" call. +// Exactly one of *Service or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Service.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AppsServicesGetCall) Do(opts ...googleapi.CallOption) (*Service, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Service{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the current configuration of the specified service.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}", + // "httpMethod": "GET", + // "id": "appengine.apps.services.get", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}", + // "response": { + // "$ref": "Service" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.services.list": + +type AppsServicesListCall struct { + s *APIService + appsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the services in the application. +func (r *AppsServicesService) List(appsId string) *AppsServicesListCall { + c := &AppsServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum results to +// return per page. +func (c *AppsServicesListCall) PageSize(pageSize int64) *AppsServicesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Continuation token +// for fetching the next page of results. +func (c *AppsServicesListCall) PageToken(pageToken string) *AppsServicesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesListCall) Fields(s ...googleapi.Field) *AppsServicesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesListCall) IfNoneMatch(entityTag string) *AppsServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesListCall) Context(ctx context.Context) *AppsServicesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.list" call. 
+// Exactly one of *ListServicesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListServicesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListServicesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all the services in the application.", + // "flatPath": "v1beta/apps/{appsId}/services", + // "httpMethod": "GET", + // "id": "appengine.apps.services.list", + // "parameterOrder": [ + // "appsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent Application resource. Example: apps/myapp.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum results to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Continuation token for fetching the next page of results.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services", + // "response": { + // "$ref": "ListServicesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsServicesListCall) Pages(ctx context.Context, f func(*ListServicesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.services.patch": + +type AppsServicesPatchCall struct { + s *APIService + appsId string + servicesId string + service *Service + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the configuration of the specified service. 
+func (r *AppsServicesService) Patch(appsId string, servicesId string, service *Service) *AppsServicesPatchCall { + c := &AppsServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.service = service + return c +} + +// MigrateTraffic sets the optional parameter "migrateTraffic": Set to +// true to gradually shift traffic to one or more versions that you +// specify. By default, traffic is shifted immediately. For gradual +// traffic migration, the target versions must be located within +// instances that are configured for both warmup requests +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be +// ta/apps.services.versions#inboundservicetype) and automatic scaling +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be +// ta/apps.services.versions#automaticscaling). You must specify the +// shardBy +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be +// ta/apps.services#shardby) field in the Service resource. Gradual +// traffic migration is not supported in the App Engine flexible +// environment. For examples, see Migrating and Splitting Traffic +// (https://cloud.google.com/appengine/docs/admin-api/migrating-splitting +// -traffic). +func (c *AppsServicesPatchCall) MigrateTraffic(migrateTraffic bool) *AppsServicesPatchCall { + c.urlParams_.Set("migrateTraffic", fmt.Sprint(migrateTraffic)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Standard field +// mask for the set of fields to be updated. +func (c *AppsServicesPatchCall) UpdateMask(updateMask string) *AppsServicesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesPatchCall) Fields(s ...googleapi.Field) *AppsServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesPatchCall) Context(ctx context.Context) *AppsServicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.service) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.patch" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the configuration of the specified service.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}", + // "httpMethod": "PATCH", + // "id": "appengine.apps.services.patch", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "migrateTraffic": { + // "description": "Set to true to gradually shift traffic to one or more versions that you specify. By default, traffic is shifted immediately. For gradual traffic migration, the target versions must be located within instances that are configured for both warmup requests (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#inboundservicetype) and automatic scaling (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#automaticscaling). You must specify the shardBy (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services#shardby) field in the Service resource. Gradual traffic migration is not supported in the App Engine flexible environment. For examples, see Migrating and Splitting Traffic (https://cloud.google.com/appengine/docs/admin-api/migrating-splitting-traffic).", + // "location": "query", + // "type": "boolean" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Standard field mask for the set of fields to be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}", + // "request": { + // "$ref": "Service" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.create": + +type AppsServicesVersionsCreateCall struct { + s *APIService + appsId string + servicesId string + version *Version + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Deploys code and resource files to a new version. 
+func (r *AppsServicesVersionsService) Create(appsId string, servicesId string, version *Version) *AppsServicesVersionsCreateCall { + c := &AppsServicesVersionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.version = version + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsCreateCall) Fields(s ...googleapi.Field) *AppsServicesVersionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsCreateCall) Context(ctx context.Context) *AppsServicesVersionsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deploys code and resource files to a new version.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions", + // "httpMethod": "POST", + // "id": "appengine.apps.services.versions.create", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent resource to create this version under. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions", + // "request": { + // "$ref": "Version" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.delete": + +type AppsServicesVersionsDeleteCall struct { + s *APIService + appsId string + servicesId string + versionsId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an existing Version resource. +func (r *AppsServicesVersionsService) Delete(appsId string, servicesId string, versionsId string) *AppsServicesVersionsDeleteCall { + c := &AppsServicesVersionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsDeleteCall) Fields(s ...googleapi.Field) *AppsServicesVersionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsDeleteCall) Context(ctx context.Context) *AppsServicesVersionsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsServicesVersionsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an existing Version resource.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "httpMethod": "DELETE", + // "id": "appengine.apps.services.versions.delete", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.get": + +type AppsServicesVersionsGetCall struct { + s *APIService + appsId string + servicesId string + versionsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the specified Version resource. By default, only a +// BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get +// the full resource. +func (r *AppsServicesVersionsService) Get(appsId string, servicesId string, versionsId string) *AppsServicesVersionsGetCall { + c := &AppsServicesVersionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + return c +} + +// View sets the optional parameter "view": Controls the set of fields +// returned in the Get response. +// +// Possible values: +// "BASIC" +// "FULL" +func (c *AppsServicesVersionsGetCall) View(view string) *AppsServicesVersionsGetCall { + c.urlParams_.Set("view", view) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsGetCall) Fields(s ...googleapi.Field) *AppsServicesVersionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsGetCall) IfNoneMatch(entityTag string) *AppsServicesVersionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsGetCall) Context(ctx context.Context) *AppsServicesVersionsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.get" call. +// Exactly one of *Version or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Version.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AppsServicesVersionsGetCall) Do(opts ...googleapi.CallOption) (*Version, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Version{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the specified Version resource. By default, only a BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get the full resource.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.get", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "view": { + // "description": "Controls the set of fields returned in the Get response.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "response": { + // "$ref": "Version" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.services.versions.list": + +type AppsServicesVersionsListCall struct { + s *APIService + appsId string + servicesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the versions of a service. 
+func (r *AppsServicesVersionsService) List(appsId string, servicesId string) *AppsServicesVersionsListCall { + c := &AppsServicesVersionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum results to +// return per page. +func (c *AppsServicesVersionsListCall) PageSize(pageSize int64) *AppsServicesVersionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Continuation token +// for fetching the next page of results. +func (c *AppsServicesVersionsListCall) PageToken(pageToken string) *AppsServicesVersionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// View sets the optional parameter "view": Controls the set of fields +// returned in the List response. +// +// Possible values: +// "BASIC" +// "FULL" +func (c *AppsServicesVersionsListCall) View(view string) *AppsServicesVersionsListCall { + c.urlParams_.Set("view", view) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsListCall) Fields(s ...googleapi.Field) *AppsServicesVersionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsListCall) IfNoneMatch(entityTag string) *AppsServicesVersionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsListCall) Context(ctx context.Context) *AppsServicesVersionsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.list" call. +// Exactly one of *ListVersionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *ListVersionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsServicesVersionsListCall) Do(opts ...googleapi.CallOption) (*ListVersionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListVersionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the versions of a service.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.list", + // "parameterOrder": [ + // "appsId", + // "servicesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent Service resource. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum results to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Continuation token for fetching the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "view": { + // "description": "Controls the set of fields returned in the List response.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions", + // "response": { + // "$ref": "ListVersionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AppsServicesVersionsListCall) Pages(ctx context.Context, f func(*ListVersionsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "appengine.apps.services.versions.patch": + +type AppsServicesVersionsPatchCall struct { + s *APIService + appsId string + servicesId string + versionsId string + version *Version + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified Version resource. 
You can specify the +// following fields depending on the App Engine environment and type of +// scaling that the version resource uses: +// serving_status +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be +// ta/apps.services.versions#Version.FIELDS.serving_status): For +// Version resources that use basic scaling, manual scaling, or run in +// the App Engine flexible environment. +// instance_class +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be +// ta/apps.services.versions#Version.FIELDS.instance_class): For +// Version resources that run in the App Engine standard +// environment. +// automatic_scaling.min_idle_instances +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be +// ta/apps.services.versions#Version.FIELDS.automatic_scaling): For +// Version resources that use automatic scaling and run in the App +// Engine standard environment. +// automatic_scaling.max_idle_instances +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1be +// ta/apps.services.versions#Version.FIELDS.automatic_scaling): For +// Version resources that use automatic scaling and run in the App +// Engine standard environment. +func (r *AppsServicesVersionsService) Patch(appsId string, servicesId string, versionsId string, version *Version) *AppsServicesVersionsPatchCall { + c := &AppsServicesVersionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.version = version + return c +} + +// UpdateMask sets the optional parameter "updateMask": Standard field +// mask for the set of fields to be updated. +func (c *AppsServicesVersionsPatchCall) UpdateMask(updateMask string) *AppsServicesVersionsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsPatchCall) Fields(s ...googleapi.Field) *AppsServicesVersionsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsPatchCall) Context(ctx context.Context) *AppsServicesVersionsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified Version resource. You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.serving_status): For Version resources that use basic scaling, manual scaling, or run in the App Engine flexible environment.\ninstance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.instance_class): For Version resources that run in the App Engine standard environment.\nautomatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.\nautomatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#Version.FIELDS.automatic_scaling): For Version resources that use automatic scaling and run in the App Engine standard environment.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "httpMethod": "PATCH", + // "id": "appengine.apps.services.versions.patch", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource to update. Example: apps/myapp/services/default/versions/1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Standard field mask for the set of fields to be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}", + // "request": { + // "$ref": "Version" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.debug": + +type AppsServicesVersionsInstancesDebugCall struct { + s *APIService + appsId string + servicesId string + versionsId string + instancesId string + debuginstancerequest *DebugInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Debug: Enables debugging on a VM instance. This allows you to use the +// SSH command to connect to the virtual machine where the instance +// lives. While in "debug mode", the instance continues to serve live +// traffic. You should delete the instance when you are done debugging +// and then allow the system to take over and determine if another +// instance should be started.Only applicable for instances in App +// Engine flexible environment. +func (r *AppsServicesVersionsInstancesService) Debug(appsId string, servicesId string, versionsId string, instancesId string, debuginstancerequest *DebugInstanceRequest) *AppsServicesVersionsInstancesDebugCall { + c := &AppsServicesVersionsInstancesDebugCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.instancesId = instancesId + c.debuginstancerequest = debuginstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsInstancesDebugCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesDebugCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesDebugCall) Context(ctx context.Context) *AppsServicesVersionsInstancesDebugCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AppsServicesVersionsInstancesDebugCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesDebugCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.debuginstancerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + "instancesId": c.instancesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.debug" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesDebugCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enables debugging on a VM instance. This allows you to use the SSH command to connect to the virtual machine where the instance lives. While in \"debug mode\", the instance continues to serve live traffic. You should delete the instance when you are done debugging and then allow the system to take over and determine if another instance should be started.Only applicable for instances in App Engine flexible environment.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug", + // "httpMethod": "POST", + // "id": "appengine.apps.services.versions.instances.debug", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId", + // "instancesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instancesId": { + // "description": "Part of `name`. 
See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}:debug", + // "request": { + // "$ref": "DebugInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.delete": + +type AppsServicesVersionsInstancesDeleteCall struct { + s *APIService + appsId string + servicesId string + versionsId string + instancesId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Stops a running instance. +func (r *AppsServicesVersionsInstancesService) Delete(appsId string, servicesId string, versionsId string, instancesId string) *AppsServicesVersionsInstancesDeleteCall { + c := &AppsServicesVersionsInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.instancesId = instancesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AppsServicesVersionsInstancesDeleteCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesDeleteCall) Context(ctx context.Context) *AppsServicesVersionsInstancesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsInstancesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + "instancesId": c.instancesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stops a running instance.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "httpMethod": "DELETE", + // "id": "appengine.apps.services.versions.instances.delete", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId", + // "instancesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instancesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.get": + +type AppsServicesVersionsInstancesGetCall struct { + s *APIService + appsId string + servicesId string + versionsId string + instancesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets instance information. +func (r *AppsServicesVersionsInstancesService) Get(appsId string, servicesId string, versionsId string, instancesId string) *AppsServicesVersionsInstancesGetCall { + c := &AppsServicesVersionsInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + c.instancesId = instancesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *AppsServicesVersionsInstancesGetCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsInstancesGetCall) IfNoneMatch(entityTag string) *AppsServicesVersionsInstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesGetCall) Context(ctx context.Context) *AppsServicesVersionsInstancesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsInstancesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + "instancesId": c.instancesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Instance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets instance information.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.instances.get", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId", + // "instancesId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "instancesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `name`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}", + // "response": { + // "$ref": "Instance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.apps.services.versions.instances.list": + +type AppsServicesVersionsInstancesListCall struct { + s *APIService + appsId string + servicesId string + versionsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the instances of a version. +func (r *AppsServicesVersionsInstancesService) List(appsId string, servicesId string, versionsId string) *AppsServicesVersionsInstancesListCall { + c := &AppsServicesVersionsInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.appsId = appsId + c.servicesId = servicesId + c.versionsId = versionsId + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum results to +// return per page. +func (c *AppsServicesVersionsInstancesListCall) PageSize(pageSize int64) *AppsServicesVersionsInstancesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Continuation token +// for fetching the next page of results. +func (c *AppsServicesVersionsInstancesListCall) PageToken(pageToken string) *AppsServicesVersionsInstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *AppsServicesVersionsInstancesListCall) Fields(s ...googleapi.Field) *AppsServicesVersionsInstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AppsServicesVersionsInstancesListCall) IfNoneMatch(entityTag string) *AppsServicesVersionsInstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AppsServicesVersionsInstancesListCall) Context(ctx context.Context) *AppsServicesVersionsInstancesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AppsServicesVersionsInstancesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AppsServicesVersionsInstancesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "appsId": c.appsId, + "servicesId": c.servicesId, + "versionsId": c.versionsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.apps.services.versions.instances.list" call. +// Exactly one of *ListInstancesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListInstancesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AppsServicesVersionsInstancesListCall) Do(opts ...googleapi.CallOption) (*ListInstancesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListInstancesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances of a version.", + // "flatPath": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances", + // "httpMethod": "GET", + // "id": "appengine.apps.services.versions.instances.list", + // "parameterOrder": [ + // "appsId", + // "servicesId", + // "versionsId" + // ], + // "parameters": { + // "appsId": { + // "description": "Part of `parent`. Name of the parent Version resource. Example: apps/myapp/services/default/versions/v1.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum results to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Continuation token for fetching the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "versionsId": { + // "description": "Part of `parent`. See documentation of `appsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances", + // "response": { + // "$ref": "ListInstancesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *AppsServicesVersionsInstancesListCall) Pages(ctx context.Context, f func(*ListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go index 7a162fc43..f45027193 100644 --- a/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*APIService, error) { } type APIService struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Apps *AppsService } @@ -81,6 +82,10 @@ func (s *APIService) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAppsService(s *APIService) *AppsService { rs := &AppsService{s: s} rs.Locations = NewAppsLocationsService(s) @@ -2362,6 +2367,7 @@ func (c *AppsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) if err != nil { @@ -2502,6 +2508,7 @@ func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2647,6 +2654,7 @@ func (c *AppsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) if err != nil { @@ -2796,6 +2804,7 @@ func (c *AppsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2965,6 +2974,7 @@ func (c *AppsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3132,6 +3142,7 @@ func (c *AppsModulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta4/apps/{appsId}/modules/{modulesId}") @@ -3276,6 +3287,7 @@ func (c *AppsModulesGetCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3437,6 +3449,7 @@ func (c *AppsModulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3629,6 +3642,7 @@ func (c *AppsModulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.module) if err != nil { @@ -3782,6 +3796,7 @@ func (c *AppsModulesVersionsCreateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -3925,6 +3940,7 @@ func (c *AppsModulesVersionsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta4/apps/{appsId}/modules/{modulesId}/versions/{versionsId}") @@ -4092,6 +4108,7 @@ func (c *AppsModulesVersionsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4283,6 +4300,7 @@ func (c *AppsModulesVersionsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4491,6 +4509,7 @@ func (c *AppsModulesVersionsPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -4657,6 +4676,7 @@ func (c *AppsModulesVersionsInstancesDebugCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.debuginstancerequest) if err != nil { @@ -4818,6 +4838,7 @@ func (c *AppsModulesVersionsInstancesDeleteCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta4/apps/{appsId}/modules/{modulesId}/versions/{versionsId}/instances/{instancesId}") @@ -4982,6 +5003,7 @@ func (c *AppsModulesVersionsInstancesGetCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5163,6 +5185,7 @@ func (c *AppsModulesVersionsInstancesListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5354,6 +5377,7 @@ func (c *AppsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5526,6 +5550,7 @@ func (c *AppsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go index 616296d94..a827ea4a0 100644 --- a/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*APIService, error) { } type APIService struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Apps *AppsService } @@ -81,6 +82,10 @@ func (s *APIService) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAppsService(s *APIService) *AppsService { rs := &AppsService{s: s} rs.Locations = NewAppsLocationsService(s) @@ -2364,6 +2369,7 @@ func (c *AppsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) if err != nil { @@ -2505,6 +2511,7 @@ func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2650,6 +2657,7 @@ func (c *AppsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) if err != nil { @@ -2799,6 +2807,7 @@ func (c *AppsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2968,6 +2977,7 @@ func (c *AppsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3148,6 +3158,7 @@ func (c *AppsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3320,6 +3331,7 @@ func (c *AppsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3487,6 +3499,7 @@ func (c *AppsServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta5/apps/{appsId}/services/{servicesId}") @@ -3631,6 +3644,7 @@ func (c *AppsServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3792,6 +3806,7 @@ func (c *AppsServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3984,6 +3999,7 @@ func (c *AppsServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.service) if err != nil { @@ -4137,6 +4153,7 @@ func (c *AppsServicesVersionsCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -4280,6 +4297,7 @@ func (c *AppsServicesVersionsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta5/apps/{appsId}/services/{servicesId}/versions/{versionsId}") @@ -4447,6 +4465,7 @@ func (c *AppsServicesVersionsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4638,6 +4657,7 @@ func (c *AppsServicesVersionsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4846,6 +4866,7 @@ func (c *AppsServicesVersionsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -5012,6 +5033,7 @@ func (c *AppsServicesVersionsInstancesDebugCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.debuginstancerequest) if err != nil { @@ -5173,6 +5195,7 @@ func (c *AppsServicesVersionsInstancesDeleteCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta5/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}") @@ -5337,6 +5360,7 @@ func (c *AppsServicesVersionsInstancesGetCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5518,6 +5542,7 @@ func (c *AppsServicesVersionsInstancesListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/appsactivity/v1/appsactivity-api.json b/vendor/google.golang.org/api/appsactivity/v1/appsactivity-api.json index ac92231dd..acd0af203 100644 --- a/vendor/google.golang.org/api/appsactivity/v1/appsactivity-api.json +++ b/vendor/google.golang.org/api/appsactivity/v1/appsactivity-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/reJOwDPkrjuqU_m9CL56zWFA9Q4\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/y29Ei9gfsS6BekrALyS-uw4tzTU\"", "discoveryVersion": "v1", "id": "appsactivity:v1", "name": "appsactivity", "version": "v1", - "revision": "20161111", - "title": "Google Apps Activity API", + "revision": "20170215", + "title": "G Suite Activity API", "description": "Provides a historical view of activity.", "ownerDomain": "google.com", "ownerName": "Google", @@ -70,7 +70,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/activity": { - "description": "View the activity history of your Google Apps" + "description": "View the activity history of your Google apps" }, "https://www.googleapis.com/auth/drive": { "description": "View and manage the files in your Google Drive" diff --git a/vendor/google.golang.org/api/appsactivity/v1/appsactivity-gen.go b/vendor/google.golang.org/api/appsactivity/v1/appsactivity-gen.go index fc3e25a2b..fe2eb3edc 100644 --- a/vendor/google.golang.org/api/appsactivity/v1/appsactivity-gen.go +++ b/vendor/google.golang.org/api/appsactivity/v1/appsactivity-gen.go @@ -1,4 +1,4 @@ -// Package appsactivity provides access to the Google Apps Activity API. +// Package appsactivity provides access to the G Suite Activity API. // // See https://developers.google.com/google-apps/activity/ // @@ -47,7 +47,7 @@ const basePath = "https://www.googleapis.com/appsactivity/v1/" // OAuth2 scopes used by this API. 
const ( - // View the activity history of your Google Apps + // View the activity history of your Google apps ActivityScope = "https://www.googleapis.com/auth/activity" // View and manage the files in your Google Drive @@ -73,9 +73,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Activities *ActivitiesService } @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewActivitiesService(s *Service) *ActivitiesService { rs := &ActivitiesService{s: s} return rs @@ -678,6 +683,7 @@ func (c *ActivitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/appstate/v1/appstate-gen.go b/vendor/google.golang.org/api/appstate/v1/appstate-gen.go index d1ad1bf30..996eb2f22 100644 --- a/vendor/google.golang.org/api/appstate/v1/appstate-gen.go +++ b/vendor/google.golang.org/api/appstate/v1/appstate-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only States *StatesService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewStatesService(s *Service) *StatesService { rs := &StatesService{s: s} return rs @@ -300,6 +305,7 @@ func (c *StatesClearCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "states/{stateKey}/clear") @@ -436,6 +442,7 @@ func (c *StatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "states/{stateKey}") @@ -546,6 +553,7 @@ func (c *StatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -691,6 +699,7 @@ func (c *StatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -827,6 +836,7 @@ func (c *StatesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.updaterequest) if err != nil { diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go index c50d7b4c5..d1990b6a3 100644 --- a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go +++ b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go @@ -83,9 +83,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Datasets *DatasetsService @@ -105,6 +106,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDatasetsService(s *Service) *DatasetsService { rs := &DatasetsService{s: s} return rs @@ -3149,6 +3154,7 @@ func (c *DatasetsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") @@ -3271,6 +3277,7 @@ func (c *DatasetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3409,6 +3416,7 @@ func (c *DatasetsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { @@ -3584,6 +3592,7 @@ func (c *DatasetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3761,6 +3770,7 @@ func (c *DatasetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { @@ -3907,6 +3917,7 @@ func (c *DatasetsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { @@ -4052,6 +4063,7 @@ func (c *JobsCancelCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs/{jobId}/cancel") @@ -4199,6 +4211,7 @@ func (c *JobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4378,6 +4391,7 @@ func (c *JobsGetQueryResultsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4614,6 +4628,7 @@ func (c *JobsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) if err != nil { @@ -4872,6 +4887,7 @@ func (c *JobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5069,6 +5085,7 @@ func (c *JobsQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.queryrequest) if err != nil { @@ -5226,6 +5243,7 @@ func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5381,6 +5399,7 @@ func (c *TabledataInsertAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tabledatainsertallrequest) if err != nil { @@ -5567,6 +5586,7 @@ func (c *TabledataListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5754,6 +5774,7 @@ func (c *TablesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") @@ -5883,6 +5904,7 @@ func (c *TablesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6031,6 +6053,7 @@ func (c *TablesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -6199,6 +6222,7 @@ func (c *TablesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6376,6 +6400,7 @@ func (c *TablesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -6532,6 +6557,7 @@ func (c *TablesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { diff --git a/vendor/google.golang.org/api/blogger/v2/blogger-gen.go b/vendor/google.golang.org/api/blogger/v2/blogger-gen.go index 71c2c38e9..00a784abe 100644 --- a/vendor/google.golang.org/api/blogger/v2/blogger-gen.go +++ b/vendor/google.golang.org/api/blogger/v2/blogger-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Blogs *BlogsService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBlogsService(s *Service) *BlogsService { rs := &BlogsService{s: s} return rs @@ -1179,6 +1184,7 @@ func (c *BlogsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1320,6 +1326,7 @@ func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1503,6 +1510,7 @@ func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1693,6 +1701,7 @@ func (c *PagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1845,6 +1854,7 @@ func (c *PagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1989,6 +1999,7 @@ func (c *PostsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2162,6 +2173,7 @@ func (c *PostsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2342,6 +2354,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2479,6 +2492,7 @@ func (c *UsersBlogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/blogger/v3/blogger-gen.go b/vendor/google.golang.org/api/blogger/v3/blogger-gen.go index 8a509db5a..37d49a765 100644 --- a/vendor/google.golang.org/api/blogger/v3/blogger-gen.go +++ b/vendor/google.golang.org/api/blogger/v3/blogger-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BlogUserInfos *BlogUserInfosService @@ -99,6 +100,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBlogUserInfosService(s *Service) *BlogUserInfosService { rs := &BlogUserInfosService{s: s} return rs @@ -1615,6 +1620,7 @@ func (c *BlogUserInfosGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1786,6 +1792,7 @@ func (c *BlogsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1956,6 +1963,7 @@ func (c *BlogsGetByUrlCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2154,6 +2162,7 @@ func (c *BlogsListByUserCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2336,6 +2345,7 @@ func (c *CommentsApproveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}/approve") @@ -2479,6 +2489,7 @@ func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}") @@ -2620,6 +2631,7 @@ func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2851,6 +2863,7 @@ func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3126,6 +3139,7 @@ func (c *CommentsListByBlogCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3323,6 +3337,7 @@ func (c *CommentsMarkAsSpamCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}/spam") @@ -3466,6 +3481,7 @@ func (c *CommentsRemoveContentCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}/removecontent") @@ -3627,6 +3643,7 @@ func (c *PageViewsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3770,6 +3787,7 @@ func (c *PagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/pages/{pageId}") @@ -3897,6 +3915,7 @@ func (c *PagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4055,6 +4074,7 @@ func (c *PagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = 
v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.page) if err != nil { @@ -4247,6 +4267,7 @@ func (c *PagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4457,6 +4478,7 @@ func (c *PagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.page) if err != nil { @@ -4608,6 +4630,7 @@ func (c *PagesPublishCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/pages/{pageId}/publish") @@ -4741,6 +4764,7 @@ func (c *PagesRevertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/pages/{pageId}/revert") @@ -4890,6 +4914,7 @@ func (c *PagesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.page) if err != nil { @@ -5063,6 +5088,7 @@ func (c *PostUserInfosGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5304,6 +5330,7 @@ func (c *PostUserInfosListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5540,6 +5567,7 @@ func (c *PostsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}") @@ -5692,6 +5720,7 @@ func (c *PostsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5891,6 +5920,7 @@ func (c *PostsGetByPathCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6070,6 +6100,7 @@ func (c *PostsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.post) if err != nil { @@ -6314,6 +6345,7 @@ func (c *PostsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6586,6 +6618,7 @@ func (c *PostsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.post) if err != nil { @@ -6765,6 +6798,7 @@ func (c *PostsPublishCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/publish") @@ -6904,6 +6938,7 @@ func (c *PostsRevertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/revert") @@ -7066,6 +7101,7 @@ func (c *PostsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7260,6 +7296,7 @@ func (c *PostsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.post) if err != nil { @@ -7437,6 +7474,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/books/v1/books-gen.go b/vendor/google.golang.org/api/books/v1/books-gen.go index beaf67f7f..e55a3c984 100644 --- a/vendor/google.golang.org/api/books/v1/books-gen.go +++ b/vendor/google.golang.org/api/books/v1/books-gen.go @@ -72,9 +72,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Bookshelves *BookshelvesService @@ -108,6 +109,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBookshelvesService(s *Service) *BookshelvesService { rs := &BookshelvesService{s: s} rs.Volumes = 
NewBookshelvesVolumesService(s) @@ -4530,6 +4535,7 @@ func (c *BookshelvesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4687,6 +4693,7 @@ func (c *BookshelvesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4860,6 +4867,7 @@ func (c *BookshelvesVolumesListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5043,6 +5051,7 @@ func (c *CloudloadingAddBookCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "cloudloading/addBook") @@ -5172,6 +5181,7 @@ func (c *CloudloadingDeleteBookCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "cloudloading/deleteBook") @@ -5264,6 +5274,7 @@ func (c *CloudloadingUpdateBookCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bookscloudloadingresource) if err != nil { @@ -5392,6 +5403,7 @@ func (c *DictionaryListOfflineMetadataCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5542,6 +5554,7 @@ func (c *LayersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5725,6 +5738,7 @@ func (c *LayersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5934,6 +5948,7 @@ func (c *LayersAnnotationDataGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6205,6 +6220,7 @@ func (c *LayersAnnotationDataListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6455,6 +6471,7 @@ func (c *LayersVolumeAnnotationsGetCall) 
doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6710,6 +6727,7 @@ func (c *LayersVolumeAnnotationsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6944,6 +6962,7 @@ func (c *MyconfigGetUserSettingsCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7070,6 +7089,7 @@ func (c *MyconfigReleaseDownloadAccessCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "myconfig/releaseDownloadAccess") @@ -7229,6 +7249,7 @@ func (c *MyconfigRequestAccessCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "myconfig/requestAccess") @@ -7431,6 +7452,7 @@ func (c *MyconfigSyncVolumeLicensesCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "myconfig/syncVolumeLicenses") @@ -7600,6 +7622,7 @@ func (c *MyconfigUpdateUserSettingsCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.usersettings) if err != nil { @@ -7724,6 +7747,7 @@ func (c *MylibraryAnnotationsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "mylibrary/annotations/{annotationId}") @@ -7846,6 +7870,7 @@ func (c *MylibraryAnnotationsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotation) if err != nil { @@ -8062,6 +8087,7 @@ func (c *MylibraryAnnotationsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8251,6 +8277,7 @@ func (c *MylibraryAnnotationsSummaryCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := 
googleapi.ResolveRelative(c.s.BasePath, "mylibrary/annotations/summary") @@ -8388,6 +8415,7 @@ func (c *MylibraryAnnotationsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotation) if err != nil { @@ -8544,6 +8572,7 @@ func (c *MylibraryBookshelvesAddVolumeCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "mylibrary/bookshelves/{shelf}/addVolume") @@ -8673,6 +8702,7 @@ func (c *MylibraryBookshelvesClearVolumesCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "mylibrary/bookshelves/{shelf}/clearVolumes") @@ -8792,6 +8822,7 @@ func (c *MylibraryBookshelvesGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8940,6 +8971,7 @@ func (c *MylibraryBookshelvesListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9068,6 +9100,7 @@ func (c *MylibraryBookshelvesMoveVolumeCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "mylibrary/bookshelves/{shelf}/moveVolume") @@ -9201,6 +9234,7 @@ func (c *MylibraryBookshelvesRemoveVolumeCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "mylibrary/bookshelves/{shelf}/removeVolume") @@ -9383,6 +9417,7 @@ func (c *MylibraryBookshelvesVolumesListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9581,6 +9616,7 @@ func (c *MylibraryReadingpositionsGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9755,6 +9791,7 @@ func (c *MylibraryReadingpositionsSetPositionCall) doRequest(alt string) (*http. 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "mylibrary/readingpositions/{volumeId}/setPosition") @@ -9925,6 +9962,7 @@ func (c *NotificationGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10074,6 +10112,7 @@ func (c *OnboardingListCategoriesCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10244,6 +10283,7 @@ func (c *OnboardingListCategoryVolumesCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10451,6 +10491,7 @@ func (c *PersonalizedstreamGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10636,6 +10677,7 @@ func (c *PromoofferAcceptCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "promooffer/accept") @@ -10799,6 +10841,7 @@ func (c *PromoofferDismissCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "promooffer/dismiss") @@ -10963,6 +11006,7 @@ func (c *PromoofferGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11117,6 +11161,7 @@ func (c *SeriesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11265,6 +11310,7 @@ func (c *SeriesMembershipGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11457,6 +11503,7 @@ func (c *VolumesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11752,6 +11799,7 @@ func (c *VolumesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12058,6 +12106,7 @@ func (c *VolumesAssociatedListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12297,6 +12346,7 @@ func (c *VolumesMybooksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12520,6 +12570,7 @@ func (c *VolumesRecommendedListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12672,6 +12723,7 @@ func (c *VolumesRecommendedRateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "volumes/recommended/rate") @@ -12876,6 +12928,7 @@ func (c *VolumesUseruploadedListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/calendar/v3/calendar-api.json b/vendor/google.golang.org/api/calendar/v3/calendar-api.json index 9005397f1..2bc2216a8 100644 --- a/vendor/google.golang.org/api/calendar/v3/calendar-api.json +++ b/vendor/google.golang.org/api/calendar/v3/calendar-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/VWub4J15NN4SkSidNp3dhsb_pzo\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/yPodRz8hynPgVqC3iI-kd-YqLsk\"", "discoveryVersion": "v1", "id": "calendar:v3", "name": "calendar", "version": "v3", - "revision": "20160927", + "revision": "20170219", "title": "Calendar API", "description": "Manipulates events and other calendar data.", "ownerDomain": "google.com", @@ -337,7 +337,7 @@ "properties": { "method": { "type": "string", - "description": "The method used to deliver the notification. Possible values are: \n- \"email\" - Reminders are sent via email. \n- \"sms\" - Reminders are sent via SMS. This value is read-only and is ignored on inserts and updates. SMS reminders are only available for Google Apps for Work, Education, and Government customers.", + "description": "The method used to deliver the notification. Possible values are: \n- \"email\" - Reminders are sent via email. \n- \"sms\" - Reminders are sent via SMS. This value is read-only and is ignored on inserts and updates. SMS reminders are only available for G Suite customers.", "annotations": { "required": [ "calendar.calendarList.insert", @@ -901,7 +901,7 @@ "properties": { "method": { "type": "string", - "description": "The method used by this reminder. Possible values are: \n- \"email\" - Reminders are sent via email. \n- \"sms\" - Reminders are sent via SMS. These are only available for Google Apps for Work, Education, and Government customers. 
Requests to set SMS reminders for other account types are ignored. \n- \"popup\" - Reminders are sent via a UI popup.", + "description": "The method used by this reminder. Possible values are: \n- \"email\" - Reminders are sent via email. \n- \"sms\" - Reminders are sent via SMS. These are only available for G Suite customers. Requests to set SMS reminders for other account types are ignored. \n- \"popup\" - Reminders are sent via a UI popup.", "annotations": { "required": [ "calendar.calendarList.insert", diff --git a/vendor/google.golang.org/api/calendar/v3/calendar-gen.go b/vendor/google.golang.org/api/calendar/v3/calendar-gen.go index 2caa3c305..7ffc05807 100644 --- a/vendor/google.golang.org/api/calendar/v3/calendar-gen.go +++ b/vendor/google.golang.org/api/calendar/v3/calendar-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Acl *AclService @@ -99,6 +100,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAclService(s *Service) *AclService { rs := &AclService{s: s} return rs @@ -561,7 +566,7 @@ type CalendarNotification struct { // - "email" - Reminders are sent via email. // - "sms" - Reminders are sent via SMS. This value is read-only and is // ignored on inserts and updates. SMS reminders are only available for - // Google Apps for Work, Education, and Government customers. + // G Suite customers. Method string `json:"method,omitempty"` // Type: The type of notification. Possible values are: @@ -1406,9 +1411,9 @@ func (s *EventDateTime) MarshalJSON() ([]byte, error) { type EventReminder struct { // Method: The method used by this reminder. Possible values are: // - "email" - Reminders are sent via email. - // - "sms" - Reminders are sent via SMS. These are only available for - // Google Apps for Work, Education, and Government customers. Requests - // to set SMS reminders for other account types are ignored. + // - "sms" - Reminders are sent via SMS. These are only available for G + // Suite customers. Requests to set SMS reminders for other account + // types are ignored. // - "popup" - Reminders are sent via a UI popup. 
Method string `json:"method,omitempty"` @@ -1872,6 +1877,7 @@ func (c *AclDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "calendars/{calendarId}/acl/{ruleId}") @@ -1988,6 +1994,7 @@ func (c *AclGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2125,6 +2132,7 @@ func (c *AclInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.aclrule) if err != nil { @@ -2307,6 +2315,7 @@ func (c *AclListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2482,6 +2491,7 @@ func (c *AclPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.aclrule) if err != nil { @@ -2625,6 +2635,7 @@ func (c *AclUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.aclrule) if err != nil { @@ -2806,6 +2817,7 @@ func (c *AclWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -2961,6 +2973,7 @@ func (c *CalendarListDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/me/calendarList/{calendarId}") @@ -3067,6 +3080,7 @@ func (c *CalendarListGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3204,6 +3218,7 @@ func (c *CalendarListInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarlistentry) if err != nil { @@ -3402,6 +3417,7 @@ func (c *CalendarListListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3596,6 +3612,7 @@ func (c *CalendarListPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarlistentry) if err != nil { @@ -3744,6 +3761,7 @@ func (c *CalendarListUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarlistentry) if err != nil { @@ -3945,6 +3963,7 @@ func (c *CalendarListWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -4112,6 +4131,7 @@ func (c *CalendarsClearCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "calendars/{calendarId}/clear") @@ -4208,6 +4228,7 @@ func (c *CalendarsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "calendars/{calendarId}") @@ -4314,6 +4335,7 @@ func (c *CalendarsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4441,6 +4463,7 @@ func (c *CalendarsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendar) if err != nil { @@ -4561,6 +4584,7 @@ func (c *CalendarsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendar) if err != nil { @@ -4694,6 +4718,7 @@ func (c *CalendarsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendar) if err != nil { @@ -4825,6 +4850,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -4925,6 +4951,7 @@ func (c *ColorsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5048,6 +5075,7 @@ func (c *EventsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "calendars/{calendarId}/events/{eventId}") @@ -5197,6 +5225,7 @@ func (c *EventsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5360,6 +5389,7 @@ func (c *EventsImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.event) if err != nil { @@ -5523,6 +5553,7 @@ func (c *EventsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.event) if err != nil { @@ -5761,6 +5792,7 @@ func (c *EventsInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6160,6 +6192,7 @@ func (c *EventsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6424,6 +6457,7 @@ func (c *EventsMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "calendars/{calendarId}/events/{eventId}/move") @@ -6608,6 +6642,7 @@ func (c *EventsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.event) if err != nil { @@ -6778,6 +6813,7 @@ func (c *EventsQuickAddCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "calendars/{calendarId}/events/quickAdd") @@ -6954,6 +6990,7 @@ func (c *EventsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.event) if err != nil { @@ -7297,6 +7334,7 @@ func (c *EventsWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -7534,6 +7572,7 @@ func (c *FreebusyQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.freebusyrequest) if err != nil { @@ -7663,6 +7702,7 @@ func (c *SettingsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7828,6 +7868,7 @@ func (c *SettingsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8011,6 +8052,7 @@ func (c *SettingsWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { diff --git a/vendor/google.golang.org/api/civicinfo/v2/civicinfo-gen.go b/vendor/google.golang.org/api/civicinfo/v2/civicinfo-gen.go index 3c2a86911..0a36fba4a 100644 --- a/vendor/google.golang.org/api/civicinfo/v2/civicinfo-gen.go +++ b/vendor/google.golang.org/api/civicinfo/v2/civicinfo-gen.go @@ -57,9 +57,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Divisions *DivisionsService @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDivisionsService(s *Service) *DivisionsService { rs := &DivisionsService{s: s} return rs @@ -1495,6 +1500,7 @@ func (c *DivisionsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1625,6 +1631,7 @@ func (c *ElectionsElectionQueryCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1775,6 +1782,7 @@ func (c *ElectionsVoterInfoQueryCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1986,6 +1994,7 @@ func (c *RepresentativesRepresentativeInfoByAddressCall) doRequest(alt 
string) ( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2237,6 +2246,7 @@ func (c *RepresentativesRepresentativeInfoByDivisionCall) doRequest(alt string) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/classroom/v1/classroom-gen.go b/vendor/google.golang.org/api/classroom/v1/classroom-gen.go index 6269fad98..10de697a1 100644 --- a/vendor/google.golang.org/api/classroom/v1/classroom-gen.go +++ b/vendor/google.golang.org/api/classroom/v1/classroom-gen.go @@ -104,9 +104,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Courses *CoursesService @@ -122,6 +123,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewCoursesService(s *Service) *CoursesService { rs := &CoursesService{s: s} rs.Aliases = NewCoursesAliasesService(s) @@ -2063,6 +2068,7 @@ func (c *CoursesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.course) if err != nil { @@ -2183,6 +2189,7 @@ func (c *CoursesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/courses/{id}") @@ -2320,6 +2327,7 @@ func (c *CoursesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2513,6 +2521,7 @@ func (c *CoursesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2703,6 +2712,7 @@ func (c *CoursesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.course) if err != nil { @@ -2845,6 +2855,7 @@ func (c *CoursesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.course) if err != nil { @@ -2982,6 +2993,7 
@@ func (c *CoursesAliasesCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.coursealias) if err != nil { @@ -3118,6 +3130,7 @@ func (c *CoursesAliasesDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/courses/{courseId}/aliases/{alias}") @@ -3281,6 +3294,7 @@ func (c *CoursesAliasesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3454,6 +3468,7 @@ func (c *CoursesCourseWorkCreateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.coursework) if err != nil { @@ -3596,6 +3611,7 @@ func (c *CoursesCourseWorkDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/courses/{courseId}/courseWork/{id}") @@ -3744,6 +3760,7 @@ func (c *CoursesCourseWorkGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3943,6 +3960,7 @@ func (c *CoursesCourseWorkListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4153,6 +4171,7 @@ func (c *CoursesCourseWorkPatchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.coursework) if err != nil { @@ -4316,6 +4335,7 @@ func (c *CoursesCourseWorkStudentSubmissionsGetCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4543,6 +4563,7 @@ func (c *CoursesCourseWorkStudentSubmissionsListCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4760,6 +4781,7 @@ func (c *CoursesCourseWorkStudentSubmissionsModifyAttachmentsCall) doRequest(alt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.modifyattachmentsrequest) if err != nil { @@ -4935,6 +4957,7 @@ func (c *CoursesCourseWorkStudentSubmissionsPatchCall) doRequest(alt string) (*h reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.studentsubmission) if err != nil { @@ -5108,6 +5131,7 @@ func (c *CoursesCourseWorkStudentSubmissionsReclaimCall) doRequest(alt string) ( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reclaimstudentsubmissionrequest) if err != nil { @@ -5274,6 +5298,7 @@ func (c *CoursesCourseWorkStudentSubmissionsReturnCall) doRequest(alt string) (* reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.returnstudentsubmissionrequest) if err != nil { @@ -5438,6 +5463,7 @@ func (c *CoursesCourseWorkStudentSubmissionsTurnInCall) doRequest(alt string) (* reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.turninstudentsubmissionrequest) if err != nil { @@ -5605,6 +5631,7 @@ func (c *CoursesStudentsCreateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.student) if err != nil { @@ -5749,6 +5776,7 @@ func (c *CoursesStudentsDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/courses/{courseId}/students/{userId}") @@ -5897,6 +5925,7 @@ func (c *CoursesStudentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6065,6 +6094,7 @@ func (c *CoursesStudentsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6236,6 +6266,7 @@ func (c *CoursesTeachersCreateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.teacher) if err != nil { @@ -6376,6 +6407,7 @@ func (c *CoursesTeachersDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/courses/{courseId}/teachers/{userId}") @@ -6524,6 +6556,7 @@ func (c 
*CoursesTeachersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6692,6 +6725,7 @@ func (c *CoursesTeachersListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6861,6 +6895,7 @@ func (c *InvitationsAcceptCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/invitations/{id}:accept") @@ -6992,6 +7027,7 @@ func (c *InvitationsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.invitation) if err != nil { @@ -7112,6 +7148,7 @@ func (c *InvitationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/invitations/{id}") @@ -7249,6 +7286,7 @@ func (c *InvitationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7423,6 +7461,7 @@ func (c *InvitationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7594,6 +7633,7 @@ func (c *UserProfilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7749,6 +7789,7 @@ func (c *UserProfilesGuardianInvitationsCreateCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.guardianinvitation) if err != nil { @@ -7900,6 +7941,7 @@ func (c *UserProfilesGuardianInvitationsGetCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8093,6 +8135,7 @@ func (c *UserProfilesGuardianInvitationsListCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8292,6 +8335,7 @@ func (c *UserProfilesGuardianInvitationsPatchCall) doRequest(alt 
string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.guardianinvitation) if err != nil { @@ -8447,6 +8491,7 @@ func (c *UserProfilesGuardiansDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/userProfiles/{studentId}/guardians/{guardianId}") @@ -8599,6 +8644,7 @@ func (c *UserProfilesGuardiansGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8784,6 +8830,7 @@ func (c *UserProfilesGuardiansListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go b/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go index a2a147405..91cd41513 100644 --- a/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go +++ b/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BillingAccounts *BillingAccountsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBillingAccountsService(s *Service) *BillingAccountsService { rs := &BillingAccountsService{s: s} rs.Projects = NewBillingAccountsProjectsService(s) @@ -354,6 +359,7 @@ func (c *BillingAccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -508,6 +514,7 @@ func (c *BillingAccountsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -684,6 +691,7 @@ func (c *BillingAccountsProjectsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -857,6 +865,7 @@ func (c *ProjectsGetBillingInfoCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) 
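Reviewer note on the repeated hunks above and below: the regenerated clients now attach an `x-goog-api-client` header to every outgoing request, built by a new `clientHeader()` helper on each generated `Service` (backed by `gensupport.GoogleClientHeader`, as the struct changes earlier in this diff show). The following is a minimal standalone sketch of that pattern only, not code from this diff; the `service` struct, header value, and URL are placeholders, and the real value is composed by `gensupport.GoogleClientHeader`.

```go
// Sketch: illustrate the header pattern the regenerated doRequest methods follow.
// Only the header names ("User-Agent", "x-goog-api-client") come from the diff;
// everything else here is hypothetical.
package main

import (
	"fmt"
	"net/http"
)

type service struct {
	userAgent    string // optional additional User-Agent fragment
	clientHeader string // precomputed x-goog-api-client value
}

// newRequest mirrors the doRequest pattern: every call sets both headers.
func (s *service) newRequest(method, url string) (*http.Request, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", s.userAgent)
	req.Header.Set("x-goog-api-client", s.clientHeader)
	return req, nil
}

func main() {
	s := &service{
		userAgent:    "google-api-go-client/0.5 example-app/1.0",
		clientHeader: "gdcl/20170210", // placeholder; real value comes from gensupport.GoogleClientHeader
	}
	req, err := s.newRequest("GET", "https://www.googleapis.com/calendar/v3/users/me/calendarList")
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("x-goog-api-client"))
}
```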
if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1013,6 +1022,7 @@ func (c *ProjectsUpdateBillingInfoCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectbillinginfo) if err != nil { diff --git a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json index 6d78a3b44..6cee850bb 100644 --- a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json +++ b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json @@ -1,33 +1,247 @@ { + "discoveryVersion": "v1", + "version_module": "True", "schemas": { - "BuildOptions": { + "BuildTrigger": { + "description": "Configuration for an automated build in response to source repository\nchanges.", + "type": "object", "properties": { - "requestedVerifyOption": { - "description": "Requested verifiability options.", + "id": { + "description": "Unique identifier of the trigger.\n\n@OutputOnly", + "type": "string" + }, + "build": { + "$ref": "Build", + "description": "Contents of the build template." + }, + "substitutions": { + "description": "Substitutions data for Build resource.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "description": { + "description": "Human-readable description of this trigger.", + "type": "string" + }, + "createTime": { + "description": "Time when the trigger was created.\n\n@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "disabled": { + "description": "If true, the trigger will never result in a build.", + "type": "boolean" + }, + "triggerTemplate": { + "$ref": "RepoSource", + "description": "Template describing the types of source changes to trigger a build.\n\nBranch and tag names in trigger templates are interpreted as regular\nexpressions. Any branch or tag change that matches that regular expression\nwill trigger a build." 
+ }, + "filename": { + "description": "Path, from the source root, to a file whose contents is used for the\ntemplate.", + "type": "string" + } + }, + "id": "BuildTrigger" + }, + "Build": { + "description": "A build resource in the Container Builder API.\n\nAt a high level, a Build describes where to find source code, how to build\nit (for example, the builder image to run on the source), and what tag to\napply to the built image when it is pushed to Google Container Registry.\n\nFields can include the following variables which will be expanded when the\nbuild is created:\n\n- $PROJECT_ID: the project ID of the build.\n- $BUILD_ID: the autogenerated ID of the build.\n- $REPO_NAME: the source repository name specified by RepoSource.\n- $BRANCH_NAME: the branch name specified by RepoSource.\n- $TAG_NAME: the tag name specified by RepoSource.\n- $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or\n resolved from the specified branch or tag.", + "type": "object", + "properties": { + "results": { + "$ref": "Results", + "description": "Results of the build.\n@OutputOnly" + }, + "logsBucket": { + "description": "Google Cloud Storage bucket where logs should be written (see\n[Bucket Name\nRequirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).\nLogs file names will be of the format `${logs_bucket}/log-${build_id}.txt`.", + "type": "string" + }, + "steps": { + "type": "array", + "items": { + "$ref": "BuildStep" + }, + "description": "Describes the operations to be performed on the workspace." + }, + "buildTriggerId": { + "description": "The ID of the BuildTrigger that triggered this build, if it was\ntriggered automatically.\n@OutputOnly", + "type": "string" + }, + "id": { + "description": "Unique identifier of the build.\n@OutputOnly", + "type": "string" + }, + "substitutions": { + "description": "Substitutions data for Build resource.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "startTime": { + "description": "Time at which execution of the build was started.\n@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "sourceProvenance": { + "$ref": "SourceProvenance", + "description": "A permanent fixed identifier for source.\n@OutputOnly" + }, + "createTime": { + "description": "Time at which the request to create the build was received.\n@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "images": { + "description": "A list of images to be pushed upon the successful completion of all build\nsteps.\n\nThe images will be pushed using the builder service account's credentials.\n\nThe digests of the pushed images will be stored in the Build resource's\nresults field.\n\nIf any of the images fail to be pushed, the build is marked FAILURE.", + "type": "array", + "items": { + "type": "string" + } + }, + "projectId": { + "type": "string", + "description": "ID of the project.\n@OutputOnly." + }, + "finishTime": { + "description": "Time at which execution of the build was finished.\n\nThe difference between finish_time and start_time is the duration of the\nbuild's execution.\n@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "logUrl": { + "description": "URL to logs for this build in Google Cloud Logging.\n@OutputOnly", + "type": "string" + }, + "source": { + "$ref": "Source", + "description": "Describes where to find the source files to build." 
+ }, + "options": { + "description": "Special options for this build.", + "$ref": "BuildOptions" + }, + "timeout": { + "type": "string", + "description": "Amount of time that this build should be allowed to run, to second\ngranularity. If this amount of time elapses, work on the build will cease\nand the build status will be TIMEOUT.\n\nDefault time is ten minutes.", + "format": "google-duration" + }, + "status": { + "description": "Status of the build.\n@OutputOnly", "type": "string", "enumDescriptions": [ - "Not a verifiable build. (default)", - "Verified build." + "Status of the build is unknown.", + "Build is queued; work has not yet begun.", + "Build is being executed.", + "Build finished successfully.", + "Build failed to complete successfully.", + "Build failed due to an internal cause.", + "Build took longer than was allowed.", + "Build was canceled by a user." ], "enum": [ - "NOT_VERIFIED", - "VERIFIED" + "STATUS_UNKNOWN", + "QUEUED", + "WORKING", + "SUCCESS", + "FAILURE", + "INTERNAL_ERROR", + "TIMEOUT", + "CANCELLED" ] }, + "statusDetail": { + "description": "Customer-readable message about the current status.\n@OutputOnly", + "type": "string" + } + }, + "id": "Build" + }, + "CancelBuildRequest": { + "description": "Request to cancel an ongoing build.", + "type": "object", + "properties": {}, + "id": "CancelBuildRequest" + }, + "ListBuildsResponse": { + "description": "Response including listed builds.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "Token to receive the next page of results.", + "type": "string" + }, + "builds": { + "description": "Builds will be sorted by create_time, descending.", + "type": "array", + "items": { + "$ref": "Build" + } + } + }, + "id": "ListBuildsResponse" + }, + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "type": "object", + "properties": { + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Operation" + } + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + } + }, + "id": "ListOperationsResponse" + }, + "Source": { + "description": "Source describes the location of the source in a supported storage\nservice.", + "type": "object", + "properties": { + "storageSource": { + "$ref": "StorageSource", + "description": "If provided, get the source from this location in in Google Cloud\nStorage." + }, + "repoSource": { + "description": "If provided, get source from this location in a Cloud Repo.", + "$ref": "RepoSource" + } + }, + "id": "Source" + }, + "BuildOptions": { + "properties": { "sourceProvenanceHash": { "description": "Requested hash for SourceProvenance.", "type": "array", "items": { - "type": "string", "enum": [ "NONE", "SHA256" - ] + ], + "type": "string" }, "enumDescriptions": [ "No hash requested.", "Use a sha256 hash." ] + }, + "requestedVerifyOption": { + "enum": [ + "NOT_VERIFIED", + "VERIFIED" + ], + "description": "Requested verifiability options.", + "type": "string", + "enumDescriptions": [ + "Not a verifiable build. (default)", + "Verified build." + ] } }, "id": "BuildOptions", @@ -35,34 +249,35 @@ "type": "object" }, "StorageSource": { + "id": "StorageSource", "description": "StorageSource describes the location of the source in an archive file in\nGoogle Cloud Storage.", "type": "object", "properties": { + "generation": { + "description": "Google Cloud Storage generation for the object. 
If the generation is\nomitted, the latest generation will be used.", + "format": "int64", + "type": "string" + }, "bucket": { "description": "Google Cloud Storage bucket containing source (see\n[Bucket Name\nRequirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", "type": "string" }, "object": { - "type": "string", - "description": "Google Cloud Storage object containing source.\n\nThis object must be a gzipped archive file (.tar.gz) containing source to\nbuild." - }, - "generation": { - "type": "string", - "description": "Google Cloud Storage generation for the object. If the generation is\nomitted, the latest generation will be used.", - "format": "int64" + "description": "Google Cloud Storage object containing source.\n\nThis object must be a gzipped archive file (.tar.gz) containing source to\nbuild.", + "type": "string" } - }, - "id": "StorageSource" + } }, "Results": { + "description": "Results describes the artifacts created by the build pipeline.", "type": "object", "properties": { "buildStepImages": { + "description": "List of build step digests, in order corresponding to build step indices.", "type": "array", "items": { "type": "string" - }, - "description": "List of build step digests, in order corresponding to build step indices." + } }, "images": { "description": "Images that were built as a part of the build.", @@ -72,16 +287,15 @@ } } }, - "id": "Results", - "description": "Results describes the artifacts created by the build pipeline." + "id": "Results" }, "BuildOperationMetadata": { "description": "Metadata for build operations.", "type": "object", "properties": { "build": { - "$ref": "Build", - "description": "The build that the operation is tracking." + "description": "The build that the operation is tracking.", + "$ref": "Build" } }, "id": "BuildOperationMetadata" @@ -99,90 +313,100 @@ "description": "A copy of the build's source.storage_source, if exists, with any\ngenerations resolved." }, "fileHashes": { - "type": "object", "additionalProperties": { "$ref": "FileHashes" }, - "description": "Hash(es) of the build source, which can be used to verify that the original\nsource integrity was maintained in the build. Note that FileHashes will\nonly be populated if BuildOptions has requested a SourceProvenanceHash.\n\nThe keys to this map are file paths used as build source and the values\ncontain the hash values for those files.\n\nIf the build source came in a single package such as a gzipped tarfile\n(.tar.gz), the FileHash will be for the single path to that file.\n@OutputOnly" + "description": "Hash(es) of the build source, which can be used to verify that the original\nsource integrity was maintained in the build. 
Note that FileHashes will\nonly be populated if BuildOptions has requested a SourceProvenanceHash.\n\nThe keys to this map are file paths used as build source and the values\ncontain the hash values for those files.\n\nIf the build source came in a single package such as a gzipped tarfile\n(.tar.gz), the FileHash will be for the single path to that file.\n@OutputOnly", + "type": "object" } }, "id": "SourceProvenance" }, "CancelOperationRequest": { + "id": "CancelOperationRequest", "description": "The request message for Operations.CancelOperation.", "type": "object", - "properties": {}, - "id": "CancelOperationRequest" + "properties": {} + }, + "ListBuildTriggersResponse": { + "id": "ListBuildTriggersResponse", + "description": "Response containing existing BuildTriggers.", + "type": "object", + "properties": { + "triggers": { + "description": "BuildTriggers for the project, sorted by create_time descending.", + "type": "array", + "items": { + "$ref": "BuildTrigger" + } + } + } }, "Operation": { "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", "type": "object", "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" + }, "response": { - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", "type": "object", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" - } + }, + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`." }, "name": { "type": "string", "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`." }, "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure or cancellation." + "description": "The error result of the operation in case of failure or cancellation.", + "$ref": "Status" }, "metadata": { + "type": "object", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. 
Any method that returns a\nlong-running operation should document the metadata type, if any.", - "type": "object" - }, - "done": { - "type": "boolean", - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable." + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any." } }, "id": "Operation" }, - "ListBuildTriggersResponse": { - "id": "ListBuildTriggersResponse", - "description": "Response containing existing BuildTriggers.", - "type": "object", - "properties": { - "triggers": { - "description": "BuildTriggers for the project, sorted by create_time descending.", - "type": "array", - "items": { - "$ref": "BuildTrigger" - } - } - } - }, "BuiltImage": { - "description": "BuiltImage describes an image built by the pipeline.", - "type": "object", "properties": { "name": { - "description": "Name used to push the container image to Google Container Registry, as\npresented to `docker push`.", - "type": "string" + "type": "string", + "description": "Name used to push the container image to Google Container Registry, as\npresented to `docker push`." }, "digest": { "description": "Docker Registry 2.0 digest.", "type": "string" } }, - "id": "BuiltImage" + "id": "BuiltImage", + "description": "BuiltImage describes an image built by the pipeline.", + "type": "object" }, "BuildStep": { - "id": "BuildStep", - "description": "BuildStep describes a step to perform in the build pipeline.", "type": "object", "properties": { + "name": { + "description": "The name of the container image that will run this particular build step.\n\nIf the image is already available in the host's Docker daemon's cache, it\nwill be run directly. If not, the host will attempt to pull the image\nfirst, using the builder service account's credentials if necessary.\n\nThe Docker daemon's cache will already have the latest versions of all of\nthe officially supported build steps\n(https://github.com/GoogleCloudPlatform/cloud-builders). The Docker daemon\nwill also have cached many of the layers for some popular images, like\n\"ubuntu\", \"debian\", but they will be refreshed at the time you attempt to\nuse them.\n\nIf you built an image in a previous build step, it will be stored in the\nhost's Docker daemon's cache and is available to use as the name for a\nlater build step.", + "type": "string" + }, + "entrypoint": { + "description": "Optional entrypoint to be used instead of the build step image's default\nIf unset, the image's default will be used.", + "type": "string" + }, + "id": { + "description": "Optional unique identifier for this build step, used in wait_for to\nreference this build step as a dependency.", + "type": "string" + }, "dir": { "description": "Working directory (relative to project source root) to use when running\nthis operation's container.", "type": "string" @@ -202,29 +426,32 @@ } }, "args": { - "description": "A list of arguments that will be presented to the step when it is started.\n\nIf the image used to run the step's container has an entrypoint, these args\nwill be used as arguments to that entrypoint. 
If the image does not define\nan entrypoint, the first element in args will be used as the entrypoint,\nand the remainder will be used as arguments.", "type": "array", "items": { "type": "string" - } - }, - "name": { - "description": "The name of the container image that will run this particular build step.\n\nIf the image is already available in the host's Docker daemon's cache, it\nwill be run directly. If not, the host will attempt to pull the image\nfirst, using the builder service account's credentials if necessary.\n\nThe Docker daemon's cache will already have the latest versions of all of\nthe officially supported build steps\n(https://github.com/GoogleCloudPlatform/cloud-builders). The Docker daemon\nwill also have cached many of the layers for some popular images, like\n\"ubuntu\", \"debian\", but they will be refreshed at the time you attempt to\nuse them.\n\nIf you built an image in a previous build step, it will be stored in the\nhost's Docker daemon's cache and is available to use as the name for a\nlater build step.", - "type": "string" - }, - "entrypoint": { - "description": "Optional entrypoint to be used instead of the build step image's default\nIf unset, the image's default will be used.", - "type": "string" - }, - "id": { - "description": "Optional unique identifier for this build step, used in wait_for to\nreference this build step as a dependency.", - "type": "string" + }, + "description": "A list of arguments that will be presented to the step when it is started.\n\nIf the image used to run the step's container has an entrypoint, these args\nwill be used as arguments to that entrypoint. If the image does not define\nan entrypoint, the first element in args will be used as the entrypoint,\nand the remainder will be used as arguments." } - } + }, + "id": "BuildStep", + "description": "BuildStep describes a step to perform in the build pipeline." }, "RepoSource": { + "description": "RepoSource describes the location of the source in a Google Cloud Source\nRepository.", "type": "object", "properties": { + "projectId": { + "type": "string", + "description": "ID of the project that owns the repo. If omitted, the project ID requesting\nthe build is assumed." + }, + "repoName": { + "type": "string", + "description": "Name of the repo. If omitted, the name \"default\" is assumed." + }, + "branchName": { + "type": "string", + "description": "Name of the branch to build." + }, "tagName": { "description": "Name of the tag to build.", "type": "string" @@ -232,29 +459,16 @@ "commitSha": { "description": "Explicit commit SHA to build.", "type": "string" - }, - "projectId": { - "description": "ID of the project that owns the repo. If omitted, the project ID requesting\nthe build is assumed.", - "type": "string" - }, - "repoName": { - "description": "Name of the repo. If omitted, the name \"default\" is assumed.", - "type": "string" - }, - "branchName": { - "description": "Name of the branch to build.", - "type": "string" } }, - "id": "RepoSource", - "description": "RepoSource describes the location of the source in a Google Cloud Source\nRepository." + "id": "RepoSource" }, "Hash": { - "id": "Hash", "description": "Container message for hash values.", "type": "object", "properties": { "type": { + "description": "The type of hash that was performed.", "type": "string", "enumDescriptions": [ "No hash requested.", @@ -263,265 +477,67 @@ "enum": [ "NONE", "SHA256" - ], - "description": "The type of hash that was performed." 
+ ] }, "value": { "description": "The hash value.", "format": "byte", "type": "string" } - } + }, + "id": "Hash" }, "FileHashes": { + "id": "FileHashes", "description": "Container message for hashes of byte content of files, used in\nSourceProvenance messages to verify integrity of source input to the build.", "type": "object", "properties": { "fileHash": { - "description": "Collection of file hashes.", "type": "array", "items": { "$ref": "Hash" - } - } - }, - "id": "FileHashes" - }, - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", - "type": "object", - "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" - }, - "details": { - "description": "A list of messages that carry the error details. 
There will be a\ncommon set of message types for APIs to use.", - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - } - } - }, - "id": "Status" - }, - "Empty": { - "properties": {}, - "id": "Empty", - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object" - }, - "BuildTrigger": { - "description": "Configuration for an automated build in response to source repository\nchanges.", - "type": "object", - "properties": { - "disabled": { - "description": "If true, the trigger will never result in a build.", - "type": "boolean" - }, - "createTime": { - "description": "Time when the trigger was created.\n\n@OutputOnly", - "format": "google-datetime", - "type": "string" - }, - "filename": { - "description": "Path, from the source root, to a file whose contents is used for the\ntemplate.", - "type": "string" - }, - "triggerTemplate": { - "$ref": "RepoSource", - "description": "Template describing the types of source changes to trigger a build.\n\nBranch and tag names in trigger templates are interpreted as regular\nexpressions. Any branch or tag change that matches that regular expression\nwill trigger a build." - }, - "id": { - "description": "Unique identifier of the trigger.\n\n@OutputOnly", - "type": "string" - }, - "build": { - "$ref": "Build", - "description": "Contents of the build template." 
- }, - "description": { - "description": "Human-readable description of this trigger.", - "type": "string" - } - }, - "id": "BuildTrigger" - }, - "Build": { - "description": "A build resource in the Container Builder API.\n\nAt a high level, a Build describes where to find source code, how to build\nit (for example, the builder image to run on the source), and what tag to\napply to the built image when it is pushed to Google Container Registry.\n\nFields can include the following variables which will be expanded when the\nbuild is created:\n\n- $PROJECT_ID: the project ID of the build.\n- $BUILD_ID: the autogenerated ID of the build.\n- $REPO_NAME: the source repository name specified by RepoSource.\n- $BRANCH_NAME: the branch name specified by RepoSource.\n- $TAG_NAME: the tag name specified by RepoSource.\n- $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or\n resolved from the specified branch or tag.", - "type": "object", - "properties": { - "images": { - "description": "A list of images to be pushed upon the successful completion of all build\nsteps.\n\nThe images will be pushed using the builder service account's credentials.\n\nThe digests of the pushed images will be stored in the Build resource's\nresults field.\n\nIf any of the images fail to be pushed, the build is marked FAILURE.", - "type": "array", - "items": { - "type": "string" - } - }, - "projectId": { - "description": "ID of the project.\n@OutputOnly.", - "type": "string" - }, - "finishTime": { - "description": "Time at which execution of the build was finished.\n\nThe difference between finish_time and start_time is the duration of the\nbuild's execution.\n@OutputOnly", - "format": "google-datetime", - "type": "string" - }, - "logUrl": { - "description": "URL to logs for this build in Google Cloud Logging.\n@OutputOnly", - "type": "string" - }, - "options": { - "$ref": "BuildOptions", - "description": "Special options for this build." - }, - "source": { - "$ref": "Source", - "description": "Describes where to find the source files to build." - }, - "statusDetail": { - "description": "Customer-readable message about the current status.\n@OutputOnly", - "type": "string" - }, - "status": { - "type": "string", - "enumDescriptions": [ - "Status of the build is unknown.", - "Build is queued; work has not yet begun.", - "Build is being executed.", - "Build finished successfully.", - "Build failed to complete successfully.", - "Build failed due to an internal cause.", - "Build took longer than was allowed.", - "Build was canceled by a user." - ], - "enum": [ - "STATUS_UNKNOWN", - "QUEUED", - "WORKING", - "SUCCESS", - "FAILURE", - "INTERNAL_ERROR", - "TIMEOUT", - "CANCELLED" - ], - "description": "Status of the build.\n@OutputOnly" - }, - "timeout": { - "description": "Amount of time that this build should be allowed to run, to second\ngranularity. 
If this amount of time elapses, work on the build will cease\nand the build status will be TIMEOUT.\n\nDefault time is ten minutes.", - "format": "google-duration", - "type": "string" - }, - "results": { - "description": "Results of the build.\n@OutputOnly", - "$ref": "Results" - }, - "logsBucket": { - "description": "Google Cloud Storage bucket where logs should be written (see\n[Bucket Name\nRequirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).\nLogs file names will be of the format `${logs_bucket}/log-${build_id}.txt`.", - "type": "string" - }, - "steps": { - "description": "Describes the operations to be performed on the workspace.", - "type": "array", - "items": { - "$ref": "BuildStep" - } - }, - "buildTriggerId": { - "description": "The ID of the BuildTrigger that triggered this build, if it was\ntriggered automatically.\n@OutputOnly", - "type": "string" - }, - "id": { - "description": "Unique identifier of the build.\n@OutputOnly", - "type": "string" - }, - "startTime": { - "description": "Time at which execution of the build was started.\n@OutputOnly", - "format": "google-datetime", - "type": "string" - }, - "sourceProvenance": { - "description": "A permanent fixed identifier for source.\n@OutputOnly", - "$ref": "SourceProvenance" - }, - "createTime": { - "description": "Time at which the request to create the build was received.\n@OutputOnly", - "format": "google-datetime", - "type": "string" - } - }, - "id": "Build" - }, - "CancelBuildRequest": { - "description": "Request to cancel an ongoing build.", - "type": "object", - "properties": {}, - "id": "CancelBuildRequest" - }, - "ListBuildsResponse": { - "description": "Response including listed builds.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "Token to receive the next page of results.", - "type": "string" - }, - "builds": { - "description": "Builds will be sorted by create_time, descending.", - "type": "array", - "items": { - "$ref": "Build" - } + }, + "description": "Collection of file hashes." } - }, - "id": "ListBuildsResponse" + } }, - "ListOperationsResponse": { - "description": "The response message for Operations.ListOperations.", + "Status": { "type": "object", "properties": { - "nextPageToken": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { "type": "string", - "description": "The standard List next-page token." + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client." }, - "operations": { + "details": { "type": "array", "items": { - "$ref": "Operation" + "additionalProperties": { + "type": "any", + "description": "Properties of the object. Contains field @type with type URL." + }, + "type": "object" }, - "description": "A list of operations that matches the specified filter in the request." + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use." } }, - "id": "ListOperationsResponse" + "id": "Status", + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). 
The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons." }, - "Source": { - "description": "Source describes the location of the source in a supported storage\nservice.", + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", "type": "object", - "properties": { - "storageSource": { - "description": "If provided, get the source from this location in in Google Cloud\nStorage.", - "$ref": "StorageSource" - }, - "repoSource": { - "description": "If provided, get source from this location in a Cloud Repo.", - "$ref": "RepoSource" - } - }, - "id": "Source" + "properties": {}, + "id": "Empty" } }, "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, "protocol": "rest", "canonicalName": "Cloud Build", @@ -541,24 +557,120 @@ "title": "Google Cloud Container Builder API", "ownerName": "Google", "resources": { + "operations": { + "methods": { + "cancel": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^operations/.+$", + "location": "path", + "description": "The name of the operation resource to be cancelled." + } + }, + "flatPath": "v1/operations/{operationsId}:cancel", + "path": "v1/{+name}:cancel", + "id": "cloudbuild.operations.cancel", + "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "request": { + "$ref": "CancelOperationRequest" + } + }, + "list": { + "parameters": { + "name": { + "pattern": "^operations$", + "location": "path", + "description": "The name of the operation collection.", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "The standard list page token.", + "type": "string" + }, + "pageSize": { + "type": "integer", + "location": "query", + "description": "The standard list page size.", + "format": "int32" + }, + "filter": { + "location": "query", + "description": "The standard list filter.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/operations", + "path": "v1/{+name}", + "id": "cloudbuild.operations.list", + "description": "Lists operations that match the specified filter in the request. 
If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", + "response": { + "$ref": "ListOperationsResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET" + }, + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "pattern": "^operations/.+$", + "location": "path", + "description": "The name of the operation resource.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/operations/{operationsId}", + "id": "cloudbuild.operations.get", + "path": "v1/{+name}", + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice." + } + } + }, "projects": { "resources": { "builds": { "methods": { - "cancel": { - "flatPath": "v1/projects/{projectId}/builds/{id}:cancel", - "path": "v1/projects/{projectId}/builds/{id}:cancel", - "id": "cloudbuild.projects.builds.cancel", - "description": "Cancels a requested build in progress.", + "create": { + "path": "v1/projects/{projectId}/builds", + "id": "cloudbuild.projects.builds.create", + "description": "Starts a build with the specified configuration.\n\nThe long-running Operation returned by this method will include the ID of\nthe build, which can be passed to GetBuild to determine its status (e.g.,\nsuccess or failure).", "request": { - "$ref": "CancelBuildRequest" + "$ref": "Build" }, "response": { - "$ref": "Build" + "$ref": "Operation" }, "parameterOrder": [ - "projectId", - "id" + "projectId" ], "httpMethod": "POST", "scopes": [ @@ -566,22 +678,20 @@ ], "parameters": { "projectId": { - "description": "ID of the project.", - "required": true, - "type": "string", - "location": "path" - }, - "id": { "location": "path", - "description": "ID of the build.", + "description": "ID of the project.", "required": true, "type": "string" } - } + }, + "flatPath": "v1/projects/{projectId}/builds" }, - "get": { - "description": "Returns information about a previously requested build.\n\nThe Build that is returned includes its status (e.g., success or failure,\nor in-progress), and timing information.", - "httpMethod": "GET", + "cancel": { + "request": { + "$ref": "CancelBuildRequest" + }, + "description": "Cancels a requested build in progress.", + "httpMethod": "POST", "parameterOrder": [ "projectId", "id" @@ -606,11 +716,20 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/builds/{id}", - "id": "cloudbuild.projects.builds.get", - "path": "v1/projects/{projectId}/builds/{id}" + "flatPath": "v1/projects/{projectId}/builds/{id}:cancel", + "id": "cloudbuild.projects.builds.cancel", + "path": "v1/projects/{projectId}/builds/{id}:cancel" }, - "list": { + "get": { + "description": "Returns information about a previously requested build.\n\nThe Build that is returned includes its status (e.g., success or failure,\nor in-progress), and timing information.", + "response": { + "$ref": "Build" + }, + "parameterOrder": [ + "projectId", + "id" + ], + "httpMethod": "GET", "parameters": { "projectId": { "location": "path", @@ -618,56 +737,48 @@ "required": true, "type": "string" }, - "filter": { + "id": { + "description": "ID of the build.", + "required": true, 
"type": "string", - "location": "query", - "description": "The raw filter text to constrain the results." - }, - "pageToken": { - "location": "query", - "description": "Token to provide to skip to a particular spot in the list.", - "type": "string" - }, - "pageSize": { - "description": "Number of results to return in the list.", - "format": "int32", - "type": "integer", - "location": "query" + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/builds", - "id": "cloudbuild.projects.builds.list", - "path": "v1/projects/{projectId}/builds", - "description": "Lists previously requested builds.\n\nPreviously requested builds may still be in-progress, or may have finished\nsuccessfully or unsuccessfully.", + "flatPath": "v1/projects/{projectId}/builds/{id}", + "path": "v1/projects/{projectId}/builds/{id}", + "id": "cloudbuild.projects.builds.get" + }, + "list": { "httpMethod": "GET", "response": { "$ref": "ListBuildsResponse" }, - "parameterOrder": [ - "projectId" - ] - }, - "create": { - "id": "cloudbuild.projects.builds.create", - "path": "v1/projects/{projectId}/builds", - "description": "Starts a build with the specified configuration.\n\nThe long-running Operation returned by this method will include the ID of\nthe build, which can be passed to GetBuild to determine its status (e.g.,\nsuccess or failure).", - "request": { - "$ref": "Build" - }, - "httpMethod": "POST", "parameterOrder": [ "projectId" ], - "response": { - "$ref": "Operation" - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { + "filter": { + "location": "query", + "description": "The raw filter text to constrain the results.", + "type": "string" + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "Token to provide to skip to a particular spot in the list." + }, + "pageSize": { + "location": "query", + "description": "Number of results to return in the list.", + "format": "int32", + "type": "integer" + }, "projectId": { "location": "path", "description": "ID of the project.", @@ -675,85 +786,57 @@ "type": "string" } }, - "flatPath": "v1/projects/{projectId}/builds" + "flatPath": "v1/projects/{projectId}/builds", + "id": "cloudbuild.projects.builds.list", + "path": "v1/projects/{projectId}/builds", + "description": "Lists previously requested builds.\n\nPreviously requested builds may still be in-progress, or may have finished\nsuccessfully or unsuccessfully." } } }, "triggers": { "methods": { - "delete": { + "get": { "response": { - "$ref": "Empty" + "$ref": "BuildTrigger" }, "parameterOrder": [ "projectId", "triggerId" ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "triggerId": { - "required": true, - "type": "string", - "location": "path", - "description": "ID of the BuildTrigger to delete." - }, - "projectId": { - "required": true, - "type": "string", - "location": "path", - "description": "ID of the project that owns the trigger." - } - }, - "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", - "path": "v1/projects/{projectId}/triggers/{triggerId}", - "id": "cloudbuild.projects.triggers.delete", - "description": "Deletes an BuildTrigger by its project ID and trigger ID.\n\nThis API is experimental." 
- }, - "get": { "httpMethod": "GET", - "parameterOrder": [ - "projectId", - "triggerId" - ], - "response": { - "$ref": "BuildTrigger" - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { "triggerId": { - "location": "path", "description": "ID of the BuildTrigger to get.", "required": true, - "type": "string" + "type": "string", + "location": "path" }, "projectId": { - "description": "ID of the project that owns the trigger.", "required": true, "type": "string", - "location": "path" + "location": "path", + "description": "ID of the project that owns the trigger." } }, "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", - "id": "cloudbuild.projects.triggers.get", "path": "v1/projects/{projectId}/triggers/{triggerId}", + "id": "cloudbuild.projects.triggers.get", "description": "Gets information about a BuildTrigger.\n\nThis API is experimental." }, "list": { + "id": "cloudbuild.projects.triggers.list", + "path": "v1/projects/{projectId}/triggers", + "description": "Lists existing BuildTrigger.\n\nThis API is experimental.", + "httpMethod": "GET", "response": { "$ref": "ListBuildTriggersResponse" }, "parameterOrder": [ "projectId" ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], "parameters": { "projectId": { "description": "ID of the project for which to list BuildTriggers.", @@ -762,29 +845,12 @@ "location": "path" } }, - "flatPath": "v1/projects/{projectId}/triggers", - "path": "v1/projects/{projectId}/triggers", - "id": "cloudbuild.projects.triggers.list", - "description": "Lists existing BuildTrigger.\n\nThis API is experimental." - }, - "patch": { - "path": "v1/projects/{projectId}/triggers/{triggerId}", - "id": "cloudbuild.projects.triggers.patch", - "description": "Updates an BuildTrigger by its project ID and trigger ID.\n\nThis API is experimental.", - "request": { - "$ref": "BuildTrigger" - }, - "response": { - "$ref": "BuildTrigger" - }, - "parameterOrder": [ - "projectId", - "triggerId" - ], - "httpMethod": "PATCH", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], + "flatPath": "v1/projects/{projectId}/triggers" + }, + "patch": { "parameters": { "triggerId": { "location": "path", @@ -799,20 +865,37 @@ "type": "string" } }, - "flatPath": "v1/projects/{projectId}/triggers/{triggerId}" - }, - "create": { - "description": "Creates a new BuildTrigger.\n\nThis API is experimental.", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", + "id": "cloudbuild.projects.triggers.patch", + "path": "v1/projects/{projectId}/triggers/{triggerId}", "request": { "$ref": "BuildTrigger" }, + "description": "Updates an BuildTrigger by its project ID and trigger ID.\n\nThis API is experimental.", + "httpMethod": "PATCH", + "parameterOrder": [ + "projectId", + "triggerId" + ], "response": { "$ref": "BuildTrigger" + } + }, + "create": { + "description": "Creates a new BuildTrigger.\n\nThis API is experimental.", + "request": { + "$ref": "BuildTrigger" }, + "httpMethod": "POST", "parameterOrder": [ "projectId" ], - "httpMethod": "POST", + "response": { + "$ref": "BuildTrigger" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], @@ -825,138 +908,50 @@ } }, "flatPath": "v1/projects/{projectId}/triggers", - "path": "v1/projects/{projectId}/triggers", - "id": "cloudbuild.projects.triggers.create" - } - } - } - } - }, - "operations": { - "methods": { - "list": { - "httpMethod": "GET", - "response": { - "$ref": 
"ListOperationsResponse" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "filter": { - "description": "The standard list filter.", - "type": "string", - "location": "query" - }, - "name": { - "pattern": "^operations$", - "location": "path", - "description": "The name of the operation collection.", - "required": true, - "type": "string" - }, - "pageToken": { - "location": "query", - "description": "The standard list page token.", - "type": "string" + "id": "cloudbuild.projects.triggers.create", + "path": "v1/projects/{projectId}/triggers" }, - "pageSize": { - "location": "query", - "description": "The standard list page size.", - "format": "int32", - "type": "integer" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/operations", - "id": "cloudbuild.operations.list", - "path": "v1/{+name}", - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`." - }, - "get": { - "path": "v1/{+name}", - "id": "cloudbuild.operations.get", - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^operations/.+$", - "location": "path", - "description": "The name of the operation resource.", - "required": true, - "type": "string" - } - }, - "flatPath": "v1/operations/{operationsId}" - }, - "cancel": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The name of the operation resource to be cancelled.", - "required": true, - "type": "string", - "pattern": "^operations/.+$", - "location": "path" + "delete": { + "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", + "id": "cloudbuild.projects.triggers.delete", + "path": "v1/projects/{projectId}/triggers/{triggerId}", + "description": "Deletes an BuildTrigger by its project ID and trigger ID.\n\nThis API is experimental.", + "httpMethod": "DELETE", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "projectId", + "triggerId" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "triggerId": { + "location": "path", + "description": "ID of the BuildTrigger to delete.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "ID of the project that owns the trigger.", + "required": true, + "type": "string", + "location": "path" + } + } } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/operations/{operationsId}:cancel", - "path": "v1/{+name}:cancel", - "id": "cloudbuild.operations.cancel", - "request": { - "$ref": "CancelOperationRequest" - }, - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. 
Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`." + } } } } }, "parameters": { - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" - }, - "upload_protocol": { - "location": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true", - "location": "query" - }, "fields": { + "location": "query", "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" + "type": "string" }, "uploadType": { "location": "query", @@ -964,22 +959,22 @@ "type": "string" }, "callback": { + "location": "query", "description": "JSONP", - "type": "string", - "location": "query" + "type": "string" }, "$.xgafv": { - "enum": [ - "1", - "2" - ], "description": "V1 error format.", "type": "string", "enumDescriptions": [ "v1 error format", "v2 error format" ], - "location": "query" + "location": "query", + "enum": [ + "1", + "2" + ] }, "alt": { "enum": [ @@ -997,37 +992,56 @@ "description": "Data format for response.", "default": "json" }, - "access_token": { + "key": { "location": "query", - "description": "OAuth access token.", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "type": "string" }, - "key": { + "access_token": { "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "description": "OAuth access token.", "type": "string" }, "quotaUser": { - "location": "query", "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" + "type": "string", + "location": "query" }, "pp": { "description": "Pretty-print response.", "type": "boolean", "default": "true", "location": "query" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" } }, "version": "v1", "baseUrl": "https://cloudbuild.googleapis.com/", "servicePath": "", - "kind": "discovery#restDescription", "description": "Builds container images in the cloud.", + "kind": "discovery#restDescription", "basePath": "", - "revision": "20170125", - "documentationLink": "https://cloud.google.com/container-builder/docs/", + "revision": "20170207", "id": "cloudbuild:v1", - "discoveryVersion": "v1", - "version_module": "True" + "documentationLink": "https://cloud.google.com/container-builder/docs/" } diff --git a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go index a6887f7fa..32541a0b2 100644 --- a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go +++ b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Operations *OperationsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -240,6 +245,9 @@ type Build struct { // Steps: Describes the operations to be performed on the workspace. Steps []*BuildStep `json:"steps,omitempty"` + // Substitutions: Substitutions data for Build resource. + Substitutions map[string]string `json:"substitutions,omitempty"` + // Timeout: Amount of time that this build should be allowed to run, to // second // granularity. If this amount of time elapses, work on the build will @@ -476,6 +484,9 @@ type BuildTrigger struct { // @OutputOnly Id string `json:"id,omitempty"` + // Substitutions: Substitutions data for Build resource. + Substitutions map[string]string `json:"substitutions,omitempty"` + // TriggerTemplate: Template describing the types of source changes to // trigger a build. 
// @@ -1222,6 +1233,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { @@ -1370,6 +1382,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1537,6 +1550,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1706,6 +1720,7 @@ func (c *ProjectsBuildsCancelCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cancelbuildrequest) if err != nil { @@ -1854,6 +1869,7 @@ func (c *ProjectsBuildsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.build) if err != nil { @@ -2003,6 +2019,7 @@ func (c *ProjectsBuildsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2174,6 +2191,7 @@ func (c *ProjectsBuildsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2342,6 +2360,7 @@ func (c *ProjectsTriggersCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.buildtrigger) if err != nil { @@ -2479,6 +2498,7 @@ func (c *ProjectsTriggersDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/triggers/{triggerId}") @@ -2626,6 +2646,7 @@ func (c *ProjectsTriggersGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2774,6 +2795,7 @@ func (c *ProjectsTriggersListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2908,6 
+2930,7 @@ func (c *ProjectsTriggersPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.buildtrigger) if err != nil { diff --git a/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-api.json b/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-api.json index 25ada33ff..52327d53d 100644 --- a/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-api.json +++ b/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-api.json @@ -1,522 +1,483 @@ { - "id": "clouddebugger:v2", + "canonicalName": "Cloud Debugger", "auth": { "oauth2": { "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, "https://www.googleapis.com/auth/cloud_debugger": { "description": "Manage cloud debugger" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" } } } }, - "description": "Examines the call stack and variables of a running application without stopping or slowing it down.\n", - "protocol": "rest", + "rootUrl": "https://clouddebugger.googleapis.com/", + "ownerDomain": "google.com", + "name": "clouddebugger", + "batchPath": "batch", "title": "Stackdriver Debugger API", + "ownerName": "Google", "resources": { - "controller": { + "debugger": { "resources": { "debuggees": { "resources": { "breakpoints": { "methods": { - "update": { - "id": "clouddebugger.controller.debuggees.breakpoints.update", + "set": { + "httpMethod": "POST", + "parameterOrder": [ + "debuggeeId" + ], "response": { - "$ref": "UpdateActiveBreakpointResponse" + "$ref": "SetBreakpointResponse" }, - "parameterOrder": [ - "debuggeeId", - "id" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger" ], - "description": "Updates the breakpoint state or mutable fields.\nThe entire Breakpoint message must be sent back to the controller\nservice.\n\nUpdates to active breakpoint fields are only allowed if the new value\ndoes not change the breakpoint specification. Updates to the `location`,\n`condition` and `expression` fields should not alter the breakpoint\nsemantics. 
These may only make changes such as canonicalizing a value\nor snapping the location to the correct line of code.", + "parameters": { + "clientVersion": { + "location": "query", + "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", + "type": "string" + }, + "debuggeeId": { + "location": "path", + "description": "ID of the debuggee where the breakpoint is to be set.", + "required": true, + "type": "string" + } + }, + "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints/set", + "id": "clouddebugger.debugger.debuggees.breakpoints.set", + "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints/set", + "description": "Sets the breakpoint to the debuggee.", "request": { - "$ref": "UpdateActiveBreakpointRequest" + "$ref": "Breakpoint" + } + }, + "delete": { + "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}", + "id": "clouddebugger.debugger.debuggees.breakpoints.delete", + "description": "Deletes the breakpoint from the debuggee.", + "response": { + "$ref": "Empty" }, - "flatPath": "v2/controller/debuggees/{debuggeeId}/breakpoints/{id}", - "httpMethod": "PUT", + "parameterOrder": [ + "debuggeeId", + "breakpointId" + ], + "httpMethod": "DELETE", "parameters": { + "clientVersion": { + "location": "query", + "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", + "type": "string" + }, + "breakpointId": { + "description": "ID of the breakpoint to delete.", + "required": true, + "type": "string", + "location": "path" + }, "debuggeeId": { - "description": "Identifies the debuggee being debugged.", + "description": "ID of the debuggee whose breakpoint to delete.", "required": true, - "location": "path", + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger" + ], + "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}" + }, + "get": { + "httpMethod": "GET", + "response": { + "$ref": "GetBreakpointResponse" + }, + "parameterOrder": [ + "debuggeeId", + "breakpointId" + ], + "parameters": { + "clientVersion": { + "location": "query", + "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", "type": "string" }, - "id": { - "description": "Breakpoint identifier, unique in the scope of the debuggee.", + "breakpointId": { + "location": "path", + "description": "ID of the breakpoint to get.", "required": true, + "type": "string" + }, + "debuggeeId": { "location": "path", + "description": "ID of the debuggee whose breakpoint to get.", + "required": true, "type": "string" } }, - "path": "v2/controller/debuggees/{debuggeeId}/breakpoints/{id}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud_debugger" - ] + ], + "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}", + "id": "clouddebugger.debugger.debuggees.breakpoints.get", + "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}", + "description": "Gets breakpoint information." 
}, "list": { - "id": "clouddebugger.controller.debuggees.breakpoints.list", + "httpMethod": "GET", "response": { - "$ref": "ListActiveBreakpointsResponse" + "$ref": "ListBreakpointsResponse" }, "parameterOrder": [ "debuggeeId" ], - "description": "Returns the list of all active breakpoints for the debuggee.\n\nThe breakpoint specification (location, condition, and expression\nfields) is semantically immutable, although the field values may\nchange. For example, an agent may update the location line number\nto reflect the actual line where the breakpoint was set, but this\ndoesn't change the breakpoint semantics.\n\nThis means that an agent does not need to check if a breakpoint has changed\nwhen it encounters the same breakpoint on a successive call.\nMoreover, an agent should remember the breakpoints that are completed\nuntil the controller removes them from the active list to avoid\nsetting those breakpoints again.", - "flatPath": "v2/controller/debuggees/{debuggeeId}/breakpoints", - "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger" + ], "parameters": { + "includeInactive": { + "location": "query", + "description": "When set to `true`, the response includes active and inactive\nbreakpoints. Otherwise, it includes only active breakpoints.", + "type": "boolean" + }, + "includeAllUsers": { + "description": "When set to `true`, the response includes the list of breakpoints set by\nany user. Otherwise, it includes only breakpoints set by the caller.", + "type": "boolean", + "location": "query" + }, + "stripResults": { + "description": "This field is deprecated. The following fields are always stripped out of\nthe result: `stack_frames`, `evaluated_expressions` and `variable_table`.", + "type": "boolean", + "location": "query" + }, "debuggeeId": { - "description": "Identifies the debuggee.", + "description": "ID of the debuggee whose breakpoints to list.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" }, - "successOnTimeout": { - "description": "If set to `true`, returns `google.rpc.Code.OK` status and sets the\n`wait_expired` response field to `true` when the server-selected timeout\nhas expired (recommended).\n\nIf set to `false`, returns `google.rpc.Code.ABORTED` status when the\nserver-selected timeout has expired (deprecated).", + "waitToken": { "location": "query", - "type": "boolean" + "description": "A wait token that, if specified, blocks the call until the breakpoints\nlist has changed, or a server selected timeout has expired. The value\nshould be set from the last response. The error code\n`google.rpc.Code.ABORTED` (RPC) is returned on wait timeout, which\nshould be called again with the same `wait_token`.", + "type": "string" }, - "waitToken": { - "description": "A wait token that, if specified, blocks the method call until the list\nof active breakpoints has changed, or a server selected timeout has\nexpired. 
The value should be set from the last returned response.", + "clientVersion": { "location": "query", + "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", "type": "string" + }, + "action.value": { + "enum": [ + "CAPTURE", + "LOG" + ], + "description": "Only breakpoints with the specified action will pass the filter.", + "type": "string", + "location": "query" } }, - "path": "v2/controller/debuggees/{debuggeeId}/breakpoints", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud_debugger" - ] + "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints", + "id": "clouddebugger.debugger.debuggees.breakpoints.list", + "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints", + "description": "Lists all breakpoints for the debuggee." } } } }, "methods": { - "register": { - "id": "clouddebugger.controller.debuggees.register", + "list": { "response": { - "$ref": "RegisterDebuggeeResponse" + "$ref": "ListDebuggeesResponse" }, + "httpMethod": "GET", "parameterOrder": [], - "description": "Registers the debuggee with the controller service.\n\nAll agents attached to the same application should call this method with\nthe same request content to get back the same stable `debuggee_id`. Agents\nshould call this method again whenever `google.rpc.Code.NOT_FOUND` is\nreturned from any controller method.\n\nThis allows the controller service to disable the agent or recover from any\ndata loss. If the debuggee is disabled by the server, the response will\nhave `is_disabled` set to `true`.", - "request": { - "$ref": "RegisterDebuggeeRequest" + "parameters": { + "clientVersion": { + "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", + "type": "string", + "location": "query" + }, + "includeInactive": { + "description": "When set to `true`, the result includes all debuggees. Otherwise, the\nresult includes only debuggees that are active.", + "type": "boolean", + "location": "query" + }, + "project": { + "location": "query", + "description": "Project number of a Google Cloud project whose debuggees to list.", + "type": "string" + } }, - "flatPath": "v2/controller/debuggees/register", - "httpMethod": "POST", - "parameters": {}, - "path": "v2/controller/debuggees/register", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud_debugger" - ] + ], + "flatPath": "v2/debugger/debuggees", + "path": "v2/debugger/debuggees", + "id": "clouddebugger.debugger.debuggees.list", + "description": "Lists all the debuggees that the user can set breakpoints to." } } } } }, - "debugger": { + "controller": { "resources": { "debuggees": { + "methods": { + "register": { + "path": "v2/controller/debuggees/register", + "id": "clouddebugger.controller.debuggees.register", + "description": "Registers the debuggee with the controller service.\n\nAll agents attached to the same application should call this method with\nthe same request content to get back the same stable `debuggee_id`. Agents\nshould call this method again whenever `google.rpc.Code.NOT_FOUND` is\nreturned from any controller method.\n\nThis allows the controller service to disable the agent or recover from any\ndata loss. 
If the debuggee is disabled by the server, the response will\nhave `is_disabled` set to `true`.", + "request": { + "$ref": "RegisterDebuggeeRequest" + }, + "response": { + "$ref": "RegisterDebuggeeResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger" + ], + "parameters": {}, + "flatPath": "v2/controller/debuggees/register" + } + }, "resources": { "breakpoints": { "methods": { - "get": { - "id": "clouddebugger.debugger.debuggees.breakpoints.get", - "response": { - "$ref": "GetBreakpointResponse" - }, + "list": { + "httpMethod": "GET", "parameterOrder": [ - "debuggeeId", - "breakpointId" + "debuggeeId" ], - "description": "Gets breakpoint information.", - "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}", - "httpMethod": "GET", + "response": { + "$ref": "ListActiveBreakpointsResponse" + }, "parameters": { - "debuggeeId": { - "description": "ID of the debuggee whose breakpoint to get.", - "required": true, - "location": "path", + "waitToken": { + "location": "query", + "description": "A wait token that, if specified, blocks the method call until the list\nof active breakpoints has changed, or a server selected timeout has\nexpired. The value should be set from the last returned response.", "type": "string" }, - "breakpointId": { - "description": "ID of the breakpoint to get.", - "required": true, + "debuggeeId": { "location": "path", + "description": "Identifies the debuggee.", + "required": true, "type": "string" }, - "clientVersion": { - "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", - "location": "query", - "type": "string" + "successOnTimeout": { + "description": "If set to `true`, returns `google.rpc.Code.OK` status and sets the\n`wait_expired` response field to `true` when the server-selected timeout\nhas expired (recommended).\n\nIf set to `false`, returns `google.rpc.Code.ABORTED` status when the\nserver-selected timeout has expired (deprecated).", + "type": "boolean", + "location": "query" } }, - "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud_debugger" - ] + ], + "flatPath": "v2/controller/debuggees/{debuggeeId}/breakpoints", + "id": "clouddebugger.controller.debuggees.breakpoints.list", + "path": "v2/controller/debuggees/{debuggeeId}/breakpoints", + "description": "Returns the list of all active breakpoints for the debuggee.\n\nThe breakpoint specification (location, condition, and expression\nfields) is semantically immutable, although the field values may\nchange. For example, an agent may update the location line number\nto reflect the actual line where the breakpoint was set, but this\ndoesn't change the breakpoint semantics.\n\nThis means that an agent does not need to check if a breakpoint has changed\nwhen it encounters the same breakpoint on a successive call.\nMoreover, an agent should remember the breakpoints that are completed\nuntil the controller removes them from the active list to avoid\nsetting those breakpoints again." 
}, - "list": { - "id": "clouddebugger.debugger.debuggees.breakpoints.list", + "update": { "response": { - "$ref": "ListBreakpointsResponse" + "$ref": "UpdateActiveBreakpointResponse" }, "parameterOrder": [ - "debuggeeId" + "debuggeeId", + "id" ], - "description": "Lists all breakpoints for the debuggee.", - "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints", - "httpMethod": "GET", - "parameters": { - "debuggeeId": { - "description": "ID of the debuggee whose breakpoints to list.", - "required": true, - "location": "path", - "type": "string" - }, - "clientVersion": { - "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", - "location": "query", - "type": "string" - }, - "includeAllUsers": { - "description": "When set to `true`, the response includes the list of breakpoints set by\nany user. Otherwise, it includes only breakpoints set by the caller.", - "location": "query", - "type": "boolean" - }, - "stripResults": { - "description": "This field is deprecated. The following fields are always stripped out of\nthe result: `stack_frames`, `evaluated_expressions` and `variable_table`.", - "location": "query", - "type": "boolean" - }, - "action.value": { - "description": "Only breakpoints with the specified action will pass the filter.", - "enum": [ - "CAPTURE", - "LOG" - ], - "location": "query", - "type": "string" - }, - "includeInactive": { - "description": "When set to `true`, the response includes active and inactive\nbreakpoints. Otherwise, it includes only active breakpoints.", - "location": "query", - "type": "boolean" - }, - "waitToken": { - "description": "A wait token that, if specified, blocks the call until the breakpoints\nlist has changed, or a server selected timeout has expired. The value\nshould be set from the last response. 
The error code\n`google.rpc.Code.ABORTED` (RPC) is returned on wait timeout, which\nshould be called again with the same `wait_token`.", - "location": "query", - "type": "string" - } - }, - "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints", + "httpMethod": "PUT", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud_debugger" - ] - }, - "set": { - "id": "clouddebugger.debugger.debuggees.breakpoints.set", - "response": { - "$ref": "SetBreakpointResponse" - }, - "parameterOrder": [ - "debuggeeId" ], - "description": "Sets the breakpoint to the debuggee.", - "request": { - "$ref": "Breakpoint" - }, - "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints/set", - "httpMethod": "POST", "parameters": { - "debuggeeId": { - "description": "ID of the debuggee where the breakpoint is to be set.", + "id": { + "description": "Breakpoint identifier, unique in the scope of the debuggee.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" }, - "clientVersion": { - "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", - "location": "query", - "type": "string" - } - }, - "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints/set", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud_debugger" - ] - }, - "delete": { - "id": "clouddebugger.debugger.debuggees.breakpoints.delete", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "debuggeeId", - "breakpointId" - ], - "description": "Deletes the breakpoint from the debuggee.", - "flatPath": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}", - "httpMethod": "DELETE", - "parameters": { "debuggeeId": { - "description": "ID of the debuggee whose breakpoint to delete.", - "required": true, - "location": "path", - "type": "string" - }, - "breakpointId": { - "description": "ID of the breakpoint to delete.", + "description": "Identifies the debuggee being debugged.", "required": true, - "location": "path", - "type": "string" - }, - "clientVersion": { - "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", - "location": "query", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud_debugger" - ] + "flatPath": "v2/controller/debuggees/{debuggeeId}/breakpoints/{id}", + "path": "v2/controller/debuggees/{debuggeeId}/breakpoints/{id}", + "id": "clouddebugger.controller.debuggees.breakpoints.update", + "description": "Updates the breakpoint state or mutable fields.\nThe entire Breakpoint message must be sent back to the controller\nservice.\n\nUpdates to active breakpoint fields are only allowed if the new value\ndoes not change the breakpoint specification. Updates to the `location`,\n`condition` and `expression` fields should not alter the breakpoint\nsemantics. 
These may only make changes such as canonicalizing a value\nor snapping the location to the correct line of code.", + "request": { + "$ref": "UpdateActiveBreakpointRequest" + } } } } - }, - "methods": { - "list": { - "id": "clouddebugger.debugger.debuggees.list", - "response": { - "$ref": "ListDebuggeesResponse" - }, - "parameterOrder": [], - "description": "Lists all the debuggees that the user can set breakpoints to.", - "flatPath": "v2/debugger/debuggees", - "httpMethod": "GET", - "parameters": { - "includeInactive": { - "description": "When set to `true`, the result includes all debuggees. Otherwise, the\nresult includes only debuggees that are active.", - "location": "query", - "type": "boolean" - }, - "project": { - "description": "Project number of a Google Cloud project whose debuggees to list.", - "location": "query", - "type": "string" - }, - "clientVersion": { - "description": "The client version making the call.\nFollowing: `domain/type/version` (e.g., `google.com/intellij/v1`).", - "location": "query", - "type": "string" - } - }, - "path": "v2/debugger/debuggees", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud_debugger" - ] - } } } } } }, - "schemas": { - "Debuggee": { - "description": "Represents the application to debug. The application may include one or more\nreplicated processes executing the same code. Each of these processes is\nattached with a debugger agent, carrying out the debugging commands.\nThe agents attached to the same debuggee are identified by using exactly the\nsame field values when registering.", - "type": "object", - "properties": { - "id": { - "description": "Unique identifier for the debuggee generated by the controller service.", - "type": "string" - }, - "project": { - "description": "Project the debuggee is associated with.\nUse the project number when registering a Google Cloud Platform project.", - "type": "string" - }, - "extSourceContexts": { - "description": "References to the locations and revisions of the source code used in the\ndeployed application.\n\nContexts describing a remote repo related to the source code\nhave a `category` label of `remote_repo`. Source snapshot source\ncontexts have a `category` of `snapshot`.", - "type": "array", - "items": { - "$ref": "ExtendedSourceContext" - } - }, - "description": { - "description": "Human readable description of the debuggee.\nIncluding a human-readable project name, environment name and version\ninformation is recommended.", - "type": "string" - }, - "isDisabled": { - "description": "If set to `true`, indicates that the agent should disable itself and\ndetach from the debuggee.", - "type": "boolean" - }, - "status": { - "description": "Human readable message to be displayed to the user about this debuggee.\nAbsence of this field indicates no status. The message can be either\ninformational or an error status.", - "$ref": "StatusMessage" - }, - "isInactive": { - "description": "If set to `true`, indicates that the debuggee is considered as inactive by\nthe Controller service.", - "type": "boolean" - }, - "agentVersion": { - "description": "Version ID of the agent release. 
The version ID is structured as\nfollowing: `domain/type/vmajor.minor` (for example\n`google.com/gcp-java/v1.1`).", - "type": "string" - }, - "labels": { - "description": "A set of custom debuggee properties, populated by the agent, to be\ndisplayed to the user.", - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - "uniquifier": { - "description": "Debuggee uniquifier within the project.\nAny string that identifies the application within the project can be used.\nIncluding environment and version or build IDs is recommended.", - "type": "string" - }, - "sourceContexts": { - "description": "References to the locations and revisions of the source code used in the\ndeployed application.\n\nNOTE: This field is deprecated. Consumers should use\n`ext_source_contexts` if it is not empty. Debug agents should populate\nboth this field and `ext_source_contexts`.", - "type": "array", - "items": { - "$ref": "SourceContext" - } - } - }, - "id": "Debuggee" + "parameters": { + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + } + }, + "version": "v2", + "baseUrl": "https://clouddebugger.googleapis.com/", + "kind": "discovery#restDescription", + "servicePath": "", + "description": "Examines the call stack and variables of a running application without stopping or slowing it down.\n", + "basePath": "", + "documentationLink": "http://cloud.google.com/debugger", + "revision": "20170208", + "id": "clouddebugger:v2", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { "StackFrame": { - "description": "Represents a stack frame context.", - "type": "object", "properties": { - "locals": { - "description": "Set of local variables at the stack frame location.\nNote that this might not be populated for all stack frames.", - "type": "array", - "items": { - "$ref": "Variable" - } - }, "function": { "description": "Demangled function name at the call site.", "type": "string" }, - "location": { - "description": "Source location of the call site.", - "$ref": "SourceLocation" - }, "arguments": { "description": "Set of arguments passed to this function.\nNote that this might not be populated for all stack frames.", "type": "array", "items": { "$ref": "Variable" } - } - }, - "id": "StackFrame" - }, - "ListBreakpointsResponse": { - "description": "Response for listing breakpoints.", - "type": "object", - "properties": { - "breakpoints": { - "description": "List of breakpoints matching the request.\nThe fields `id` and `location` are guaranteed to be set on each breakpoint.\nThe fields: `stack_frames`, `evaluated_expressions` and `variable_table`\nare cleared on each breakpoint regardless of it's status.", - "type": "array", - "items": { - "$ref": "Breakpoint" - } - }, - "nextWaitToken": { - "description": "A wait token that can be used in the next call to `list` (REST) or\n`ListBreakpoints` (RPC) to block until the list of breakpoints has changes.", - "type": "string" - } - }, - "id": "ListBreakpointsResponse" - }, - "Variable": { - "description": "Represents a variable or an argument possibly of a compound object type.\nNote how the following variables are represented:\n\n1) A simple variable:\n\n int x = 5\n\n { name: \"x\", value: \"5\", type: \"int\" } // Captured variable\n\n2) A compound object:\n\n struct T {\n int m1;\n int m2;\n };\n T x = { 3, 7 };\n\n { // Captured variable\n name: \"x\",\n type: \"T\",\n members { name: \"m1\", value: \"3\", type: \"int\" },\n members { name: \"m2\", value: \"7\", type: \"int\" }\n }\n\n3) A pointer where the pointee was captured:\n\n T x = { 3, 7 };\n T* p = &x;\n\n { // Captured variable\n name: \"p\",\n type: \"T*\",\n value: \"0x00500500\",\n members { name: \"m1\", value: \"3\", type: \"int\" },\n members { name: \"m2\", value: \"7\", type: \"int\" }\n }\n\n4) A pointer where the pointee was not captured:\n\n T* p = new T;\n\n { // Captured variable\n name: \"p\",\n type: \"T*\",\n value: \"0x00400400\"\n status { is_error: true, description { format: \"unavailable\" } }\n }\n\nThe status should describe the reason for the missing value,\nsuch as `\u003coptimized out\u003e`, 
`\u003cinaccessible\u003e`, `\u003cpointers limit reached\u003e`.\n\nNote that a null pointer should not have members.\n\n5) An unnamed value:\n\n int* p = new int(7);\n\n { // Captured variable\n name: \"p\",\n value: \"0x00500500\",\n type: \"int*\",\n members { value: \"7\", type: \"int\" } }\n\n6) An unnamed pointer where the pointee was not captured:\n\n int* p = new int(7);\n int** pp = &p;\n\n { // Captured variable\n name: \"pp\",\n value: \"0x00500500\",\n type: \"int**\",\n members {\n value: \"0x00400400\",\n type: \"int*\"\n status {\n is_error: true,\n description: { format: \"unavailable\" } }\n }\n }\n }\n\nTo optimize computation, memory and network traffic, variables that\nrepeat in the output multiple times can be stored once in a shared\nvariable table and be referenced using the `var_table_index` field. The\nvariables stored in the shared table are nameless and are essentially\na partition of the complete variable. To reconstruct the complete\nvariable, merge the referencing variable with the referenced variable.\n\nWhen using the shared variable table, the following variables:\n\n T x = { 3, 7 };\n T* p = &x;\n T& r = x;\n\n { name: \"x\", var_table_index: 3, type: \"T\" } // Captured variables\n { name: \"p\", value \"0x00500500\", type=\"T*\", var_table_index: 3 }\n { name: \"r\", type=\"T&\", var_table_index: 3 }\n\n { // Shared variable table entry #3:\n members { name: \"m1\", value: \"3\", type: \"int\" },\n members { name: \"m2\", value: \"7\", type: \"int\" }\n }\n\nNote that the pointer address is stored with the referencing variable\nand not with the referenced variable. This allows the referenced variable\nto be shared between pointers and references.\n\nThe type field is optional. The debugger agent may or may not support it.", - "type": "object", - "properties": { - "varTableIndex": { - "description": "Reference to a variable in the shared variable table. More than\none variable can reference the same variable in the table. The\n`var_table_index` field is an index into `variable_table` in Breakpoint.", - "type": "integer", - "format": "int32" - }, - "status": { - "description": "Status associated with the variable. This field will usually stay\nunset. A status of a single variable only applies to that variable or\nexpression. The rest of breakpoint data still remains valid. Variables\nmight be reported in error state even when breakpoint is not in final\nstate.\n\nThe message may refer to variable name with `refers_to` set to\n`VARIABLE_NAME`. Alternatively `refers_to` will be set to `VARIABLE_VALUE`.\nIn either case variable value and members will be unset.\n\nExample of error message applied to name: `Invalid expression syntax`.\n\nExample of information message applied to value: `Not captured`.\n\nExamples of error message applied to value:\n\n* `Malformed string`,\n* `Field f not found in class C`\n* `Null pointer dereference`", - "$ref": "StatusMessage" }, - "members": { - "description": "Members contained or pointed to by the variable.", + "locals": { + "description": "Set of local variables at the stack frame location.\nNote that this might not be populated for all stack frames.", "type": "array", "items": { "$ref": "Variable" } }, - "name": { - "description": "Name of the variable, if any.", - "type": "string" - }, - "value": { - "description": "Simple value of the variable.", - "type": "string" - }, - "type": { - "description": "Variable type (e.g. `MyClass`). If the variable is split with\n`var_table_index`, `type` goes next to `value`. 
The interpretation of\na type is agent specific. It is recommended to include the dynamic type\nrather than a static type of an object.", - "type": "string" - } - }, - "id": "Variable" - }, - "SourceLocation": { - "description": "Represents a location in the source code.", - "type": "object", - "properties": { - "path": { - "description": "Path to the source file within the source context of the target binary.", - "type": "string" - }, - "line": { - "description": "Line inside the file. The first line in the file has the value `1`.", - "type": "integer", - "format": "int32" + "location": { + "$ref": "SourceLocation", + "description": "Source location of the call site." } }, - "id": "SourceLocation" + "id": "StackFrame", + "description": "Represents a stack frame context.", + "type": "object" }, - "GerritSourceContext": { - "description": "A SourceContext referring to a Gerrit project.", - "type": "object", + "RepoId": { "properties": { - "hostUri": { - "description": "The URI of a running Gerrit instance.", - "type": "string" - }, - "aliasName": { - "description": "The name of an alias (branch, tag, etc.).", - "type": "string" - }, - "aliasContext": { - "description": "An alias, which may be a branch or tag.", - "$ref": "AliasContext" - }, - "gerritProject": { - "description": "The full project name within the host. Projects may be nested, so\n\"project/subproject\" is a valid project name.\nThe \"repo name\" is hostURI/project.", - "type": "string" + "projectRepoId": { + "$ref": "ProjectRepoId", + "description": "A combination of a project ID and a repo name." }, - "revisionId": { - "description": "A revision (commit) ID.", + "uid": { + "description": "A server-assigned, globally unique identifier.", "type": "string" } }, - "id": "GerritSourceContext" + "id": "RepoId", + "description": "A unique identifier for a cloud repo.", + "type": "object" }, "FormatMessage": { "description": "Represents a message with parameters.", @@ -536,195 +497,166 @@ }, "id": "FormatMessage" }, - "RegisterDebuggeeResponse": { - "description": "Response for registering a debuggee.", - "type": "object", - "properties": { - "debuggee": { - "description": "Debuggee resource.\nThe field `id` is guranteed to be set (in addition to the echoed fields).", - "$ref": "Debuggee" - } - }, - "id": "RegisterDebuggeeResponse" - }, - "GetBreakpointResponse": { - "description": "Response for getting breakpoint information.", - "type": "object", + "ExtendedSourceContext": { "properties": { - "breakpoint": { - "description": "Complete breakpoint state.\nThe fields `id` and `location` are guaranteed to be set.", - "$ref": "Breakpoint" + "context": { + "$ref": "SourceContext", + "description": "Any source context." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels with user defined metadata.", + "type": "object" } }, - "id": "GetBreakpointResponse" + "id": "ExtendedSourceContext", + "description": "An ExtendedSourceContext is a SourceContext combined with additional\ndetails describing the context.", + "type": "object" }, "AliasContext": { - "description": "An alias to a repo revision.", - "type": "object", "properties": { + "name": { + "description": "The alias name.", + "type": "string" + }, "kind": { - "description": "The alias kind.", - "enum": [ - "ANY", - "FIXED", - "MOVABLE", - "OTHER" - ], "enumDescriptions": [ "Do not use.", "Git tag", "Git branch", "OTHER is used to specify non-standard aliases, those not of the kinds\nabove. 
For example, if a Git repo has a ref named \"refs/foo/bar\", it\nis considered to be of kind OTHER." ], - "type": "string" - }, - "name": { - "description": "The alias name.", + "enum": [ + "ANY", + "FIXED", + "MOVABLE", + "OTHER" + ], + "description": "The alias kind.", "type": "string" } }, - "id": "AliasContext" + "id": "AliasContext", + "description": "An alias to a repo revision.", + "type": "object" }, - "CloudWorkspaceId": { - "description": "A CloudWorkspaceId is a unique identifier for a cloud workspace.\nA cloud workspace is a place associated with a repo where modified files\ncan be stored before they are committed.", + "ListDebuggeesResponse": { + "description": "Response for listing debuggees.", "type": "object", "properties": { - "repoId": { - "description": "The ID of the repo containing the workspace.", - "$ref": "RepoId" - }, - "name": { - "description": "The unique name of the workspace within the repo. This is the name\nchosen by the client in the Source API's CreateWorkspace method.", - "type": "string" + "debuggees": { + "description": "List of debuggees accessible to the calling user.\nNote that the `description` field is the only human readable field\nthat should be displayed to the user.\nThe fields `debuggee.id` and `description` fields are guaranteed to be\nset on each debuggee.", + "type": "array", + "items": { + "$ref": "Debuggee" + } } }, - "id": "CloudWorkspaceId" + "id": "ListDebuggeesResponse" }, - "Breakpoint": { - "description": "Represents the breakpoint specification, status and results.", + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "SourceLocation": { + "description": "Represents a location in the source code.", "type": "object", "properties": { - "id": { - "description": "Breakpoint identifier, unique in the scope of the debuggee.", - "type": "string" - }, - "stackFrames": { - "description": "The stack at breakpoint time.", - "type": "array", - "items": { - "$ref": "StackFrame" - } - }, - "location": { - "description": "Breakpoint source location.", - "$ref": "SourceLocation" - }, - "status": { - "description": "Breakpoint status.\n\nThe status includes an error flag and a human readable message.\nThis field is usually unset. The message can be either\ninformational or an error message. 
Regardless, clients should always\ndisplay the text message back to the user.\n\nError status indicates complete failure of the breakpoint.\n\nExample (non-final state): `Still loading symbols...`\n\nExamples (final state):\n\n* `Invalid line number` referring to location\n* `Field f not found in class C` referring to condition", - "$ref": "StatusMessage" - }, - "userEmail": { - "description": "E-mail address of the user that created this breakpoint", - "type": "string" - }, - "condition": { - "description": "Condition that triggers the breakpoint.\nThe condition is a compound boolean expression composed using expressions\nin a programming language at the source location.", - "type": "string" - }, - "finalTime": { - "description": "Time this breakpoint was finalized as seen by the server in seconds\nresolution.", - "type": "string", - "format": "google-datetime" + "line": { + "description": "Line inside the file. The first line in the file has the value `1`.", + "format": "int32", + "type": "integer" }, - "action": { - "description": "Action that the agent should perform when the code at the\nbreakpoint location is hit.", - "enum": [ - "CAPTURE", - "LOG" - ], - "enumDescriptions": [ - "Capture stack frame and variables and update the breakpoint.\nThe data is only captured once. After that the breakpoint is set\nin a final state.", - "Log each breakpoint hit. The breakpoint remains active until\ndeleted or expired." - ], + "path": { + "description": "Path to the source file within the source context of the target binary.", "type": "string" + } + }, + "id": "SourceLocation" + }, + "Debuggee": { + "properties": { + "extSourceContexts": { + "description": "References to the locations and revisions of the source code used in the\ndeployed application.\n\nContexts describing a remote repo related to the source code\nhave a `category` label of `remote_repo`. Source snapshot source\ncontexts have a `category` of `snapshot`.", + "type": "array", + "items": { + "$ref": "ExtendedSourceContext" + } }, "labels": { - "description": "A set of custom breakpoint properties, populated by the agent, to be\ndisplayed to the user.", "additionalProperties": { "type": "string" }, + "description": "A set of custom debuggee properties, populated by the agent, to be\ndisplayed to the user.", "type": "object" }, - "logMessageFormat": { - "description": "Only relevant when action is `LOG`. Defines the message to log when\nthe breakpoint hits. The message may include parameter placeholders `$0`,\n`$1`, etc. These placeholders are replaced with the evaluated value\nof the appropriate expression. Expressions not referenced in\n`log_message_format` are not logged.\n\nExample: `Message received, id = $0, count = $1` with\n`expressions` = `[ message.id, message.count ]`.", + "isInactive": { + "description": "If set to `true`, indicates that the debuggee is considered as inactive by\nthe Controller service.", + "type": "boolean" + }, + "status": { + "description": "Human readable message to be displayed to the user about this debuggee.\nAbsence of this field indicates no status. 
The message can be either\ninformational or an error status.", + "$ref": "StatusMessage" + }, + "project": { + "description": "Project the debuggee is associated with.\nUse the project number when registering a Google Cloud Platform project.", "type": "string" }, - "createTime": { - "description": "Time this breakpoint was created by the server in seconds resolution.", - "type": "string", - "format": "google-datetime" + "isDisabled": { + "description": "If set to `true`, indicates that the agent should disable itself and\ndetach from the debuggee.", + "type": "boolean" }, - "logLevel": { - "description": "Indicates the severity of the log. Only relevant when action is `LOG`.", - "enum": [ - "INFO", - "WARNING", - "ERROR" - ], - "enumDescriptions": [ - "Information log message.", - "Warning log message.", - "Error log message." - ], + "agentVersion": { + "description": "Version ID of the agent release. The version ID is structured as\nfollowing: `domain/type/vmajor.minor` (for example\n`google.com/gcp-java/v1.1`).", "type": "string" }, - "evaluatedExpressions": { - "description": "Values of evaluated expressions at breakpoint time.\nThe evaluated expressions appear in exactly the same order they\nare listed in the `expressions` field.\nThe `name` field holds the original expression text, the `value` or\n`members` field holds the result of the evaluated expression.\nIf the expression cannot be evaluated, the `status` inside the `Variable`\nwill indicate an error and contain the error text.", - "type": "array", - "items": { - "$ref": "Variable" - } + "id": { + "description": "Unique identifier for the debuggee generated by the controller service.", + "type": "string" }, - "isFinalState": { - "description": "When true, indicates that this is a final result and the\nbreakpoint state will not change from here on.", - "type": "boolean" + "uniquifier": { + "description": "Debuggee uniquifier within the project.\nAny string that identifies the application within the project can be used.\nIncluding environment and version or build IDs is recommended.", + "type": "string" }, - "expressions": { - "description": "List of read-only expressions to evaluate at the breakpoint location.\nThe expressions are composed using expressions in the programming language\nat the source location. If the breakpoint action is `LOG`, the evaluated\nexpressions are included in log statements.", - "type": "array", - "items": { - "type": "string" - } + "description": { + "description": "Human readable description of the debuggee.\nIncluding a human-readable project name, environment name and version\ninformation is recommended.", + "type": "string" }, - "variableTable": { - "description": "The `variable_table` exists to aid with computation, memory and network\ntraffic optimization. It enables storing a variable once and reference\nit from multiple variables, including variables stored in the\n`variable_table` itself.\nFor example, the same `this` object, which may appear at many levels of\nthe stack, can have all of its data stored once in this table. The\nstack frame variables then would hold only a reference to it.\n\nThe variable `var_table_index` field is an index into this repeated field.\nThe stored objects are nameless and get their name from the referencing\nvariable. 
The effective variable is a merge of the referencing variable\nand the referenced variable.", + "sourceContexts": { + "description": "References to the locations and revisions of the source code used in the\ndeployed application.\n\nNOTE: This field is deprecated. Consumers should use\n`ext_source_contexts` if it is not empty. Debug agents should populate\nboth this field and `ext_source_contexts`.", "type": "array", "items": { - "$ref": "Variable" + "$ref": "SourceContext" } } }, - "id": "Breakpoint" + "id": "Debuggee", + "description": "Represents the application to debug. The application may include one or more\nreplicated processes executing the same code. Each of these processes is\nattached with a debugger agent, carrying out the debugging commands.\nThe agents attached to the same debuggee are identified by using exactly the\nsame field values when registering.", + "type": "object" }, - "SetBreakpointResponse": { - "description": "Response for setting a breakpoint.", + "ProjectRepoId": { + "description": "Selects a repo using a Google Cloud Platform project ID\n(e.g. winged-cargo-31) and a repo name within that project.", "type": "object", "properties": { - "breakpoint": { - "description": "Breakpoint resource.\nThe field `id` is guaranteed to be set (in addition to the echoed fileds).", - "$ref": "Breakpoint" + "projectId": { + "description": "The ID of the project.", + "type": "string" + }, + "repoName": { + "description": "The name of the repo. Leave empty for the default repo.", + "type": "string" } }, - "id": "SetBreakpointResponse" + "id": "ProjectRepoId" }, "ListActiveBreakpointsResponse": { "description": "Response for listing active breakpoints.", "type": "object", "properties": { - "waitExpired": { - "description": "The `wait_expired` field is set to true by the server when the\nrequest times out and the field `success_on_timeout` is set to true.", - "type": "boolean" - }, "breakpoints": { "description": "List of all active breakpoints.\nThe fields `id` and `location` are guaranteed to be set on each breakpoint.", "type": "array", @@ -732,6 +664,10 @@ "$ref": "Breakpoint" } }, + "waitExpired": { + "description": "The `wait_expired` field is set to true by the server when the\nrequest times out and the field `success_on_timeout` is set to true.", + "type": "boolean" + }, "nextWaitToken": { "description": "A wait token that can be used in the next method call to block until\nthe list of breakpoints changes.", "type": "string" @@ -739,135 +675,224 @@ }, "id": "ListActiveBreakpointsResponse" }, - "ExtendedSourceContext": { - "description": "An ExtendedSourceContext is a SourceContext combined with additional\ndetails describing the context.", - "type": "object", + "CloudWorkspaceSourceContext": { "properties": { - "labels": { - "description": "Labels with user defined metadata.", - "additionalProperties": { - "type": "string" - }, - "type": "object" + "snapshotId": { + "description": "The ID of the snapshot.\nAn empty snapshot_id refers to the most recent snapshot.", + "type": "string" }, - "context": { - "description": "Any source context.", - "$ref": "SourceContext" + "workspaceId": { + "$ref": "CloudWorkspaceId", + "description": "The ID of the workspace." 
} }, - "id": "ExtendedSourceContext" + "id": "CloudWorkspaceSourceContext", + "description": "A CloudWorkspaceSourceContext denotes a workspace at a particular snapshot.", + "type": "object" }, - "RegisterDebuggeeRequest": { - "description": "Request to register a debuggee.", - "type": "object", - "properties": { - "debuggee": { - "description": "Debuggee information to register.\nThe fields `project`, `uniquifier`, `description` and `agent_version`\nof the debuggee must be set.", - "$ref": "Debuggee" - } - }, - "id": "RegisterDebuggeeRequest" + "UpdateActiveBreakpointResponse": { + "properties": {}, + "id": "UpdateActiveBreakpointResponse", + "description": "Response for updating an active breakpoint.\nThe message is defined to allow future extensions.", + "type": "object" }, - "RepoId": { - "description": "A unique identifier for a cloud repo.", - "type": "object", + "GerritSourceContext": { "properties": { - "projectRepoId": { - "description": "A combination of a project ID and a repo name.", - "$ref": "ProjectRepoId" + "gerritProject": { + "description": "The full project name within the host. Projects may be nested, so\n\"project/subproject\" is a valid project name.\nThe \"repo name\" is hostURI/project.", + "type": "string" }, - "uid": { - "description": "A server-assigned, globally unique identifier.", + "aliasContext": { + "$ref": "AliasContext", + "description": "An alias, which may be a branch or tag." + }, + "hostUri": { + "description": "The URI of a running Gerrit instance.", + "type": "string" + }, + "revisionId": { + "description": "A revision (commit) ID.", + "type": "string" + }, + "aliasName": { + "description": "The name of an alias (branch, tag, etc.).", "type": "string" } }, - "id": "RepoId" + "id": "GerritSourceContext", + "description": "A SourceContext referring to a Gerrit project.", + "type": "object" }, - "ProjectRepoId": { - "description": "Selects a repo using a Google Cloud Platform project ID\n(e.g. winged-cargo-31) and a repo name within that project.", + "CloudWorkspaceId": { + "description": "A CloudWorkspaceId is a unique identifier for a cloud workspace.\nA cloud workspace is a place associated with a repo where modified files\ncan be stored before they are committed.", "type": "object", "properties": { - "repoName": { - "description": "The name of the repo. Leave empty for the default repo.", + "name": { + "description": "The unique name of the workspace within the repo. This is the name\nchosen by the client in the Source API's CreateWorkspace method.", "type": "string" }, - "projectId": { - "description": "The ID of the project.", - "type": "string" + "repoId": { + "$ref": "RepoId", + "description": "The ID of the repo containing the workspace." 
} }, - "id": "ProjectRepoId" + "id": "CloudWorkspaceId" }, - "CloudWorkspaceSourceContext": { - "description": "A CloudWorkspaceSourceContext denotes a workspace at a particular snapshot.", - "type": "object", + "ListBreakpointsResponse": { "properties": { - "snapshotId": { - "description": "The ID of the snapshot.\nAn empty snapshot_id refers to the most recent snapshot.", + "nextWaitToken": { + "description": "A wait token that can be used in the next call to `list` (REST) or\n`ListBreakpoints` (RPC) to block until the list of breakpoints has changes.", "type": "string" }, - "workspaceId": { - "description": "The ID of the workspace.", - "$ref": "CloudWorkspaceId" + "breakpoints": { + "description": "List of breakpoints matching the request.\nThe fields `id` and `location` are guaranteed to be set on each breakpoint.\nThe fields: `stack_frames`, `evaluated_expressions` and `variable_table`\nare cleared on each breakpoint regardless of it's status.", + "type": "array", + "items": { + "$ref": "Breakpoint" + } } }, - "id": "CloudWorkspaceSourceContext" + "id": "ListBreakpointsResponse", + "description": "Response for listing breakpoints.", + "type": "object" }, - "ListDebuggeesResponse": { - "description": "Response for listing debuggees.", - "type": "object", + "Breakpoint": { "properties": { - "debuggees": { - "description": "List of debuggees accessible to the calling user.\nNote that the `description` field is the only human readable field\nthat should be displayed to the user.\nThe fields `debuggee.id` and `description` fields are guaranteed to be\nset on each debuggee.", + "userEmail": { + "description": "E-mail address of the user that created this breakpoint", + "type": "string" + }, + "action": { + "enumDescriptions": [ + "Capture stack frame and variables and update the breakpoint.\nThe data is only captured once. After that the breakpoint is set\nin a final state.", + "Log each breakpoint hit. The breakpoint remains active until\ndeleted or expired." + ], + "enum": [ + "CAPTURE", + "LOG" + ], + "description": "Action that the agent should perform when the code at the\nbreakpoint location is hit.", + "type": "string" + }, + "logLevel": { + "enumDescriptions": [ + "Information log message.", + "Warning log message.", + "Error log message." + ], + "enum": [ + "INFO", + "WARNING", + "ERROR" + ], + "description": "Indicates the severity of the log. Only relevant when action is `LOG`.", + "type": "string" + }, + "id": { + "description": "Breakpoint identifier, unique in the scope of the debuggee.", + "type": "string" + }, + "location": { + "description": "Breakpoint source location.", + "$ref": "SourceLocation" + }, + "finalTime": { + "description": "Time this breakpoint was finalized as seen by the server in seconds\nresolution.", + "format": "google-datetime", + "type": "string" + }, + "variableTable": { + "description": "The `variable_table` exists to aid with computation, memory and network\ntraffic optimization. It enables storing a variable once and reference\nit from multiple variables, including variables stored in the\n`variable_table` itself.\nFor example, the same `this` object, which may appear at many levels of\nthe stack, can have all of its data stored once in this table. The\nstack frame variables then would hold only a reference to it.\n\nThe variable `var_table_index` field is an index into this repeated field.\nThe stored objects are nameless and get their name from the referencing\nvariable. 
The effective variable is a merge of the referencing variable\nand the referenced variable.", + "type": "array", + "items": { + "$ref": "Variable" + } + }, + "createTime": { + "description": "Time this breakpoint was created by the server in seconds resolution.", + "format": "google-datetime", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "A set of custom breakpoint properties, populated by the agent, to be\ndisplayed to the user.", + "type": "object" + }, + "logMessageFormat": { + "description": "Only relevant when action is `LOG`. Defines the message to log when\nthe breakpoint hits. The message may include parameter placeholders `$0`,\n`$1`, etc. These placeholders are replaced with the evaluated value\nof the appropriate expression. Expressions not referenced in\n`log_message_format` are not logged.\n\nExample: `Message received, id = $0, count = $1` with\n`expressions` = `[ message.id, message.count ]`.", + "type": "string" + }, + "expressions": { + "description": "List of read-only expressions to evaluate at the breakpoint location.\nThe expressions are composed using expressions in the programming language\nat the source location. If the breakpoint action is `LOG`, the evaluated\nexpressions are included in log statements.", + "type": "array", + "items": { + "type": "string" + } + }, + "evaluatedExpressions": { + "description": "Values of evaluated expressions at breakpoint time.\nThe evaluated expressions appear in exactly the same order they\nare listed in the `expressions` field.\nThe `name` field holds the original expression text, the `value` or\n`members` field holds the result of the evaluated expression.\nIf the expression cannot be evaluated, the `status` inside the `Variable`\nwill indicate an error and contain the error text.", + "type": "array", + "items": { + "$ref": "Variable" + } + }, + "isFinalState": { + "description": "When true, indicates that this is a final result and the\nbreakpoint state will not change from here on.", + "type": "boolean" + }, + "stackFrames": { + "description": "The stack at breakpoint time.", "type": "array", "items": { - "$ref": "Debuggee" + "$ref": "StackFrame" } + }, + "condition": { + "description": "Condition that triggers the breakpoint.\nThe condition is a compound boolean expression composed using expressions\nin a programming language at the source location.", + "type": "string" + }, + "status": { + "$ref": "StatusMessage", + "description": "Breakpoint status.\n\nThe status includes an error flag and a human readable message.\nThis field is usually unset. The message can be either\ninformational or an error message. 
Regardless, clients should always\ndisplay the text message back to the user.\n\nError status indicates complete failure of the breakpoint.\n\nExample (non-final state): `Still loading symbols...`\n\nExamples (final state):\n\n* `Invalid line number` referring to location\n* `Field f not found in class C` referring to condition" } }, - "id": "ListDebuggeesResponse" + "id": "Breakpoint", + "description": "Represents the breakpoint specification, status and results.", + "type": "object" }, - "CloudRepoSourceContext": { - "description": "A CloudRepoSourceContext denotes a particular revision in a cloud\nrepo (a repo hosted by the Google Cloud Platform).", - "type": "object", + "SetBreakpointResponse": { "properties": { - "aliasName": { - "description": "The name of an alias (branch, tag, etc.).", - "type": "string" - }, - "aliasContext": { - "description": "An alias, which may be a branch or tag.", - "$ref": "AliasContext" - }, - "repoId": { - "description": "The ID of the repo.", - "$ref": "RepoId" - }, - "revisionId": { - "description": "A revision ID.", - "type": "string" + "breakpoint": { + "$ref": "Breakpoint", + "description": "Breakpoint resource.\nThe field `id` is guaranteed to be set (in addition to the echoed fileds)." } }, - "id": "CloudRepoSourceContext" + "id": "SetBreakpointResponse", + "description": "Response for setting a breakpoint.", + "type": "object" }, "UpdateActiveBreakpointRequest": { "description": "Request to update an active breakpoint.", "type": "object", "properties": { "breakpoint": { - "description": "Updated breakpoint information.\nThe field 'id' must be set.", - "$ref": "Breakpoint" + "$ref": "Breakpoint", + "description": "Updated breakpoint information.\nThe field 'id' must be set." } }, "id": "UpdateActiveBreakpointRequest" }, "SourceContext": { - "description": "A SourceContext is a reference to a tree of files. A SourceContext together\nwith a path point to a unique revision of a single file or directory.", - "type": "object", "properties": { + "gerrit": { + "description": "A SourceContext referring to a Gerrit project.", + "$ref": "GerritSourceContext" + }, "cloudRepo": { - "description": "A SourceContext referring to a revision in a cloud repo.", - "$ref": "CloudRepoSourceContext" + "$ref": "CloudRepoSourceContext", + "description": "A SourceContext referring to a revision in a cloud repo." }, "cloudWorkspace": { "description": "A SourceContext referring to a snapshot in a cloud workspace.", @@ -876,34 +901,67 @@ "git": { "description": "A SourceContext referring to any third party Git repo (e.g. GitHub).", "$ref": "GitSourceContext" - }, - "gerrit": { - "description": "A SourceContext referring to a Gerrit project.", - "$ref": "GerritSourceContext" } }, - "id": "SourceContext" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" + "id": "SourceContext", + "description": "A SourceContext is a reference to a tree of files. 
A SourceContext together\nwith a path point to a unique revision of a single file or directory.", + "type": "object" }, - "GitSourceContext": { - "description": "A GitSourceContext denotes a particular revision in a third party Git\nrepository (e.g. GitHub).", - "type": "object", + "CloudRepoSourceContext": { "properties": { - "url": { - "description": "Git repository URL.", - "type": "string" + "aliasContext": { + "$ref": "AliasContext", + "description": "An alias, which may be a branch or tag." }, "revisionId": { - "description": "Git commit hash.\nrequired.", + "description": "A revision ID.", + "type": "string" + }, + "aliasName": { + "description": "The name of an alias (branch, tag, etc.).", "type": "string" + }, + "repoId": { + "description": "The ID of the repo.", + "$ref": "RepoId" + } + }, + "id": "CloudRepoSourceContext", + "description": "A CloudRepoSourceContext denotes a particular revision in a cloud\nrepo (a repo hosted by the Google Cloud Platform).", + "type": "object" + }, + "RegisterDebuggeeResponse": { + "properties": { + "debuggee": { + "$ref": "Debuggee", + "description": "Debuggee resource.\nThe field `id` is guranteed to be set (in addition to the echoed fields)." + } + }, + "id": "RegisterDebuggeeResponse", + "description": "Response for registering a debuggee.", + "type": "object" + }, + "RegisterDebuggeeRequest": { + "properties": { + "debuggee": { + "description": "Debuggee information to register.\nThe fields `project`, `uniquifier`, `description` and `agent_version`\nof the debuggee must be set.", + "$ref": "Debuggee" + } + }, + "id": "RegisterDebuggeeRequest", + "description": "Request to register a debuggee.", + "type": "object" + }, + "GetBreakpointResponse": { + "properties": { + "breakpoint": { + "$ref": "Breakpoint", + "description": "Complete breakpoint state.\nThe fields `id` and `location` are guaranteed to be set." } }, - "id": "GitSourceContext" + "id": "GetBreakpointResponse", + "description": "Response for getting breakpoint information.", + "type": "object" }, "StatusMessage": { "description": "Represents a contextual status message.\nThe message can indicate an error or informational status, and refer to\nspecific parts of the containing object.\nFor example, the `Breakpoint.status` field can indicate an error referring\nto the `BREAKPOINT_SOURCE_LOCATION` with the message `Location not found`.", @@ -918,16 +976,6 @@ "$ref": "FormatMessage" }, "refersTo": { - "description": "Reference to which the message applies.", - "enum": [ - "UNSPECIFIED", - "BREAKPOINT_SOURCE_LOCATION", - "BREAKPOINT_CONDITION", - "BREAKPOINT_EXPRESSION", - "BREAKPOINT_AGE", - "VARIABLE_NAME", - "VARIABLE_VALUE" - ], "enumDescriptions": [ "Status doesn't refer to any particular input.", "Status applies to the breakpoint and is related to its location.", @@ -937,123 +985,75 @@ "Status applies to the entire variable.", "Status applies to variable value (variable name is valid)." 
], + "enum": [ + "UNSPECIFIED", + "BREAKPOINT_SOURCE_LOCATION", + "BREAKPOINT_CONDITION", + "BREAKPOINT_EXPRESSION", + "BREAKPOINT_AGE", + "VARIABLE_NAME", + "VARIABLE_VALUE" + ], + "description": "Reference to which the message applies.", "type": "string" } }, "id": "StatusMessage" }, - "UpdateActiveBreakpointResponse": { - "description": "Response for updating an active breakpoint.\nThe message is defined to allow future extensions.", + "GitSourceContext": { + "properties": { + "revisionId": { + "description": "Git commit hash.\nrequired.", + "type": "string" + }, + "url": { + "description": "Git repository URL.", + "type": "string" + } + }, + "id": "GitSourceContext", + "description": "A GitSourceContext denotes a particular revision in a third party Git\nrepository (e.g. GitHub).", + "type": "object" + }, + "Variable": { + "description": "Represents a variable or an argument possibly of a compound object type.\nNote how the following variables are represented:\n\n1) A simple variable:\n\n int x = 5\n\n { name: \"x\", value: \"5\", type: \"int\" } // Captured variable\n\n2) A compound object:\n\n struct T {\n int m1;\n int m2;\n };\n T x = { 3, 7 };\n\n { // Captured variable\n name: \"x\",\n type: \"T\",\n members { name: \"m1\", value: \"3\", type: \"int\" },\n members { name: \"m2\", value: \"7\", type: \"int\" }\n }\n\n3) A pointer where the pointee was captured:\n\n T x = { 3, 7 };\n T* p = &x;\n\n { // Captured variable\n name: \"p\",\n type: \"T*\",\n value: \"0x00500500\",\n members { name: \"m1\", value: \"3\", type: \"int\" },\n members { name: \"m2\", value: \"7\", type: \"int\" }\n }\n\n4) A pointer where the pointee was not captured:\n\n T* p = new T;\n\n { // Captured variable\n name: \"p\",\n type: \"T*\",\n value: \"0x00400400\"\n status { is_error: true, description { format: \"unavailable\" } }\n }\n\nThe status should describe the reason for the missing value,\nsuch as `\u003coptimized out\u003e`, `\u003cinaccessible\u003e`, `\u003cpointers limit reached\u003e`.\n\nNote that a null pointer should not have members.\n\n5) An unnamed value:\n\n int* p = new int(7);\n\n { // Captured variable\n name: \"p\",\n value: \"0x00500500\",\n type: \"int*\",\n members { value: \"7\", type: \"int\" } }\n\n6) An unnamed pointer where the pointee was not captured:\n\n int* p = new int(7);\n int** pp = &p;\n\n { // Captured variable\n name: \"pp\",\n value: \"0x00500500\",\n type: \"int**\",\n members {\n value: \"0x00400400\",\n type: \"int*\"\n status {\n is_error: true,\n description: { format: \"unavailable\" } }\n }\n }\n }\n\nTo optimize computation, memory and network traffic, variables that\nrepeat in the output multiple times can be stored once in a shared\nvariable table and be referenced using the `var_table_index` field. The\nvariables stored in the shared table are nameless and are essentially\na partition of the complete variable. 
To reconstruct the complete\nvariable, merge the referencing variable with the referenced variable.\n\nWhen using the shared variable table, the following variables:\n\n T x = { 3, 7 };\n T* p = &x;\n T& r = x;\n\n { name: \"x\", var_table_index: 3, type: \"T\" } // Captured variables\n { name: \"p\", value \"0x00500500\", type=\"T*\", var_table_index: 3 }\n { name: \"r\", type=\"T&\", var_table_index: 3 }\n\n { // Shared variable table entry #3:\n members { name: \"m1\", value: \"3\", type: \"int\" },\n members { name: \"m2\", value: \"7\", type: \"int\" }\n }\n\nNote that the pointer address is stored with the referencing variable\nand not with the referenced variable. This allows the referenced variable\nto be shared between pointers and references.\n\nThe type field is optional. The debugger agent may or may not support it.", "type": "object", - "properties": {}, - "id": "UpdateActiveBreakpointResponse" + "properties": { + "name": { + "description": "Name of the variable, if any.", + "type": "string" + }, + "type": { + "description": "Variable type (e.g. `MyClass`). If the variable is split with\n`var_table_index`, `type` goes next to `value`. The interpretation of\na type is agent specific. It is recommended to include the dynamic type\nrather than a static type of an object.", + "type": "string" + }, + "varTableIndex": { + "description": "Reference to a variable in the shared variable table. More than\none variable can reference the same variable in the table. The\n`var_table_index` field is an index into `variable_table` in Breakpoint.", + "format": "int32", + "type": "integer" + }, + "value": { + "description": "Simple value of the variable.", + "type": "string" + }, + "members": { + "description": "Members contained or pointed to by the variable.", + "type": "array", + "items": { + "$ref": "Variable" + } + }, + "status": { + "$ref": "StatusMessage", + "description": "Status associated with the variable. This field will usually stay\nunset. A status of a single variable only applies to that variable or\nexpression. The rest of breakpoint data still remains valid. Variables\nmight be reported in error state even when breakpoint is not in final\nstate.\n\nThe message may refer to variable name with `refers_to` set to\n`VARIABLE_NAME`. Alternatively `refers_to` will be set to `VARIABLE_VALUE`.\nIn either case variable value and members will be unset.\n\nExample of error message applied to name: `Invalid expression syntax`.\n\nExample of information message applied to value: `Not captured`.\n\nExamples of error message applied to value:\n\n* `Malformed string`,\n* `Field f not found in class C`\n* `Null pointer dereference`" + } + }, + "id": "Variable" } }, - "revision": "20170117", - "basePath": "", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version_module": "True", - "canonicalName": "Cloud Debugger", - "discoveryVersion": "v1", - "baseUrl": "https://clouddebugger.googleapis.com/", - "name": "clouddebugger", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - } - }, - "documentationLink": "http://cloud.google.com/debugger", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v2", - "rootUrl": "https://clouddebugger.googleapis.com/", - "kind": "discovery#restDescription" + "protocol": "rest" } diff --git a/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-gen.go b/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-gen.go index 98f3871c3..08f2d9ce5 100644 --- a/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-gen.go +++ b/vendor/google.golang.org/api/clouddebugger/v2/clouddebugger-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Controller *ControllerService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewControllerService(s *Service) *ControllerService { rs := &ControllerService{s: s} rs.Debuggees = NewControllerDebuggeesService(s) @@ -1523,6 +1528,7 @@ func (c *ControllerDebuggeesRegisterCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.registerdebuggeerequest) if err != nil { @@ -1700,6 +1706,7 @@ func (c *ControllerDebuggeesBreakpointsListCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1855,6 +1862,7 @@ func (c *ControllerDebuggeesBreakpointsUpdateCall) doRequest(alt string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateactivebreakpointrequest) if err != nil { @@ -2029,6 +2037,7 @@ func (c *DebuggerDebuggeesListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2171,6 +2180,7 @@ func (c *DebuggerDebuggeesBreakpointsDeleteCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/debugger/debuggees/{debuggeeId}/breakpoints/{breakpointId}") @@ -2330,6 +2340,7 @@ func (c *DebuggerDebuggeesBreakpointsGetCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2541,6 +2552,7 @@ func (c *DebuggerDebuggeesBreakpointsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2713,6 +2725,7 @@ func (c *DebuggerDebuggeesBreakpointsSetCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.breakpoint) if err != nil { diff --git a/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-api.json b/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-api.json index bfbc8a7cb..ddf4ff951 100644 --- a/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-api.json +++ b/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-api.json @@ -1,204 +1,109 @@ { - "id": "clouderrorreporting:v1beta1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "description": "Stackdriver Error Reporting groups and counts similar errors from cloud services. 
The Stackdriver Error Reporting API provides a way to report new errors and read access to error groups and their associated errors.\n", - "protocol": "rest", "title": "Stackdriver Error Reporting API", + "ownerName": "Google", "resources": { "projects": { + "methods": { + "deleteEvents": { + "response": { + "$ref": "DeleteEventsResponse" + }, + "parameterOrder": [ + "projectName" + ], + "httpMethod": "DELETE", + "parameters": { + "projectName": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "[Required] The resource name of the Google Cloud Platform project. Written\nas `projects/` plus the\n[Google Cloud Platform project\nID](https://support.google.com/cloud/answer/6158840).\nExample: `projects/my-project-123`." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/events", + "path": "v1beta1/{+projectName}/events", + "id": "clouderrorreporting.projects.deleteEvents", + "description": "Deletes all error events of a given project." + } + }, "resources": { - "events": { + "groups": { "methods": { - "list": { - "id": "clouderrorreporting.projects.events.list", - "response": { - "$ref": "ListEventsResponse" - }, - "parameterOrder": [ - "projectName" - ], - "description": "Lists the specified events.", - "flatPath": "v1beta1/projects/{projectsId}/events", + "get": { + "flatPath": "v1beta1/projects/{projectsId}/groups/{groupsId}", + "id": "clouderrorreporting.projects.groups.get", + "path": "v1beta1/{+groupName}", + "description": "Get the specified group.", "httpMethod": "GET", - "parameters": { - "timeRange.period": { - "description": "Restricts the query to the specified time range.", - "enum": [ - "PERIOD_UNSPECIFIED", - "PERIOD_1_HOUR", - "PERIOD_6_HOURS", - "PERIOD_1_DAY", - "PERIOD_1_WEEK", - "PERIOD_30_DAYS" - ], - "location": "query", - "type": "string" - }, - "serviceFilter.resourceType": { - "description": "[Optional] The exact value to match against\n[`ServiceContext.resource_type`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.resource_type).", - "location": "query", - "type": "string" - }, - "projectName": { - "description": "[Required] The resource name of the Google Cloud Platform project. 
Written\nas `projects/` plus the\n[Google Cloud Platform project\nID](https://support.google.com/cloud/answer/6158840).\nExample: `projects/my-project-123`.", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path", - "type": "string" - }, - "serviceFilter.service": { - "description": "[Optional] The exact value to match against\n[`ServiceContext.service`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.service).", - "location": "query", - "type": "string" - }, - "groupId": { - "description": "[Required] The group for which events shall be returned.", - "location": "query", - "type": "string" - }, - "serviceFilter.version": { - "description": "[Optional] The exact value to match against\n[`ServiceContext.version`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.version).", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "[Optional] The maximum number of results to return per response.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "pageToken": { - "description": "[Optional] A `next_page_token` provided by a previous response.", - "location": "query", - "type": "string" - } - }, - "path": "v1beta1/{+projectName}/events", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "report": { - "id": "clouderrorreporting.projects.events.report", - "response": { - "$ref": "ReportErrorEventResponse" - }, "parameterOrder": [ - "projectName" + "groupName" ], - "description": "Report an individual error event.\n\nThis endpoint accepts \u003cstrong\u003eeither\u003c/strong\u003e an OAuth token,\n\u003cstrong\u003eor\u003c/strong\u003e an\n\u003ca href=\"https://support.google.com/cloud/answer/6158862\"\u003eAPI key\u003c/a\u003e\nfor authentication. To use an API key, append it to the URL as the value of\na `key` parameter. For example:\n\u003cpre\u003ePOST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456\u003c/pre\u003e", - "request": { - "$ref": "ReportedErrorEvent" + "response": { + "$ref": "ErrorGroup" }, - "flatPath": "v1beta1/projects/{projectsId}/events:report", - "httpMethod": "POST", "parameters": { - "projectName": { - "description": "[Required] The resource name of the Google Cloud Platform project. Written\nas `projects/` plus the\n[Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840).\nExample: `projects/my-project-123`.", - "required": true, - "pattern": "^projects/[^/]+$", + "groupName": { "location": "path", - "type": "string" + "description": "[Required] The group resource name. 
Written as\n\u003ccode\u003eprojects/\u003cvar\u003eprojectID\u003c/var\u003e/groups/\u003cvar\u003egroup_name\u003c/var\u003e\u003c/code\u003e.\nCall\n\u003ca href=\"/error-reporting/reference/rest/v1beta1/projects.groupStats/list\"\u003e\n\u003ccode\u003egroupStats.list\u003c/code\u003e\u003c/a\u003e to return a list of groups belonging to\nthis project.\n\nExample: \u003ccode\u003eprojects/my-project-123/groups/my-group\u003c/code\u003e", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$" } }, - "path": "v1beta1/{+projectName}/events:report", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] - } - } - }, - "groups": { - "methods": { + }, "update": { - "id": "clouderrorreporting.projects.groups.update", + "request": { + "$ref": "ErrorGroup" + }, + "description": "Replace the data for the specified group.\nFails if the group does not exist.", "response": { "$ref": "ErrorGroup" }, "parameterOrder": [ "name" ], - "description": "Replace the data for the specified group.\nFails if the group does not exist.", - "request": { - "$ref": "ErrorGroup" - }, - "flatPath": "v1beta1/projects/{projectsId}/groups/{groupsId}", "httpMethod": "PUT", "parameters": { "name": { + "location": "path", "description": "The group resource name.\nExample: \u003ccode\u003eprojects/my-project-123/groups/my-groupid\u003c/code\u003e", "required": true, - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$" } }, - "path": "v1beta1/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "id": "clouderrorreporting.projects.groups.get", - "response": { - "$ref": "ErrorGroup" - }, - "parameterOrder": [ - "groupName" ], - "description": "Get the specified group.", "flatPath": "v1beta1/projects/{projectsId}/groups/{groupsId}", - "httpMethod": "GET", - "parameters": { - "groupName": { - "description": "[Required] The group resource name. 
Written as\n\u003ccode\u003eprojects/\u003cvar\u003eprojectID\u003c/var\u003e/groups/\u003cvar\u003egroup_name\u003c/var\u003e\u003c/code\u003e.\nCall\n\u003ca href=\"/error-reporting/reference/rest/v1beta1/projects.groupStats/list\"\u003e\n\u003ccode\u003egroupStats.list\u003c/code\u003e\u003c/a\u003e to return a list of groups belonging to\nthis project.\n\nExample: \u003ccode\u003eprojects/my-project-123/groups/my-group\u003c/code\u003e", - "required": true, - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1beta1/{+groupName}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "path": "v1beta1/{+name}", + "id": "clouderrorreporting.projects.groups.update" } } }, "groupStats": { "methods": { "list": { - "id": "clouderrorreporting.projects.groupStats.list", "response": { "$ref": "ListGroupStatsResponse" }, "parameterOrder": [ "projectName" ], - "description": "Lists the specified groups.", - "flatPath": "v1beta1/projects/{projectsId}/groupStats", "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { - "alignment": { - "description": "[Optional] The alignment of the timed counts to be returned.\nDefault is `ALIGNMENT_EQUAL_AT_END`.", - "enum": [ - "ERROR_COUNT_ALIGNMENT_UNSPECIFIED", - "ALIGNMENT_EQUAL_ROUNDED", - "ALIGNMENT_EQUAL_AT_END" - ], - "location": "query", - "type": "string" - }, "timeRange.period": { - "description": "Restricts the query to the specified time range.", + "location": "query", "enum": [ "PERIOD_UNSPECIFIED", "PERIOD_1_HOUR", @@ -207,23 +112,44 @@ "PERIOD_1_WEEK", "PERIOD_30_DAYS" ], - "location": "query", + "description": "Restricts the query to the specified time range.", "type": "string" }, - "serviceFilter.resourceType": { - "description": "[Optional] The exact value to match against\n[`ServiceContext.resource_type`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.resource_type).", + "alignment": { + "type": "string", "location": "query", - "type": "string" + "enum": [ + "ERROR_COUNT_ALIGNMENT_UNSPECIFIED", + "ALIGNMENT_EQUAL_ROUNDED", + "ALIGNMENT_EQUAL_AT_END" + ], + "description": "[Optional] The alignment of the timed counts to be returned.\nDefault is `ALIGNMENT_EQUAL_AT_END`." }, - "projectName": { - "description": "[Required] The resource name of the Google Cloud Platform project. Written\nas \u003ccode\u003eprojects/\u003c/code\u003e plus the\n\u003ca href=\"https://support.google.com/cloud/answer/6158840\"\u003eGoogle Cloud\nPlatform project ID\u003c/a\u003e.\n\nExample: \u003ccode\u003eprojects/my-project-123\u003c/code\u003e.", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path", - "type": "string" + "groupId": { + "location": "query", + "description": "[Optional] List all \u003ccode\u003eErrorGroupStats\u003c/code\u003e with these IDs.", + "type": "string", + "repeated": true + }, + "serviceFilter.service": { + "type": "string", + "location": "query", + "description": "[Optional] The exact value to match against\n[`ServiceContext.service`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.service)." 
+ }, + "pageSize": { + "location": "query", + "description": "[Optional] The maximum number of results to return per response.\nDefault is 20.", + "format": "int32", + "type": "integer" + }, + "serviceFilter.version": { + "type": "string", + "location": "query", + "description": "[Optional] The exact value to match against\n[`ServiceContext.version`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.version)." }, "order": { - "description": "[Optional] The sort order in which the results are returned.\nDefault is `COUNT_DESC`.", + "type": "string", + "location": "query", "enum": [ "GROUP_ORDER_UNSPECIFIED", "COUNT_DESC", @@ -231,167 +157,359 @@ "CREATED_DESC", "AFFECTED_USERS_DESC" ], - "location": "query", - "type": "string" - }, - "groupId": { - "description": "[Optional] List all \u003ccode\u003eErrorGroupStats\u003c/code\u003e with these IDs.", - "repeated": true, - "location": "query", - "type": "string" + "description": "[Optional] The sort order in which the results are returned.\nDefault is `COUNT_DESC`." }, - "serviceFilter.service": { - "description": "[Optional] The exact value to match against\n[`ServiceContext.service`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.service).", + "serviceFilter.resourceType": { "location": "query", + "description": "[Optional] The exact value to match against\n[`ServiceContext.resource_type`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.resource_type).", "type": "string" }, "alignmentTime": { - "description": "[Optional] Time where the timed counts shall be aligned if rounded\nalignment is chosen. Default is 00:00 UTC.", - "location": "query", - "type": "string", - "format": "google-datetime" - }, - "serviceFilter.version": { - "description": "[Optional] The exact value to match against\n[`ServiceContext.version`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.version).", "location": "query", + "description": "[Optional] Time where the timed counts shall be aligned if rounded\nalignment is chosen. Default is 00:00 UTC.", + "format": "google-datetime", "type": "string" }, - "pageSize": { - "description": "[Optional] The maximum number of results to return per response.\nDefault is 20.", - "location": "query", - "type": "integer", - "format": "int32" + "projectName": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "[Required] The resource name of the Google Cloud Platform project. Written\nas \u003ccode\u003eprojects/\u003c/code\u003e plus the\n\u003ca href=\"https://support.google.com/cloud/answer/6158840\"\u003eGoogle Cloud\nPlatform project ID\u003c/a\u003e.\n\nExample: \u003ccode\u003eprojects/my-project-123\u003c/code\u003e." }, "timedCountDuration": { - "description": "[Optional] The preferred duration for a single returned `TimedCount`.\nIf not set, no timed counts are returned.", "location": "query", - "type": "string", - "format": "google-duration" + "description": "[Optional] The preferred duration for a single returned `TimedCount`.\nIf not set, no timed counts are returned.", + "format": "google-duration", + "type": "string" }, "pageToken": { - "description": "[Optional] A `next_page_token` provided by a previous response. To view\nadditional results, pass this token along with the identical query\nparameters as the first request.", "location": "query", + "description": "[Optional] A `next_page_token` provided by a previous response. 
To view\nadditional results, pass this token along with the identical query\nparameters as the first request.", "type": "string" } }, + "flatPath": "v1beta1/projects/{projectsId}/groupStats", "path": "v1beta1/{+projectName}/groupStats", + "id": "clouderrorreporting.projects.groupStats.list", + "description": "Lists the specified groups." + } + } + }, + "events": { + "methods": { + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "projectName" + ], + "response": { + "$ref": "ListEventsResponse" + }, + "parameters": { + "serviceFilter.resourceType": { + "type": "string", + "location": "query", + "description": "[Optional] The exact value to match against\n[`ServiceContext.resource_type`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.resource_type)." + }, + "timeRange.period": { + "location": "query", + "enum": [ + "PERIOD_UNSPECIFIED", + "PERIOD_1_HOUR", + "PERIOD_6_HOURS", + "PERIOD_1_DAY", + "PERIOD_1_WEEK", + "PERIOD_30_DAYS" + ], + "description": "Restricts the query to the specified time range.", + "type": "string" + }, + "projectName": { + "location": "path", + "description": "[Required] The resource name of the Google Cloud Platform project. Written\nas `projects/` plus the\n[Google Cloud Platform project\nID](https://support.google.com/cloud/answer/6158840).\nExample: `projects/my-project-123`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + }, + "groupId": { + "type": "string", + "location": "query", + "description": "[Required] The group for which events shall be returned." + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "[Optional] A `next_page_token` provided by a previous response." + }, + "serviceFilter.service": { + "location": "query", + "description": "[Optional] The exact value to match against\n[`ServiceContext.service`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.service).", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "[Optional] The maximum number of results to return per response.", + "format": "int32", + "type": "integer" + }, + "serviceFilter.version": { + "location": "query", + "description": "[Optional] The exact value to match against\n[`ServiceContext.version`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.version).", + "type": "string" + } + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] + ], + "flatPath": "v1beta1/projects/{projectsId}/events", + "id": "clouderrorreporting.projects.events.list", + "path": "v1beta1/{+projectName}/events", + "description": "Lists the specified events." + }, + "report": { + "request": { + "$ref": "ReportedErrorEvent" + }, + "description": "Report an individual error event.\n\nThis endpoint accepts \u003cstrong\u003eeither\u003c/strong\u003e an OAuth token,\n\u003cstrong\u003eor\u003c/strong\u003e an\n\u003ca href=\"https://support.google.com/cloud/answer/6158862\"\u003eAPI key\u003c/a\u003e\nfor authentication. To use an API key, append it to the URL as the value of\na `key` parameter. For example:\n\u003cpre\u003ePOST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456\u003c/pre\u003e", + "httpMethod": "POST", + "parameterOrder": [ + "projectName" + ], + "response": { + "$ref": "ReportErrorEventResponse" + }, + "parameters": { + "projectName": { + "location": "path", + "description": "[Required] The resource name of the Google Cloud Platform project. 
Written\nas `projects/` plus the\n[Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840).\nExample: `projects/my-project-123`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/events:report", + "id": "clouderrorreporting.projects.events.report", + "path": "v1beta1/{+projectName}/events:report" } } } - }, - "methods": { - "deleteEvents": { - "id": "clouderrorreporting.projects.deleteEvents", - "response": { - "$ref": "DeleteEventsResponse" - }, - "parameterOrder": [ - "projectName" - ], - "description": "Deletes all error events of a given project.", - "flatPath": "v1beta1/projects/{projectsId}/events", - "httpMethod": "DELETE", - "parameters": { - "projectName": { - "description": "[Required] The resource name of the Google Cloud Platform project. Written\nas `projects/` plus the\n[Google Cloud Platform project\nID](https://support.google.com/cloud/answer/6158840).\nExample: `projects/my-project-123`.", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1beta1/{+projectName}/events", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } } } }, + "parameters": { + "upload_protocol": { + "type": "string", + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\")." + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "$.xgafv": { + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format." + }, + "alt": { + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Pretty-print response." 
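The events:report method described above accepts either an OAuth token or an API key, and the generated Go client updated later in this diff (clouderrorreporting-gen.go) wraps it. Below is a minimal, hedged sketch of reporting a single event with that vendored client; the project ID, service name, and the use of Application Default Credentials are illustrative assumptions, not part of this change.

    // Sketch only: report one error event via the vendored
    // clouderrorreporting/v1beta1 client. Names marked "placeholder"
    // are assumptions for illustration.
    package main

    import (
        "fmt"
        "log"
        "runtime/debug"

        "golang.org/x/net/context"
        "golang.org/x/oauth2/google"
        clouderrorreporting "google.golang.org/api/clouderrorreporting/v1beta1"
    )

    func main() {
        ctx := context.Background()

        // An OAuth2-authorized client with the cloud-platform scope.
        httpClient, err := google.DefaultClient(ctx, clouderrorreporting.CloudPlatformScope)
        if err != nil {
            log.Fatal(err)
        }

        svc, err := clouderrorreporting.New(httpClient)
        if err != nil {
            log.Fatal(err)
        }

        event := &clouderrorreporting.ReportedErrorEvent{
            ServiceContext: &clouderrorreporting.ServiceContext{
                Service: "example-service", // placeholder
                Version: "1.0.0",           // placeholder
            },
            // Per the ReportedErrorEvent.message description, a Go report
            // should contain a header line plus a runtime stack trace.
            Message: fmt.Sprintf("example error: something failed\n%s", debug.Stack()),
        }

        if _, err := svc.Projects.Events.Report("projects/my-project-123", event).Do(); err != nil {
            log.Fatal(err)
        }
    }

As the method description notes, the same request can instead be authenticated by appending an API key as a ?key= query parameter rather than using an OAuth token.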
+ }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + } + }, + "version": "v1beta1", + "baseUrl": "https://clouderrorreporting.googleapis.com/", + "servicePath": "", + "description": "Stackdriver Error Reporting groups and counts similar errors from cloud services. The Stackdriver Error Reporting API provides a way to report new errors and read access to error groups and their associated errors.\n", + "kind": "discovery#restDescription", + "basePath": "", + "documentationLink": "https://cloud.google.com/error-reporting/", + "revision": "20170113", + "id": "clouderrorreporting:v1beta1", + "discoveryVersion": "v1", + "version_module": "True", "schemas": { - "ReportedErrorEvent": { - "description": "An error event which is reported to the Error Reporting system.", + "DeleteEventsResponse": { + "type": "object", + "properties": {}, + "id": "DeleteEventsResponse", + "description": "Response message for deleting error events." + }, + "ErrorEvent": { "type": "object", "properties": { - "serviceContext": { - "description": "[Required] The service context in which this error has occurred.", - "$ref": "ServiceContext" - }, "context": { - "description": "[Optional] A description of the context in which the error occurred.", - "$ref": "ErrorContext" + "$ref": "ErrorContext", + "description": "Data about the context in which the error occurred." + }, + "message": { + "description": "The stack trace that was reported or logged by the service.", + "type": "string" + }, + "serviceContext": { + "$ref": "ServiceContext", + "description": "The `ServiceContext` for which this error was reported." }, "eventTime": { - "description": "[Optional] Time when the event occurred.\nIf not provided, the time when the event was received by the\nError Reporting system will be used.", "type": "string", + "description": "Time when the event occurred as provided in the error report.\nIf the report did not contain a timestamp, the time the error was received\nby the Error Reporting system is used.", "format": "google-datetime" + } + }, + "id": "ErrorEvent", + "description": "An error event which is returned by the Error Reporting system." + }, + "ReportedErrorEvent": { + "type": "object", + "properties": { + "context": { + "$ref": "ErrorContext", + "description": "[Optional] A description of the context in which the error occurred." }, "message": { - "description": "[Required] A message describing the error. 
The message can contain an\nexception stack in one of the supported programming languages and formats.\nIn that case, the message is parsed and detailed exception information\nis returned when retrieving the error event again.", - "type": "string" + "type": "string", + "description": "[Required] The error message.\nIf no `context.reportLocation` is provided, the message must contain a\nheader (typically consisting of the exception type name and an error\nmessage) and an exception stack trace in one of the supported programming\nlanguages and formats.\nSupported languages are Java, Python, JavaScript, Ruby, C#, PHP, and Go.\nSupported stack trace formats are:\n\n* **Java**: Must be the return value of [`Throwable.printStackTrace()`](https://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html#printStackTrace%28%29).\n* **Python**: Must be the return value of [`traceback.format_exc()`](https://docs.python.org/2/library/traceback.html#traceback.format_exc).\n* **JavaScript**: Must be the value of [`error.stack`](https://github.com/v8/v8/wiki/Stack-Trace-API)\nas returned by V8.\n* **Ruby**: Must contain frames returned by [`Exception.backtrace`](https://ruby-doc.org/core-2.2.0/Exception.html#method-i-backtrace).\n* **C#**: Must be the return value of [`Exception.ToString()`](https://msdn.microsoft.com/en-us/library/system.exception.tostring.aspx).\n* **PHP**: Must start with `PHP (Notice|Parse error|Fatal error|Warning)`\nand contain the result of [`(string)$exception`](http://php.net/manual/en/exception.tostring.php).\n* **Go**: Must be the return value of [`runtime.Stack()`](https://golang.org/pkg/runtime/debug/#Stack)." + }, + "serviceContext": { + "$ref": "ServiceContext", + "description": "[Required] The service context in which this error has occurred." + }, + "eventTime": { + "type": "string", + "description": "[Optional] Time when the event occurred.\nIf not provided, the time when the event was received by the\nError Reporting system will be used.", + "format": "google-datetime" } }, - "id": "ReportedErrorEvent" + "id": "ReportedErrorEvent", + "description": "An error event which is reported to the Error Reporting system." }, - "SourceLocation": { - "description": "Indicates a location in the source code of the service for which\nerrors are reported.\nThis data should be provided by the application when reporting an error,\nunless the error report has been generated automatically from Google App\nEngine logs. All fields are optional.", + "ErrorContext": { + "description": "A description of the context in which an error occurred.\nThis data should be provided by the application when reporting an error,\nunless the\nerror report has been generated automatically from Google App Engine logs.", "type": "object", "properties": { - "filePath": { - "description": "The source code filename, which can include a truncated relative\npath, or a full path from a production machine.", - "type": "string" + "reportLocation": { + "$ref": "SourceLocation", + "description": "The location in the source code where the decision was made to\nreport the error, usually the place where it was logged.\nFor a logged exception this would be the source line where the\nexception is logged, usually close to the place where it was\ncaught. This value is in contrast to `Exception.cause_location`,\nwhich describes the source line where the exception was thrown." 
}, - "functionName": { - "description": "Human-readable name of a function or method.\nThe value can include optional context like the class or package name.\nFor example, `my.package.MyClass.method` in case of Java.", - "type": "string" + "httpRequest": { + "$ref": "HttpRequestContext", + "description": "The HTTP request which was processed when the error was\ntriggered." }, - "lineNumber": { - "description": "1-based. 0 indicates that the line number is unknown.", - "type": "integer", - "format": "int32" + "user": { + "description": "The user who caused or was affected by the crash.\nThis can be a user ID, an email address, or an arbitrary token that\nuniquely identifies the user.\nWhen sending an error report, leave this field empty if the user was not\nlogged in. In this case the\nError Reporting system will use other data, such as remote IP address, to\ndistinguish affected users. See `affected_users_count` in\n`ErrorGroupStats`.", + "type": "string" + } + }, + "id": "ErrorContext" + }, + "TrackingIssue": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "A URL pointing to a related entry in an issue tracking system.\nExample: https://github.com/user/project/issues/4" } }, - "id": "SourceLocation" + "id": "TrackingIssue", + "description": "Information related to tracking the progress on resolving the error." }, "ErrorGroupStats": { - "description": "Data extracted for a specific group based on certain filter criteria,\nsuch as a given time period and/or service filter.", "type": "object", "properties": { - "representative": { - "description": "An arbitrary event that is chosen as representative for the whole group.\nThe representative event is intended to be used as a quick preview for\nthe whole group. Events in the group are usually sufficiently similar\nto each other such that showing an arbitrary representative provides\ninsight into the characteristics of the group as a whole.", - "$ref": "ErrorEvent" - }, - "numAffectedServices": { - "description": "The total number of services with a non-zero error count for the given\nfilter criteria.", - "type": "integer", - "format": "int32" + "group": { + "$ref": "ErrorGroup", + "description": "Group data that is independent of the filter criteria." }, - "affectedUsersCount": { - "description": "Approximate number of affected users in the given group that\nmatch the filter criteria.\nUsers are distinguished by data in the `ErrorContext` of the\nindividual error events, such as their login name or their remote\nIP address in case of HTTP requests.\nThe number of affected users can be zero even if the number of\nerrors is non-zero if no data was provided from which the\naffected user could be deduced.\nUsers are counted based on data in the request\ncontext that was provided in the error report. 
If more users are\nimplicitly affected, such as due to a crash of the whole service,\nthis is not reflected here.", + "firstSeenTime": { "type": "string", - "format": "int64" + "description": "Approximate first occurrence that was ever seen for this group\nand which matches the given filter criteria, ignoring the\ntime_range that was specified in the request.", + "format": "google-datetime" }, "count": { - "description": "Approximate total number of events in the given group that match\nthe filter criteria.", "type": "string", + "description": "Approximate total number of events in the given group that match\nthe filter criteria.", "format": "int64" }, - "firstSeenTime": { - "description": "Approximate first occurrence that was ever seen for this group\nand which matches the given filter criteria, ignoring the\ntime_range that was specified in the request.", - "type": "string", - "format": "google-datetime" - }, "lastSeenTime": { - "description": "Approximate last occurrence that was ever seen for this group and\nwhich matches the given filter criteria, ignoring the time_range\nthat was specified in the request.", "type": "string", + "description": "Approximate last occurrence that was ever seen for this group and\nwhich matches the given filter criteria, ignoring the time_range\nthat was specified in the request.", "format": "google-datetime" }, - "group": { - "description": "Group data that is independent of the filter criteria.", - "$ref": "ErrorGroup" + "affectedUsersCount": { + "type": "string", + "description": "Approximate number of affected users in the given group that\nmatch the filter criteria.\nUsers are distinguished by data in the `ErrorContext` of the\nindividual error events, such as their login name or their remote\nIP address in case of HTTP requests.\nThe number of affected users can be zero even if the number of\nerrors is non-zero if no data was provided from which the\naffected user could be deduced.\nUsers are counted based on data in the request\ncontext that was provided in the error report. If more users are\nimplicitly affected, such as due to a crash of the whole service,\nthis is not reflected here.", + "format": "int64" }, "affectedServices": { "description": "Service contexts with a non-zero error count for the given filter\ncriteria. This list can be truncated if multiple services are affected.\nRefer to `num_affected_services` for the total count.", @@ -400,6 +518,15 @@ "$ref": "ServiceContext" } }, + "numAffectedServices": { + "description": "The total number of services with a non-zero error count for the given\nfilter criteria.", + "format": "int32", + "type": "integer" + }, + "representative": { + "$ref": "ErrorEvent", + "description": "An arbitrary event that is chosen as representative for the whole group.\nThe representative event is intended to be used as a quick preview for\nthe whole group. Events in the group are usually sufficiently similar\nto each other such that showing an arbitrary representative provides\ninsight into the characteristics of the group as a whole." + }, "timedCounts": { "description": "Approximate number of occurrences over time.\nTimed counts returned by ListGroups are guaranteed to be:\n\n- Inside the requested time interval\n- Non-overlapping, and\n- Ordered by ascending time.", "type": "array", @@ -408,181 +535,128 @@ } } }, - "id": "ErrorGroupStats" + "id": "ErrorGroupStats", + "description": "Data extracted for a specific group based on certain filter criteria,\nsuch as a given time period and/or service filter." 
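On the read side, the groupStats.list parameters shown earlier (timeRange.period, order, pageSize, serviceFilter.*) surface as setter methods on ProjectsGroupStatsListCall in the generated client. A hedged sketch follows, reusing the authorized svc and imports from the previous example; the setter names assume the usual generated-client naming for those query parameters, and the project ID is a placeholder.

    // Sketch only: list the most frequent error groups for the last day.
    func printTopGroups(svc *clouderrorreporting.Service) error {
        resp, err := svc.Projects.GroupStats.
            List("projects/my-project-123"). // placeholder project
            TimeRangePeriod("PERIOD_1_DAY").
            Order("COUNT_DESC").
            Do()
        if err != nil {
            return err
        }
        for _, stat := range resp.ErrorGroupStats {
            // Count and AffectedUsersCount are int64 in the generated structs
            // (the JSON schema marks them as format "int64" strings).
            fmt.Printf("group %s: %d events, %d affected users\n",
                stat.Group.GroupId, stat.Count, stat.AffectedUsersCount)
        }
        return nil
    }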
}, - "ErrorContext": { - "description": "A description of the context in which an error occurred.\nThis data should be provided by the application when reporting an error,\nunless the\nerror report has been generated automatically from Google App Engine logs.", + "ListEventsResponse": { "type": "object", "properties": { - "httpRequest": { - "description": "The HTTP request which was processed when the error was\ntriggered.", - "$ref": "HttpRequestContext" + "nextPageToken": { + "type": "string", + "description": "If non-empty, more results are available.\nPass this token, along with the same query parameters as the first\nrequest, to view the next page of results." }, - "reportLocation": { - "description": "The location in the source code where the decision was made to\nreport the error, usually the place where it was logged.\nFor a logged exception this would be the source line where the\nexception is logged, usually close to the place where it was\ncaught. This value is in contrast to `Exception.cause_location`,\nwhich describes the source line where the exception was thrown.", - "$ref": "SourceLocation" + "timeRangeBegin": { + "type": "string", + "description": "The timestamp specifies the start time to which the request was restricted.", + "format": "google-datetime" }, - "user": { - "description": "The user who caused or was affected by the crash.\nThis can be a user ID, an email address, or an arbitrary token that\nuniquely identifies the user.\nWhen sending an error report, leave this field empty if the user was not\nlogged in. In this case the\nError Reporting system will use other data, such as remote IP address, to\ndistinguish affected users. See `affected_users_count` in\n`ErrorGroupStats`.", - "type": "string" + "errorEvents": { + "type": "array", + "items": { + "$ref": "ErrorEvent" + }, + "description": "The error events which match the given request." } }, - "id": "ErrorContext" + "id": "ListEventsResponse", + "description": "Contains a set of requested error events." }, - "ServiceContext": { - "description": "Describes a running service that sends errors.\nIts version changes over time and multiple versions can run in parallel.", + "TimedCount": { "type": "object", "properties": { - "resourceType": { - "description": "Type of the MonitoredResource. List of possible values:\nhttps://cloud.google.com/monitoring/api/resources\n\nValue is set automatically for incoming errors and must not be set when\nreporting errors.", - "type": "string" + "endTime": { + "type": "string", + "description": "End of the time period to which `count` refers (excluded).", + "format": "google-datetime" }, - "service": { - "description": "An identifier of the service, such as the name of the\nexecutable, job, or Google App Engine service name. 
This field is expected\nto have a low number of values that are relatively stable over time, as\nopposed to `version`, which can be changed whenever new code is deployed.\n\nContains the service name for error reports extracted from Google\nApp Engine logs or `default` if the App Engine default service is used.", + "count": { + "description": "Approximate number of occurrences in the given time period.", + "format": "int64", "type": "string" }, - "version": { - "description": "Represents the source code version that the developer provided,\nwhich could represent a version label or a Git SHA-1 hash, for example.", - "type": "string" + "startTime": { + "type": "string", + "description": "Start of the time period to which `count` refers (included).", + "format": "google-datetime" } }, - "id": "ServiceContext" + "id": "TimedCount", + "description": "The number of errors in a given time period.\nAll numbers are approximate since the error events are sampled\nbefore counting them." }, "ErrorGroup": { "description": "Description of a group of similar error events.", "type": "object", "properties": { - "groupId": { - "description": "Group IDs are unique for a given project. If the same kind of error\noccurs in different service contexts, it will receive the same group ID.", - "type": "string" - }, "name": { "description": "The group resource name.\nExample: \u003ccode\u003eprojects/my-project-123/groups/my-groupid\u003c/code\u003e", "type": "string" }, + "groupId": { + "description": "Group IDs are unique for a given project. If the same kind of error\noccurs in different service contexts, it will receive the same group ID.", + "type": "string" + }, "trackingIssues": { - "description": "Associated tracking issues.", "type": "array", "items": { "$ref": "TrackingIssue" - } + }, + "description": "Associated tracking issues." } }, "id": "ErrorGroup" }, - "ReportErrorEventResponse": { - "description": "Response for reporting an individual error event.\nData may be added to this message in the future.", - "type": "object", - "properties": {}, - "id": "ReportErrorEventResponse" - }, - "TrackingIssue": { - "description": "Information related to tracking the progress on resolving the error.", - "type": "object", - "properties": { - "url": { - "description": "A URL pointing to a related entry in an issue tracking system.\nExample: https://github.com/user/project/issues/4", - "type": "string" - } - }, - "id": "TrackingIssue" - }, - "DeleteEventsResponse": { - "description": "Response message for deleting error events.", - "type": "object", - "properties": {}, - "id": "DeleteEventsResponse" - }, - "ErrorEvent": { - "description": "An error event which is returned by the Error Reporting system.", + "ServiceContext": { + "description": "Describes a running service that sends errors.\nIts version changes over time and multiple versions can run in parallel.", "type": "object", "properties": { - "serviceContext": { - "description": "The `ServiceContext` for which this error was reported.", - "$ref": "ServiceContext" - }, - "context": { - "description": "Data about the context in which the error occurred.", - "$ref": "ErrorContext" - }, - "eventTime": { - "description": "Time when the event occurred as provided in the error report.\nIf the report did not contain a timestamp, the time the error was received\nby the Error Reporting system is used.", + "resourceType": { "type": "string", - "format": "google-datetime" + "description": "Type of the MonitoredResource. 
List of possible values:\nhttps://cloud.google.com/monitoring/api/resources\n\nValue is set automatically for incoming errors and must not be set when\nreporting errors." }, - "message": { - "description": "The stack trace that was reported or logged by the service.", + "version": { + "description": "Represents the source code version that the developer provided,\nwhich could represent a version label or a Git SHA-1 hash, for example.", + "type": "string" + }, + "service": { + "description": "An identifier of the service, such as the name of the\nexecutable, job, or Google App Engine service name. This field is expected\nto have a low number of values that are relatively stable over time, as\nopposed to `version`, which can be changed whenever new code is deployed.\n\nContains the service name for error reports extracted from Google\nApp Engine logs or `default` if the App Engine default service is used.", "type": "string" } }, - "id": "ErrorEvent" + "id": "ServiceContext" }, - "ListEventsResponse": { - "description": "Contains a set of requested error events.", + "SourceLocation": { "type": "object", "properties": { - "nextPageToken": { - "description": "If non-empty, more results are available.\nPass this token, along with the same query parameters as the first\nrequest, to view the next page of results.", + "functionName": { + "description": "Human-readable name of a function or method.\nThe value can include optional context like the class or package name.\nFor example, `my.package.MyClass.method` in case of Java.", "type": "string" }, - "timeRangeBegin": { - "description": "The timestamp specifies the start time to which the request was restricted.", - "type": "string", - "format": "google-datetime" + "filePath": { + "description": "The source code filename, which can include a truncated relative\npath, or a full path from a production machine.", + "type": "string" }, - "errorEvents": { - "description": "The error events which match the given request.", - "type": "array", - "items": { - "$ref": "ErrorEvent" - } + "lineNumber": { + "type": "integer", + "description": "1-based. 0 indicates that the line number is unknown.", + "format": "int32" } }, - "id": "ListEventsResponse" + "id": "SourceLocation", + "description": "Indicates a location in the source code of the service for which\nerrors are reported.\nThis data should be provided by the application when reporting an error,\nunless the error report has been generated automatically from Google App\nEngine logs. All fields are optional." }, - "TimedCount": { - "description": "The number of errors in a given time period.\nAll numbers are approximate since the error events are sampled\nbefore counting them.", + "ReportErrorEventResponse": { "type": "object", - "properties": { - "startTime": { - "description": "Start of the time period to which `count` refers (included).", - "type": "string", - "format": "google-datetime" - }, - "endTime": { - "description": "End of the time period to which `count` refers (excluded).", - "type": "string", - "format": "google-datetime" - }, - "count": { - "description": "Approximate number of occurrences in the given time period.", - "type": "string", - "format": "int64" - } - }, - "id": "TimedCount" + "properties": {}, + "id": "ReportErrorEventResponse", + "description": "Response for reporting an individual error event.\nData may be added to this message in the future." 
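Per the ReportedErrorEvent.message description, a report whose message carries no parseable stack trace should instead supply an explicit context.reportLocation. The sketch below uses the ErrorContext and SourceLocation schemas above; the struct field names assume the standard generated-client mapping, and all file, function, and line values are illustrative.

    // Sketch only: report an error without a stack trace by attaching an
    // explicit report location. Reuses svc and imports from the first example.
    event := &clouderrorreporting.ReportedErrorEvent{
        ServiceContext: &clouderrorreporting.ServiceContext{Service: "example-service"}, // placeholder
        Message:        "example error: upstream returned HTTP 502",
        Context: &clouderrorreporting.ErrorContext{
            ReportLocation: &clouderrorreporting.SourceLocation{
                FilePath:     "handlers/proxy.go", // placeholder
                FunctionName: "handleProxy",       // placeholder
                LineNumber:   42,                  // placeholder
            },
        },
    }
    _, err := svc.Projects.Events.Report("projects/my-project-123", event).Do()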
}, "HttpRequestContext": { - "description": "HTTP request data that is related to a reported error.\nThis data should be provided by the application when reporting an error,\nunless the\nerror report has been generated automatically from Google App Engine logs.", "type": "object", "properties": { - "method": { - "description": "The type of HTTP request, such as `GET`, `POST`, etc.", - "type": "string" - }, - "responseStatusCode": { - "description": "The HTTP response status code for the request.", - "type": "integer", - "format": "int32" - }, "remoteIp": { "description": "The IP address from which the request originated.\nThis can be IPv4, IPv6, or a token which is derived from the\nIP address, depending on the data that has been provided\nin the error report.", "type": "string" }, - "url": { - "description": "The URL of the request.", - "type": "string" - }, "referrer": { "description": "The referrer information that is provided with the request.", "type": "string" @@ -590,9 +664,23 @@ "userAgent": { "description": "The user agent information that is provided with the request.", "type": "string" + }, + "url": { + "type": "string", + "description": "The URL of the request." + }, + "responseStatusCode": { + "description": "The HTTP response status code for the request.", + "format": "int32", + "type": "integer" + }, + "method": { + "type": "string", + "description": "The type of HTTP request, such as `GET`, `POST`, etc." } }, - "id": "HttpRequestContext" + "id": "HttpRequestContext", + "description": "HTTP request data that is related to a reported error.\nThis data should be provided by the application when reporting an error,\nunless the\nerror report has been generated automatically from Google App Engine logs." }, "ListGroupStatsResponse": { "description": "Contains a set of requested error group stats.", @@ -603,126 +691,38 @@ "type": "string" }, "timeRangeBegin": { - "description": "The timestamp specifies the start time to which the request was restricted.\nThe start time is set based on the requested time range. It may be adjusted\nto a later time if a project has exceeded the storage quota and older data\nhas been deleted.", "type": "string", + "description": "The timestamp specifies the start time to which the request was restricted.\nThe start time is set based on the requested time range. It may be adjusted\nto a later time if a project has exceeded the storage quota and older data\nhas been deleted.", "format": "google-datetime" }, "errorGroupStats": { - "description": "The error group stats which match the given request.", "type": "array", "items": { "$ref": "ErrorGroupStats" - } + }, + "description": "The error group stats which match the given request." } }, "id": "ListGroupStatsResponse" } }, - "revision": "20161122", - "basePath": "", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version_module": "True", + "protocol": "rest", "canonicalName": "Clouderrorreporting", - "discoveryVersion": "v1", - "baseUrl": "https://clouderrorreporting.googleapis.com/", - "name": "clouderrorreporting", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. 
Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", - "type": "string", - "location": "query" + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } }, - "documentationLink": "https://cloud.google.com/error-reporting/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1beta1", "rootUrl": "https://clouderrorreporting.googleapis.com/", - "kind": "discovery#restDescription" + "ownerDomain": "google.com", + "name": "clouderrorreporting", + "batchPath": "batch" } diff --git a/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-gen.go b/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-gen.go index cb0959203..0e62c1595 100644 --- a/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-gen.go +++ b/vendor/google.golang.org/api/clouderrorreporting/v1beta1/clouderrorreporting-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Events = NewProjectsEventsService(s) @@ -535,13 +540,40 @@ type ReportedErrorEvent struct { // Error Reporting system will be used. EventTime string `json:"eventTime,omitempty"` - // Message: [Required] A message describing the error. The message can - // contain an - // exception stack in one of the supported programming languages and - // formats. - // In that case, the message is parsed and detailed exception - // information - // is returned when retrieving the error event again. + // Message: [Required] The error message. + // If no `context.reportLocation` is provided, the message must contain + // a + // header (typically consisting of the exception type name and an + // error + // message) and an exception stack trace in one of the supported + // programming + // languages and formats. + // Supported languages are Java, Python, JavaScript, Ruby, C#, PHP, and + // Go. + // Supported stack trace formats are: + // + // * **Java**: Must be the return value of + // [`Throwable.printStackTrace()`](https://docs.oracle.com/javase/7/docs/ + // api/java/lang/Throwable.html#printStackTrace%28%29). + // * **Python**: Must be the return value of + // [`traceback.format_exc()`](https://docs.python.org/2/library/traceback + // .html#traceback.format_exc). + // * **JavaScript**: Must be the value of + // [`error.stack`](https://github.com/v8/v8/wiki/Stack-Trace-API) + // as returned by V8. + // * **Ruby**: Must contain frames returned by + // [`Exception.backtrace`](https://ruby-doc.org/core-2.2.0/Exception.html + // #method-i-backtrace). + // * **C#**: Must be the return value of + // [`Exception.ToString()`](https://msdn.microsoft.com/en-us/library/syst + // em.exception.tostring.aspx). 
+ // * **PHP**: Must start with `PHP (Notice|Parse error|Fatal + // error|Warning)` + // and contain the result of + // [`(string)$exception`](http://php.net/manual/en/exception.tostring.php + // ). + // * **Go**: Must be the return value of + // [`runtime.Stack()`](https://golang.org/pkg/runtime/debug/#Stack). Message string `json:"message,omitempty"` // ServiceContext: [Required] The service context in which this error @@ -789,6 +821,7 @@ func (c *ProjectsDeleteEventsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+projectName}/events") @@ -991,6 +1024,7 @@ func (c *ProjectsEventsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1197,6 +1231,7 @@ func (c *ProjectsEventsReportCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reportederrorevent) if err != nil { @@ -1456,6 +1491,7 @@ func (c *ProjectsGroupStatsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1695,6 +1731,7 @@ func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1826,6 +1863,7 @@ func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.errorgroup) if err != nil { diff --git a/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-api.json b/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-api.json index a37d827f5..cd6820980 100644 --- a/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-api.json +++ b/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-api.json @@ -1,715 +1,296 @@ { - "discoveryVersion": "v1", - "version_module": "True", - "schemas": { - "DecryptRequest": { - "description": "Request message for KeyManagementService.Decrypt.", - "type": "object", - "properties": { - "ciphertext": { - "description": "Required. The encrypted data originally returned in\nEncryptResponse.ciphertext.", - "format": "byte", - "type": "string" - }, - "additionalAuthenticatedData": { - "description": "Optional data that must match the data originally supplied in\nEncryptRequest.additional_authenticated_data.", - "format": "byte", - "type": "string" - } - }, - "id": "DecryptRequest" - }, - "Location": { - "properties": { - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Cross-service attributes for the location. 
For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", - "type": "object" - }, - "name": { - "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", - "type": "string" - }, - "locationId": { - "description": "The canonical id for this location. For example: `\"us-east1\"`.", - "type": "string" - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", - "type": "object" - } - }, - "id": "Location", - "description": "A resource that represents Google Cloud Platform location.", - "type": "object" - }, - "ListCryptoKeysResponse": { - "properties": { - "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListCryptoKeysRequest.page_token to retrieve the next page of results.", - "type": "string" - }, - "cryptoKeys": { - "description": "The list of CryptoKeys.", - "type": "array", - "items": { - "$ref": "CryptoKey" - } - }, - "totalSize": { - "description": "The total number of CryptoKeys that matched the query.", - "format": "int32", - "type": "integer" - } - }, - "id": "ListCryptoKeysResponse", - "description": "Response message for KeyManagementService.ListCryptoKeys.", - "type": "object" - }, - "Condition": { - "properties": { - "iam": { - "enumDescriptions": [ - "Default non-attribute.", - "Either principal or (if present) authority selector.", - "The principal (even if an authority selector is present), which\nmust only be used for attribution, not authorization." - ], - "enum": [ - "NO_ATTR", - "AUTHORITY", - "ATTRIBUTION" - ], - "description": "Trusted attributes supplied by the IAM system.", - "type": "string" - }, - "values": { - "description": "The objects of the condition. This is mutually exclusive with 'value'.", - "type": "array", - "items": { - "type": "string" - } - }, - "op": { - "enum": [ - "NO_OP", - "EQUALS", - "NOT_EQUALS", - "IN", - "NOT_IN", - "DISCHARGED" - ], - "description": "An operator to apply the subject with.", - "type": "string", - "enumDescriptions": [ - "Default no-op.", - "DEPRECATED. Use IN instead.", - "DEPRECATED. Use NOT_IN instead.", - "Set-inclusion check.", - "Set-exclusion check.", - "Subject is discharged" - ] - }, - "svc": { - "description": "Trusted attributes discharged by the service.", - "type": "string" - }, - "value": { - "description": "DEPRECATED. 
Use 'values' instead.", - "type": "string" - }, - "sys": { - "enum": [ - "NO_ATTR", - "REGION", - "SERVICE", - "NAME", - "IP" - ], - "description": "Trusted attributes supplied by any service that owns resources and uses\nthe IAM system for access control.", - "type": "string", - "enumDescriptions": [ - "Default non-attribute type", - "Region of the resource", - "Service name", - "Resource name", - "IP address of the caller" - ] - } - }, - "id": "Condition", - "description": "A condition to be met.", - "type": "object" - }, - "CounterOptions": { - "properties": { - "metric": { - "description": "The metric to update.", - "type": "string" - }, - "field": { - "description": "The field value to attribute.", - "type": "string" - } - }, - "id": "CounterOptions", - "description": "Options for counters", - "type": "object" - }, - "AuditLogConfig": { - "properties": { - "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", - "type": "array", - "items": { - "type": "string" - } - }, - "logType": { - "enum": [ - "LOG_TYPE_UNSPECIFIED", - "ADMIN_READ", - "DATA_WRITE", - "DATA_READ" - ], - "description": "The log type that this config enables.", - "type": "string", - "enumDescriptions": [ - "Default case. Should never be this.", - "Admin reads. Example: CloudIAM getIamPolicy", - "Data writes. Example: CloudSQL Users create", - "Data reads. Example: CloudSQL Users list" - ] - } - }, - "id": "AuditLogConfig", - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", - "type": "object" - }, - "DecryptResponse": { - "properties": { - "plaintext": { - "description": "The decrypted data originally supplied in EncryptRequest.plaintext.", - "format": "byte", - "type": "string" - } - }, - "id": "DecryptResponse", - "description": "Response message for KeyManagementService.Decrypt.", - "type": "object" - }, - "TestIamPermissionsRequest": { - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "TestIamPermissionsRequest", - "description": "Request message for `TestIamPermissions` method.", - "type": "object" - }, - "EncryptResponse": { - "properties": { - "name": { - "description": "The resource name of the CryptoKeyVersion used in encryption.", - "type": "string" - }, - "ciphertext": { - "description": "The encrypted data.", - "format": "byte", - "type": "string" - } - }, - "id": "EncryptResponse", - "description": "Response message for KeyManagementService.Encrypt.", - "type": "object" - }, - "ListLocationsResponse": { - "description": "The response message for Locations.ListLocations.", - "type": "object", - "properties": { - "locations": { - "description": "A list of locations that matches the specified filter in the request.", - "type": "array", - "items": { - "$ref": "Location" - } - }, - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - } - }, - "id": "ListLocationsResponse" - }, - "KeyRing": { - "description": "A KeyRing is a toplevel logical grouping of CryptoKeys.", - "type": "object", - "properties": { - "createTime": { - "description": "Output only. The time at which this KeyRing was created.", - "format": "google-datetime", - "type": "string" - }, - "name": { - "description": "Output only. The resource name for the KeyRing in the format\n`projects/*/locations/*/keyRings/*`.", - "type": "string" - } - }, - "id": "KeyRing" - }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", - "type": "object", - "properties": { - "version": { - "description": "Version of the `Policy`. 
The default version is 0.", - "format": "int32", - "type": "integer" - }, - "auditConfigs": { - "description": "Specifies cloud audit logging configuration for this policy.", - "type": "array", - "items": { - "$ref": "AuditConfig" - } - }, - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", - "type": "array", - "items": { - "$ref": "Binding" - } - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "format": "byte", - "type": "string" - }, - "iamOwned": { - "type": "boolean" - }, - "rules": { - "description": "If more than one rule is specified, the rules are applied in the following\nmanner:\n- All matching LOG rules are always applied.\n- If any DENY/DENY_WITH_LOG rule matches, permission is denied.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is\n granted.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if no rule applies, permission is denied.", - "type": "array", - "items": { - "$ref": "Rule" - } - } - }, - "id": "Policy" - }, - "UpdateCryptoKeyPrimaryVersionRequest": { - "description": "Request message for KeyManagementService.UpdateCryptoKeyPrimaryVersion.", - "type": "object", - "properties": { - "cryptoKeyVersionId": { - "description": "The id of the child CryptoKeyVersion to use as primary.", - "type": "string" - } - }, - "id": "UpdateCryptoKeyPrimaryVersionRequest" - }, - "RestoreCryptoKeyVersionRequest": { - "description": "Request message for KeyManagementService.RestoreCryptoKeyVersion.", - "type": "object", - "properties": {}, - "id": "RestoreCryptoKeyVersionRequest" - }, - "DataAccessOptions": { - "properties": {}, - "id": "DataAccessOptions", - "description": "Write a Data Access (Gin) log", - "type": "object" - }, - "ListKeyRingsResponse": { - "description": "Response message for KeyManagementService.ListKeyRings.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "A token to retrieve next page of results. 
Pass this value in\nListKeyRingsRequest.page_token to retrieve the next page of results.", - "type": "string" - }, - "totalSize": { - "description": "The total number of KeyRings that matched the query.", - "format": "int32", - "type": "integer" - }, - "keyRings": { - "description": "The list of KeyRings.", - "type": "array", - "items": { - "$ref": "KeyRing" - } - } - }, - "id": "ListKeyRingsResponse" - }, - "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nIt consists of which permission types are logged, and what identities, if\nany, are exempted from logging.\nAn AuditConifg must have one or more AuditLogConfigs.", - "type": "object", - "properties": { - "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `resourcemanager`, `storage`, `compute`.\n`allServices` is a special value that covers all services.", - "type": "string" - }, - "auditLogConfigs": { - "description": "The configuration for logging of each type of permission.\nNext ID: 4", - "type": "array", - "items": { - "$ref": "AuditLogConfig" - } - }, - "exemptedMembers": { - "description": "Specifies the identities that are exempted from \"data access\" audit\nlogging for the `service` specified above.\nFollows the same format of Binding.members.\nThis field is deprecated in favor of per-permission-type exemptions.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "AuditConfig" - }, - "CryptoKeyVersion": { - "properties": { - "destroyTime": { - "description": "Output only. The time this CryptoKeyVersion's key material is scheduled\nfor destruction. Only present if state is\nDESTROY_SCHEDULED.", - "format": "google-datetime", - "type": "string" - }, - "createTime": { - "description": "Output only. The time at which this CryptoKeyVersion was created.", - "format": "google-datetime", - "type": "string" - }, - "state": { - "enumDescriptions": [ - "Not specified.", - "This version may be used in Encrypt and\nDecrypt requests.", - "This version may not be used, but the key material is still available,\nand the version can be placed back into the ENABLED state.", - "This version is destroyed, and the key material is no longer stored.\nA version may not leave this state once entered.", - "This version is scheduled for destruction, and will be destroyed soon.\nCall\nRestoreCryptoKeyVersion\nto put it back into the DISABLED state." - ], - "enum": [ - "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", - "ENABLED", - "DISABLED", - "DESTROYED", - "DESTROY_SCHEDULED" - ], - "description": "The current state of the CryptoKeyVersion.", - "type": "string" - }, - "name": { - "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", - "type": "string" - }, - "destroyEventTime": { - "description": "Output only. The time this CryptoKeyVersion's key material was\ndestroyed. 
Only present if state is\nDESTROYED.", - "format": "google-datetime", - "type": "string" - } - }, - "id": "CryptoKeyVersion", - "description": "A CryptoKeyVersion represents an individual cryptographic key, and the\nassociated key material.\n\nIt can be used for cryptographic operations either directly, or via its\nparent CryptoKey, in which case the server will choose the appropriate\nversion for the operation.", - "type": "object" - }, - "CloudAuditOptions": { - "description": "Write a Cloud Audit log", - "type": "object", - "properties": {}, - "id": "CloudAuditOptions" - }, - "Binding": { - "description": "Associates `members` with a `role`.", - "type": "object", - "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", - "type": "array", - "items": { - "type": "string" - } - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" - } - }, - "id": "Binding" - }, - "EncryptRequest": { - "description": "Request message for KeyManagementService.Encrypt.", - "type": "object", - "properties": { - "additionalAuthenticatedData": { - "description": "Optional data that, if specified, must also be provided during decryption\nthrough DecryptRequest.additional_authenticated_data. Must be no\nlarger than 64KiB.", - "format": "byte", - "type": "string" - }, - "plaintext": { - "description": "Required. The data to encrypt. Must be no larger than 64KiB.", - "format": "byte", - "type": "string" - } - }, - "id": "EncryptRequest" - }, - "ListCryptoKeyVersionsResponse": { - "properties": { - "nextPageToken": { - "description": "A token to retrieve next page of results. 
Pass this value in\nListCryptoKeyVersionsRequest.page_token to retrieve the next page of\nresults.", - "type": "string" - }, - "totalSize": { - "description": "The total number of CryptoKeyVersions that matched the\nquery.", - "format": "int32", - "type": "integer" - }, - "cryptoKeyVersions": { - "description": "The list of CryptoKeyVersions.", - "type": "array", - "items": { - "$ref": "CryptoKeyVersion" - } - } - }, - "id": "ListCryptoKeyVersionsResponse", - "description": "Response message for KeyManagementService.ListCryptoKeyVersions.", - "type": "object" - }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "TestIamPermissionsResponse" - }, - "DestroyCryptoKeyVersionRequest": { - "description": "Request message for KeyManagementService.DestroyCryptoKeyVersion.", - "type": "object", - "properties": {}, - "id": "DestroyCryptoKeyVersionRequest" - }, - "Rule": { - "description": "A rule to be applied in a Policy.", - "type": "object", - "properties": { - "notIn": { - "description": "If one or more 'not_in' clauses are specified, the rule matches\nif the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.\nThe format for in and not_in entries is the same as for members in a\nBinding (see google/iam/v1/policy.proto).", - "type": "array", - "items": { - "type": "string" - } - }, - "description": { - "description": "Human-readable description of the rule.", - "type": "string" - }, - "conditions": { - "description": "Additional restrictions that must be met", - "type": "array", - "items": { - "$ref": "Condition" - } - }, - "logConfig": { - "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries\nthat match the LOG action.", - "type": "array", - "items": { - "$ref": "LogConfig" - } - }, - "in": { - "description": "If one or more 'in' clauses are specified, the rule matches if\nthe PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", - "type": "array", - "items": { - "type": "string" - } - }, - "permissions": { - "description": "A permission is a string of form '\u003cservice\u003e.\u003cresource type\u003e.\u003cverb\u003e'\n(e.g., 'storage.buckets.list'). A value of '*' matches all permissions,\nand a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", - "type": "array", - "items": { - "type": "string" - } - }, - "action": { - "enumDescriptions": [ - "Default no action.", - "Matching 'Entries' grant access.", - "Matching 'Entries' grant access and the caller promises to log\nthe request per the returned log_configs.", - "Matching 'Entries' deny access.", - "Matching 'Entries' deny access and the caller promises to log\nthe request per the returned log_configs.", - "Matching 'Entries' tell IAM.Check callers to generate logs." - ], - "enum": [ - "NO_ACTION", - "ALLOW", - "ALLOW_WITH_LOG", - "DENY", - "DENY_WITH_LOG", - "LOG" - ], - "description": "Required", - "type": "string" - } - }, - "id": "Rule" - }, - "CryptoKey": { - "description": "A CryptoKey represents a logical key that can be used for cryptographic\noperations.\n\nA CryptoKey is made up of one or more versions, which\nrepresent the actual key material used in cryptographic operations.", - "type": "object", - "properties": { - "createTime": { - "description": "Output only. 
The time at which this CryptoKey was created.", - "format": "google-datetime", - "type": "string" - }, - "rotationPeriod": { - "description": "next_rotation_time will be advanced by this period when the service\nautomatically rotates a key. Must be at least one day.\n\nIf rotation_period is set, next_rotation_time must also be set.", - "format": "google-duration", - "type": "string" - }, - "primary": { - "$ref": "CryptoKeyVersion", - "description": "Output only. A copy of the \"primary\" CryptoKeyVersion that will be used\nby Encrypt when this CryptoKey is given\nin EncryptRequest.name.\n\nThe CryptoKey's primary version can be updated via\nUpdateCryptoKeyPrimaryVersion." - }, - "name": { - "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - "type": "string" - }, - "purpose": { - "enumDescriptions": [ - "Not specified.", - "CryptoKeys with this purpose may be used with\nEncrypt and\nDecrypt." - ], - "enum": [ - "CRYPTO_KEY_PURPOSE_UNSPECIFIED", - "ENCRYPT_DECRYPT" - ], - "description": "The immutable purpose of this CryptoKey. Currently, the only acceptable\npurpose is ENCRYPT_DECRYPT.", - "type": "string" - }, - "nextRotationTime": { - "description": "At next_rotation_time, the Key Management Service will automatically:\n\n1. Create a new version of this CryptoKey.\n2. Mark the new version as primary.\n\nKey rotations performed manually via\nCreateCryptoKeyVersion and\nUpdateCryptoKeyPrimaryVersion\ndo not affect next_rotation_time.", - "format": "google-datetime", - "type": "string" - } - }, - "id": "CryptoKey" - }, - "LogConfig": { - "properties": { - "counter": { - "description": "Counter options.", - "$ref": "CounterOptions" - }, - "dataAccess": { - "$ref": "DataAccessOptions", - "description": "Data access options." - }, - "cloudAudit": { - "description": "Cloud audit options.", - "$ref": "CloudAuditOptions" - } - }, - "id": "LogConfig", - "description": "Specifies what kind of log the caller must write\nIncrement a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only,\nand end in \"_count\". Field names should not contain an initial slash.\nThe actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are\ntheir respective values.\n\nAt present the only supported field names are\n - \"iam_principal\", corresponding to IAMContext.principal;\n - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples:\n counter { metric: \"/debug_access_count\" field: \"iam_principal\" }\n ==\u003e increment counter /iam/policy/backend_debug_access_count\n {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support:\n* multiple field names (though this may be supported in the future)\n* decrementing the counter\n* incrementing it by anything other than 1", - "type": "object" - }, - "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "type": "object", - "properties": { - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." - }, - "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. 
Only\nthe fields in the mask will be modified. If no mask is provided, a default\nmask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", - "format": "google-fieldmask", - "type": "string" - } - }, - "id": "SetIamPolicyRequest" - } - }, - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "protocol": "rest", - "canonicalName": "Cloud KMS", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "rootUrl": "https://cloudkms.googleapis.com/", - "ownerDomain": "google.com", - "name": "cloudkms", - "batchPath": "batch", - "title": "Google Cloud KMS API", - "ownerName": "Google", - "resources": { - "projects": { - "resources": { + "ownerName": "Google", + "resources": { + "projects": { + "resources": { "locations": { + "methods": { + "list": { + "description": "Lists information about the supported locations for this service.", + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "response": { + "$ref": "ListLocationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The resource that owns the locations collection, if applicable.", + "required": true, + "type": "string" + }, + "pageToken": { + "description": "The standard list page token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + }, + "filter": { + "description": "The standard list filter.", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/locations", + "path": "v1beta1/{+name}/locations", + "id": "cloudkms.projects.locations.list" + }, + "get": { + "httpMethod": "GET", + "response": { + "$ref": "Location" + }, + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/locations/[^/]+$", + "location": "path", + "description": "Resource name for the location.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}", + "id": "cloudkms.projects.locations.get", + "path": "v1beta1/{+name}", + "description": "Get information about a location." + } + }, "resources": { "keyRings": { + "methods": { + "setIamPolicy": { + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", + "path": "v1beta1/{+resource}:setIamPolicy", + "id": "cloudkms.projects.locations.keyRings.setIamPolicy" + }, + "create": { + "response": { + "$ref": "KeyRing" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "POST", + "parameters": { + "parent": { + "pattern": "^projects/[^/]+/locations/[^/]+$", + "location": "path", + "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", + "required": true, + "type": "string" + }, + "keyRingId": { + "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings", + "path": "v1beta1/{+parent}/keyRings", + "id": "cloudkms.projects.locations.keyRings.create", + "request": { + "$ref": "KeyRing" + }, + "description": "Create a new KeyRing in a given Project and Location." + }, + "getIamPolicy": { + "path": "v1beta1/{+resource}:getIamPolicy", + "id": "cloudkms.projects.locations.keyRings.getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "parameterOrder": [ + "resource" + ], + "httpMethod": "GET", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", + "location": "path", + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy" + }, + "get": { + "id": "cloudkms.projects.locations.keyRings.get", + "path": "v1beta1/{+name}", + "description": "Returns metadata for a given KeyRing.", + "httpMethod": "GET", + "response": { + "$ref": "KeyRing" + }, + "parameterOrder": [ + "name" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the KeyRing to get.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}" + }, + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", + "path": "v1beta1/{+resource}:testIamPermissions", + "id": "cloudkms.projects.locations.keyRings.testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning." + }, + "list": { + "description": "Lists KeyRings.", + "response": { + "$ref": "ListKeyRingsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "parameters": { + "pageToken": { + "description": "Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional limit on the number of KeyRings to include in the\nresponse. Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", + "format": "int32", + "type": "integer" + }, + "parent": { + "pattern": "^projects/[^/]+/locations/[^/]+$", + "location": "path", + "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings", + "path": "v1beta1/{+parent}/keyRings", + "id": "cloudkms.projects.locations.keyRings.list" + } + }, "resources": { "cryptoKeys": { "resources": { "cryptoKeyVersions": { "methods": { - "create": { - "httpMethod": "POST", + "list": { + "path": "v1beta1/{+parent}/cryptoKeyVersions", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list", + "description": "Lists CryptoKeyVersions.", "parameterOrder": [ "parent" ], + "httpMethod": "GET", "response": { - "$ref": "CryptoKeyVersion" + "$ref": "ListCryptoKeyVersionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { + "pageToken": { + "description": "Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", + "format": "int32", + "type": "integer" + }, "parent": { "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "location": "path", - "description": "Required. The name of the CryptoKey associated with\nthe CryptoKeyVersions.", + "description": "Required. 
The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", "required": true, "type": "string" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", - "path": "v1beta1/{+parent}/cryptoKeyVersions", - "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED.", - "request": { - "$ref": "CryptoKeyVersion" - } + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions" }, "destroy": { "path": "v1beta1/{+name}:destroy", @@ -739,66 +320,63 @@ }, "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:destroy" }, - "restore": { - "path": "v1beta1/{+name}:restore", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", - "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED,\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", + "create": { + "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED.", "request": { - "$ref": "RestoreCryptoKeyVersionRequest" - }, - "response": { "$ref": "CryptoKeyVersion" }, + "httpMethod": "POST", "parameterOrder": [ - "name" + "parent" ], - "httpMethod": "POST", + "response": { + "$ref": "CryptoKeyVersion" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { - "name": { - "description": "The resource name of the CryptoKeyVersion to restore.", + "parent": { + "description": "Required. 
The name of the CryptoKey associated with\nthe CryptoKeyVersions.", "required": true, "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "location": "path" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore" + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", + "path": "v1beta1/{+parent}/cryptoKeyVersions" }, - "get": { - "description": "Returns metadata for a given CryptoKeyVersion.", - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", + "restore": { "response": { "$ref": "CryptoKeyVersion" }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { "name": { - "description": "The name of the CryptoKeyVersion to get.", + "description": "The resource name of the CryptoKeyVersion to restore.", "required": true, "type": "string", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", "location": "path" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - "path": "v1beta1/{+name}", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get" + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore", + "path": "v1beta1/{+name}:restore", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", + "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED,\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", + "request": { + "$ref": "RestoreCryptoKeyVersionRequest" + } }, "patch": { - "path": "v1beta1/{+name}", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", - "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", - "request": { - "$ref": "CryptoKeyVersion" - }, "response": { "$ref": "CryptoKeyVersion" }, @@ -824,690 +402,1114 @@ "location": "query" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}" + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", + "path": "v1beta1/{+name}", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", + "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. 
See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", + "request": { + "$ref": "CryptoKeyVersion" + } }, - "list": { - "path": "v1beta1/{+parent}/cryptoKeyVersions", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list", - "description": "Lists CryptoKeyVersions.", + "get": { "parameterOrder": [ - "parent" + "name" ], "httpMethod": "GET", "response": { - "$ref": "ListCryptoKeyVersionsResponse" + "$ref": "CryptoKeyVersion" }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { - "pageToken": { - "location": "query", - "description": "Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", - "type": "string" - }, - "pageSize": { - "description": "Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "parent": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "name": { + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", "location": "path", - "description": "Required. The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "description": "The name of the CryptoKeyVersion to get.", "required": true, "type": "string" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions" + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", + "path": "v1beta1/{+name}", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get", + "description": "Returns metadata for a given CryptoKeyVersion." } } } }, "methods": { - "encrypt": { - "path": "v1beta1/{+name}:encrypt", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", - "description": "Encrypt data, so that it can only be recovered by a call to Decrypt.", + "testIamPermissions": { + "path": "v1beta1/{+resource}:testIamPermissions", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", "request": { - "$ref": "EncryptRequest" + "$ref": "TestIamPermissionsRequest" }, + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", "response": { - "$ref": "EncryptResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "description": "Required. 
The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.+$", - "location": "path" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt" - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" + "$ref": "TestIamPermissionsResponse" }, - "httpMethod": "POST", "parameterOrder": [ "resource" ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], + "httpMethod": "POST", "parameters": { "resource": { "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "location": "path", - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, "type": "string" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", - "path": "v1beta1/{+resource}:setIamPolicy" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions" }, - "create": { - "response": { - "$ref": "CryptoKey" - }, + "decrypt": { + "httpMethod": "POST", "parameterOrder": [ - "parent" + "name" ], - "httpMethod": "POST", + "response": { + "$ref": "DecryptResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { - "cryptoKeyId": { - "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - "type": "string", - "location": "query" - }, - "parent": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path", - "description": "Required. The name of the KeyRing associated with the\nCryptoKeys.", + "name": { + "description": "Required. 
The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", "required": true, - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "location": "path" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - "path": "v1beta1/{+parent}/cryptoKeys", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", - "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose is required.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", + "path": "v1beta1/{+name}:decrypt", + "description": "Decrypt data that was protected by Encrypt.", "request": { - "$ref": "CryptoKey" + "$ref": "DecryptRequest" } }, - "updatePrimaryVersion": { + "list": { + "path": "v1beta1/{+parent}/cryptoKeys", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.list", + "description": "Lists CryptoKeys.", "response": { - "$ref": "CryptoKey" + "$ref": "ListCryptoKeysResponse" }, "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" + "parent" ], + "httpMethod": "GET", "parameters": { - "name": { - "description": "The resource name of the CryptoKey to update.", + "parent": { + "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", "required": true, "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", + "location": "path" + }, + "pageToken": { + "description": "Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. 
If unspecified, the server will pick an appropriate default.", + "format": "int32", + "type": "integer" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion", - "path": "v1beta1/{+name}:updatePrimaryVersion", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", - "description": "Update the version of a CryptoKey that will be used in Encrypt", - "request": { - "$ref": "UpdateCryptoKeyPrimaryVersionRequest" - } + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys" }, - "getIamPolicy": { - "path": "v1beta1/{+resource}:getIamPolicy", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "response": { - "$ref": "Policy" + "encrypt": { + "description": "Encrypt data, so that it can only be recovered by a call to Decrypt.", + "request": { + "$ref": "EncryptRequest" }, + "httpMethod": "POST", "parameterOrder": [ - "resource" + "name" ], - "httpMethod": "GET", + "response": { + "$ref": "EncryptResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { - "resource": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "name": { + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.+$", "location": "path", - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "description": "Required. The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", "required": true, "type": "string" } }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy" + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", + "path": "v1beta1/{+name}:encrypt" }, - "patch": { - "response": { - "$ref": "CryptoKey" - }, + "setIamPolicy": { + "httpMethod": "POST", "parameterOrder": [ - "name" + "resource" ], - "httpMethod": "PATCH", + "response": { + "$ref": "Policy" + }, "parameters": { - "name": { - "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", "required": true, "type": "string", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "location": "path" - }, - "updateMask": { - "location": "query", - "description": "Required list of fields to be updated in this request.", - "format": "google-fieldmask", - "type": "string" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - "path": "v1beta1/{+name}", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.patch", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", + "path": "v1beta1/{+resource}:setIamPolicy", "request": { - "$ref": "CryptoKey" + "$ref": "SetIamPolicyRequest" }, - "description": "Update a CryptoKey." + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy." }, - "get": { - "httpMethod": "GET", + "create": { "response": { "$ref": "CryptoKey" }, "parameterOrder": [ - "name" + "parent" ], + "httpMethod": "POST", "parameters": { - "name": { - "description": "The name of the CryptoKey to get.", + "cryptoKeyId": { + "location": "query", + "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + "type": "string" + }, + "parent": { + "description": "Required. The name of the KeyRing associated with the\nCryptoKeys.", "required": true, "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", - "path": "v1beta1/{+name}", - "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion." + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", + "path": "v1beta1/{+parent}/cryptoKeys", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", + "request": { + "$ref": "CryptoKey" + }, + "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose is required." }, - "testIamPermissions": { + "updatePrimaryVersion": { + "path": "v1beta1/{+name}:updatePrimaryVersion", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", "request": { - "$ref": "TestIamPermissionsRequest" + "$ref": "UpdateCryptoKeyPrimaryVersionRequest" }, - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], + "description": "Update the version of a CryptoKey that will be used in Encrypt", "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" - } + "$ref": "CryptoKey" }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", - "path": "v1beta1/{+resource}:testIamPermissions" - }, - "decrypt": { - "httpMethod": "POST", "parameterOrder": [ "name" ], - "response": { - "$ref": "DecryptResponse" - }, + "httpMethod": "POST", "parameters": { "name": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path", - "description": "Required. The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", + "description": "The resource name of the CryptoKey to update.", "required": true, - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", - "path": "v1beta1/{+name}:decrypt", - "request": { - "$ref": "DecryptRequest" - }, - "description": "Decrypt data that was protected by Encrypt." + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion" }, - "list": { + "getIamPolicy": { + "httpMethod": "GET", "response": { - "$ref": "ListCryptoKeysResponse" + "$ref": "Policy" }, "parameterOrder": [ - "parent" + "resource" ], - "httpMethod": "GET", "parameters": { - "parent": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path", - "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "type": "string" - }, - "pageToken": { - "location": "query", - "description": "Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", - "type": "string" - }, - "pageSize": { - "description": "Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - "format": "int32", - "type": "integer", - "location": "query" + "type": "string", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - "path": "v1beta1/{+parent}/cryptoKeys", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.list", - "description": "Lists CryptoKeys." 
- } - } - } - }, - "methods": { - "list": { - "response": { - "$ref": "ListKeyRingsResponse" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "parameters": { - "pageToken": { - "description": "Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "Optional limit on the number of KeyRings to include in the\nresponse. Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "parent": { - "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings", - "path": "v1beta1/{+parent}/keyRings", - "id": "cloudkms.projects.locations.keyRings.list", - "description": "Lists KeyRings." - }, - "setIamPolicy": { - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", - "id": "cloudkms.projects.locations.keyRings.setIamPolicy", - "path": "v1beta1/{+resource}:setIamPolicy", - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - } - }, - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "KeyRing" - }, - "parameters": { - "parent": { - "pattern": "^projects/[^/]+/locations/[^/]+$", - "location": "path", - "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - "required": true, - "type": "string" + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", + "path": "v1beta1/{+resource}:getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." }, - "keyRingId": { - "location": "query", - "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings", - "id": "cloudkms.projects.locations.keyRings.create", - "path": "v1beta1/{+parent}/keyRings", - "request": { - "$ref": "KeyRing" - }, - "description": "Create a new KeyRing in a given Project and Location." 
- }, - "getIamPolicy": { - "httpMethod": "GET", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy", - "id": "cloudkms.projects.locations.keyRings.getIamPolicy", - "path": "v1beta1/{+resource}:getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." - }, - "get": { - "description": "Returns metadata for a given KeyRing.", - "httpMethod": "GET", - "response": { - "$ref": "KeyRing" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path", - "description": "The name of the KeyRing to get.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}", - "id": "cloudkms.projects.locations.keyRings.get", - "path": "v1beta1/{+name}" - }, - "testIamPermissions": { - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameters": { - "resource": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path", - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string" + "get": { + "httpMethod": "GET", + "response": { + "$ref": "CryptoKey" + }, + "parameterOrder": [ + "name" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the CryptoKey to get.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", + "path": "v1beta1/{+name}", + "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion." + }, + "patch": { + "description": "Update a CryptoKey.", + "request": { + "$ref": "CryptoKey" + }, + "response": { + "$ref": "CryptoKey" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", + "location": "path", + "description": "Output only. 
The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required list of fields to be updated in this request.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", + "path": "v1beta1/{+name}", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.patch" } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", - "id": "cloudkms.projects.locations.keyRings.testIamPermissions", - "path": "v1beta1/{+resource}:testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." + } } } } - }, - "methods": { - "list": { - "path": "v1beta1/{+name}/locations", - "id": "cloudkms.projects.locations.list", - "description": "Lists information about the supported locations for this service.", - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "response": { - "$ref": "ListLocationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The resource that owns the locations collection, if applicable.", - "required": true, - "type": "string" - }, - "pageToken": { - "description": "The standard list page token.", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "The standard list page size.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "filter": { - "location": "query", - "description": "The standard list filter.", - "type": "string" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/locations" - }, - "get": { - "description": "Get information about a location.", - "response": { - "$ref": "Location" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/locations/[^/]+$", - "location": "path", - "description": "Resource name for the location.", - "required": true, - "type": "string" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}", - "path": "v1beta1/{+name}", - "id": "cloudkms.projects.locations.get" - } } } - } - } - }, - "parameters": { - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + } + } + }, + "parameters": { + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + } + }, + "version": "v1beta1", + "baseUrl": "https://cloudkms.googleapis.com/", + "kind": "discovery#restDescription", + "description": "Manages encryption for your cloud services the same way you do on-premise. You can generate, use, rotate, and destroy AES256 encryption keys.", + "servicePath": "", + "basePath": "", + "documentationLink": "https://cloud.google.com/kms/", + "revision": "20170216", + "id": "cloudkms:v1beta1", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "UpdateCryptoKeyPrimaryVersionRequest": { + "description": "Request message for KeyManagementService.UpdateCryptoKeyPrimaryVersion.", + "type": "object", + "properties": { + "cryptoKeyVersionId": { + "description": "The id of the child CryptoKeyVersion to use as primary.", + "type": "string" + } + }, + "id": "UpdateCryptoKeyPrimaryVersionRequest" + }, + "RestoreCryptoKeyVersionRequest": { + "description": "Request message for KeyManagementService.RestoreCryptoKeyVersion.", + "type": "object", + "properties": {}, + "id": "RestoreCryptoKeyVersionRequest" + }, + "DataAccessOptions": { + "description": "Write a Data Access (Gin) log", + "type": "object", + "properties": {}, + "id": "DataAccessOptions" + }, + "ListKeyRingsResponse": { + "description": "Response message for KeyManagementService.ListKeyRings.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "A token to retrieve next page of results. 
Pass this value in\nListKeyRingsRequest.page_token to retrieve the next page of results.", + "type": "string" + }, + "totalSize": { + "description": "The total number of KeyRings that matched the query.", + "format": "int32", + "type": "integer" + }, + "keyRings": { + "description": "The list of KeyRings.", + "type": "array", + "items": { + "$ref": "KeyRing" + } + } + }, + "id": "ListKeyRingsResponse" + }, + "AuditConfig": { + "description": "Specifies the audit configuration for a service.\nIt consists of which permission types are logged, and what identities, if\nany, are exempted from logging.\nAn AuditConifg must have one or more AuditLogConfigs.", + "type": "object", + "properties": { + "service": { + "description": "Specifies a service that will be enabled for audit logging.\nFor example, `resourcemanager`, `storage`, `compute`.\n`allServices` is a special value that covers all services.", + "type": "string" + }, + "auditLogConfigs": { + "description": "The configuration for logging of each type of permission.\nNext ID: 4", + "type": "array", + "items": { + "$ref": "AuditLogConfig" + } + }, + "exemptedMembers": { + "description": "Specifies the identities that are exempted from \"data access\" audit\nlogging for the `service` specified above.\nFollows the same format of Binding.members.\nThis field is deprecated in favor of per-permission-type exemptions.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "AuditConfig" + }, + "CryptoKeyVersion": { + "properties": { + "state": { + "enumDescriptions": [ + "Not specified.", + "This version may be used in Encrypt and\nDecrypt requests.", + "This version may not be used, but the key material is still available,\nand the version can be placed back into the ENABLED state.", + "This version is destroyed, and the key material is no longer stored.\nA version may not leave this state once entered.", + "This version is scheduled for destruction, and will be destroyed soon.\nCall\nRestoreCryptoKeyVersion\nto put it back into the DISABLED state." + ], + "enum": [ + "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", + "ENABLED", + "DISABLED", + "DESTROYED", + "DESTROY_SCHEDULED" + ], + "description": "The current state of the CryptoKeyVersion.", + "type": "string" + }, + "name": { + "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", + "type": "string" + }, + "destroyEventTime": { + "description": "Output only. The time this CryptoKeyVersion's key material was\ndestroyed. Only present if state is\nDESTROYED.", + "format": "google-datetime", + "type": "string" + }, + "destroyTime": { + "description": "Output only. The time this CryptoKeyVersion's key material is scheduled\nfor destruction. Only present if state is\nDESTROY_SCHEDULED.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Output only. 
The time at which this CryptoKeyVersion was created.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "CryptoKeyVersion", + "description": "A CryptoKeyVersion represents an individual cryptographic key, and the\nassociated key material.\n\nIt can be used for cryptographic operations either directly, or via its\nparent CryptoKey, in which case the server will choose the appropriate\nversion for the operation.", + "type": "object" + }, + "CloudAuditOptions": { + "description": "Write a Cloud Audit log", + "type": "object", + "properties": {}, + "id": "CloudAuditOptions" + }, + "Binding": { + "properties": { + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "type": "array", + "items": { + "type": "string" + } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + } + }, + "id": "Binding", + "description": "Associates `members` with a `role`.", + "type": "object" + }, + "EncryptRequest": { + "description": "Request message for KeyManagementService.Encrypt.", + "type": "object", + "properties": { + "plaintext": { + "description": "Required. The data to encrypt. Must be no larger than 64KiB.", + "format": "byte", + "type": "string" + }, + "additionalAuthenticatedData": { + "description": "Optional data that, if specified, must also be provided during decryption\nthrough DecryptRequest.additional_authenticated_data. Must be no\nlarger than 64KiB.", + "format": "byte", + "type": "string" + } + }, + "id": "EncryptRequest" + }, + "ListCryptoKeyVersionsResponse": { + "properties": { + "cryptoKeyVersions": { + "description": "The list of CryptoKeyVersions.", + "type": "array", + "items": { + "$ref": "CryptoKeyVersion" + } + }, + "nextPageToken": { + "description": "A token to retrieve next page of results. 
Pass this value in\nListCryptoKeyVersionsRequest.page_token to retrieve the next page of\nresults.", + "type": "string" + }, + "totalSize": { + "description": "The total number of CryptoKeyVersions that matched the\nquery.", + "format": "int32", + "type": "integer" + } + }, + "id": "ListCryptoKeyVersionsResponse", + "description": "Response message for KeyManagementService.ListCryptoKeyVersions.", + "type": "object" + }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse" + }, + "DestroyCryptoKeyVersionRequest": { + "properties": {}, + "id": "DestroyCryptoKeyVersionRequest", + "description": "Request message for KeyManagementService.DestroyCryptoKeyVersion.", + "type": "object" + }, + "CryptoKey": { + "description": "A CryptoKey represents a logical key that can be used for cryptographic\noperations.\n\nA CryptoKey is made up of one or more versions, which\nrepresent the actual key material used in cryptographic operations.", + "type": "object", + "properties": { + "purpose": { + "enumDescriptions": [ + "Not specified.", + "CryptoKeys with this purpose may be used with\nEncrypt and\nDecrypt." + ], + "enum": [ + "CRYPTO_KEY_PURPOSE_UNSPECIFIED", + "ENCRYPT_DECRYPT" + ], + "description": "The immutable purpose of this CryptoKey. Currently, the only acceptable\npurpose is ENCRYPT_DECRYPT.", + "type": "string" + }, + "nextRotationTime": { + "description": "At next_rotation_time, the Key Management Service will automatically:\n\n1. Create a new version of this CryptoKey.\n2. Mark the new version as primary.\n\nKey rotations performed manually via\nCreateCryptoKeyVersion and\nUpdateCryptoKeyPrimaryVersion\ndo not affect next_rotation_time.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Output only. The time at which this CryptoKey was created.", + "format": "google-datetime", + "type": "string" + }, + "rotationPeriod": { + "description": "next_rotation_time will be advanced by this period when the service\nautomatically rotates a key. Must be at least one day.\n\nIf rotation_period is set, next_rotation_time must also be set.", + "format": "google-duration", + "type": "string" + }, + "primary": { + "description": "Output only. A copy of the \"primary\" CryptoKeyVersion that will be used\nby Encrypt when this CryptoKey is given\nin EncryptRequest.name.\n\nThe CryptoKey's primary version can be updated via\nUpdateCryptoKeyPrimaryVersion.", + "$ref": "CryptoKeyVersion" + }, + "name": { + "description": "Output only. 
The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "type": "string" + } + }, + "id": "CryptoKey" + }, + "Rule": { + "properties": { + "logConfig": { + "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries\nthat match the LOG action.", + "type": "array", + "items": { + "$ref": "LogConfig" + } + }, + "in": { + "description": "If one or more 'in' clauses are specified, the rule matches if\nthe PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", + "type": "array", + "items": { + "type": "string" + } + }, + "permissions": { + "description": "A permission is a string of form '\u003cservice\u003e.\u003cresource type\u003e.\u003cverb\u003e'\n(e.g., 'storage.buckets.list'). A value of '*' matches all permissions,\nand a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", + "type": "array", + "items": { + "type": "string" + } + }, + "action": { + "enum": [ + "NO_ACTION", + "ALLOW", + "ALLOW_WITH_LOG", + "DENY", + "DENY_WITH_LOG", + "LOG" + ], + "description": "Required", + "type": "string", + "enumDescriptions": [ + "Default no action.", + "Matching 'Entries' grant access.", + "Matching 'Entries' grant access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' deny access.", + "Matching 'Entries' deny access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' tell IAM.Check callers to generate logs." + ] + }, + "notIn": { + "description": "If one or more 'not_in' clauses are specified, the rule matches\nif the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.\nThe format for in and not_in entries is the same as for members in a\nBinding (see google/iam/v1/policy.proto).", + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "description": "Human-readable description of the rule.", + "type": "string" + }, + "conditions": { + "description": "Additional restrictions that must be met", + "type": "array", + "items": { + "$ref": "Condition" + } + } + }, + "id": "Rule", + "description": "A rule to be applied in a Policy.", + "type": "object" }, - "prettyPrint": { - "location": "query", - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true" + "LogConfig": { + "properties": { + "counter": { + "$ref": "CounterOptions", + "description": "Counter options." + }, + "dataAccess": { + "$ref": "DataAccessOptions", + "description": "Data access options." + }, + "cloudAudit": { + "$ref": "CloudAuditOptions", + "description": "Cloud audit options." + } + }, + "id": "LogConfig", + "description": "Specifies what kind of log the caller must write\nIncrement a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only,\nand end in \"_count\". 
Field names should not contain an initial slash.\nThe actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are\ntheir respective values.\n\nAt present the only supported field names are\n - \"iam_principal\", corresponding to IAMContext.principal;\n - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples:\n counter { metric: \"/debug_access_count\" field: \"iam_principal\" }\n ==\u003e increment counter /iam/policy/backend_debug_access_count\n {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support:\n* multiple field names (though this may be supported in the future)\n* decrementing the counter\n* incrementing it by anything other than 1", + "type": "object" }, - "fields": { - "location": "query", - "description": "Selector specifying which fields to include in a partial response.", - "type": "string" + "SetIamPolicyRequest": { + "properties": { + "policy": { + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", + "$ref": "Policy" + }, + "updateMask": { + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, a default\nmask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", + "format": "google-fieldmask", + "type": "string" + } + }, + "id": "SetIamPolicyRequest", + "description": "Request message for `SetIamPolicy` method.", + "type": "object" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" + "DecryptRequest": { + "description": "Request message for KeyManagementService.Decrypt.", + "type": "object", + "properties": { + "ciphertext": { + "description": "Required. The encrypted data originally returned in\nEncryptResponse.ciphertext.", + "format": "byte", + "type": "string" + }, + "additionalAuthenticatedData": { + "description": "Optional data that must match the data originally supplied in\nEncryptRequest.additional_authenticated_data.", + "format": "byte", + "type": "string" + } + }, + "id": "DecryptRequest" }, - "callback": { - "location": "query", - "description": "JSONP", - "type": "string" + "Location": { + "description": "A resource that represents Google Cloud Platform location.", + "type": "object", + "properties": { + "name": { + "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", + "type": "string" + }, + "locationId": { + "description": "The canonical id for this location. For example: `\"us-east1\"`.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Cross-service attributes for the location. 
For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", + "type": "object" + } + }, + "id": "Location" + }, + "ListCryptoKeysResponse": { + "properties": { + "nextPageToken": { + "description": "A token to retrieve next page of results. Pass this value in\nListCryptoKeysRequest.page_token to retrieve the next page of results.", + "type": "string" + }, + "cryptoKeys": { + "description": "The list of CryptoKeys.", + "type": "array", + "items": { + "$ref": "CryptoKey" + } + }, + "totalSize": { + "description": "The total number of CryptoKeys that matched the query.", + "format": "int32", + "type": "integer" + } + }, + "id": "ListCryptoKeysResponse", + "description": "Response message for KeyManagementService.ListCryptoKeys.", + "type": "object" + }, + "Condition": { + "description": "A condition to be met.", + "type": "object", + "properties": { + "sys": { + "enumDescriptions": [ + "Default non-attribute type", + "Region of the resource", + "Service name", + "Resource name", + "IP address of the caller" + ], + "enum": [ + "NO_ATTR", + "REGION", + "SERVICE", + "NAME", + "IP" + ], + "description": "Trusted attributes supplied by any service that owns resources and uses\nthe IAM system for access control.", + "type": "string" + }, + "value": { + "description": "DEPRECATED. Use 'values' instead.", + "type": "string" + }, + "values": { + "description": "The objects of the condition. This is mutually exclusive with 'value'.", + "type": "array", + "items": { + "type": "string" + } + }, + "iam": { + "enum": [ + "NO_ATTR", + "AUTHORITY", + "ATTRIBUTION", + "APPROVER" + ], + "description": "Trusted attributes supplied by the IAM system.", + "type": "string", + "enumDescriptions": [ + "Default non-attribute.", + "Either principal or (if present) authority selector.", + "The principal (even if an authority selector is present), which\nmust only be used for attribution, not authorization.", + "An approver (distinct from the requester) that has authorized this\nrequest.\nWhen used with IN, the condition indicates that one of the approvers\nassociated with the request matches the specified principal, or is a\nmember of the specified group. Approvers can only grant additional\naccess, and are thus only used in a strictly positive context\n(e.g. ALLOW/IN or DENY/NOT_IN).\nSee: go/rpc-security-policy-dynamicauth." + ] + }, + "op": { + "enumDescriptions": [ + "Default no-op.", + "DEPRECATED. Use IN instead.", + "DEPRECATED. 
Use NOT_IN instead.", + "Set-inclusion check.", + "Set-exclusion check.", + "Subject is discharged" + ], + "enum": [ + "NO_OP", + "EQUALS", + "NOT_EQUALS", + "IN", + "NOT_IN", + "DISCHARGED" + ], + "description": "An operator to apply the subject with.", + "type": "string" + }, + "svc": { + "description": "Trusted attributes discharged by the service.", + "type": "string" + } + }, + "id": "Condition" }, - "$.xgafv": { - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" + "CounterOptions": { + "description": "Options for counters", + "type": "object", + "properties": { + "metric": { + "description": "The metric to update.", + "type": "string" + }, + "field": { + "description": "The field value to attribute.", + "type": "string" + } + }, + "id": "CounterOptions" }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" + "AuditLogConfig": { + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", + "type": "object", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "type": "array", + "items": { + "type": "string" + } + }, + "logType": { + "enumDescriptions": [ + "Default case. Should never be this.", + "Admin reads. Example: CloudIAM getIamPolicy", + "Data writes. Example: CloudSQL Users create", + "Data reads. Example: CloudSQL Users list" + ], + "enum": [ + "LOG_TYPE_UNSPECIFIED", + "ADMIN_READ", + "DATA_WRITE", + "DATA_READ" + ], + "description": "The log type that this config enables.", + "type": "string" + } + }, + "id": "AuditLogConfig" }, - "access_token": { - "location": "query", - "description": "OAuth access token.", - "type": "string" + "DecryptResponse": { + "properties": { + "plaintext": { + "description": "The decrypted data originally supplied in EncryptRequest.plaintext.", + "format": "byte", + "type": "string" + } + }, + "id": "DecryptResponse", + "description": "Response message for KeyManagementService.Decrypt.", + "type": "object" }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" + "TestIamPermissionsRequest": { + "properties": { + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsRequest", + "description": "Request message for `TestIamPermissions` method.", + "type": "object" }, - "quotaUser": { - "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "type": "object", + "properties": { + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + }, + "iamOwned": { + "type": "boolean" + }, + "rules": { + "description": "If more than one rule is specified, the rules are applied in the following\nmanner:\n- All matching LOG rules are always applied.\n- If any DENY/DENY_WITH_LOG rule matches, permission is denied.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is\n granted.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if no rule applies, permission is denied.", + "type": "array", + "items": { + "$ref": "Rule" + } + }, + "version": { + "description": "Version of the `Policy`. 
The default version is 0.", + "format": "int32", + "type": "integer" + }, + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", + "type": "array", + "items": { + "$ref": "AuditConfig" + } + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } + } + }, + "id": "Policy" }, - "pp": { - "location": "query", - "description": "Pretty-print response.", - "type": "boolean", - "default": "true" + "EncryptResponse": { + "description": "Response message for KeyManagementService.Encrypt.", + "type": "object", + "properties": { + "ciphertext": { + "description": "The encrypted data.", + "format": "byte", + "type": "string" + }, + "name": { + "description": "The resource name of the CryptoKeyVersion used in encryption.", + "type": "string" + } + }, + "id": "EncryptResponse" }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" + "KeyRing": { + "description": "A KeyRing is a toplevel logical grouping of CryptoKeys.", + "type": "object", + "properties": { + "name": { + "description": "Output only. The resource name for the KeyRing in the format\n`projects/*/locations/*/keyRings/*`.", + "type": "string" + }, + "createTime": { + "description": "Output only. The time at which this KeyRing was created.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "KeyRing" }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" + "ListLocationsResponse": { + "description": "The response message for Locations.ListLocations.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "locations": { + "description": "A list of locations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Location" + } + } + }, + "id": "ListLocationsResponse" } }, - "version": "v1beta1", - "baseUrl": "https://cloudkms.googleapis.com/", - "servicePath": "", - "description": "", - "kind": "discovery#restDescription", - "basePath": "", - "id": "cloudkms:v1beta1", - "documentationLink": "https://cloud.google.com/kms/", - "revision": "20170117" + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "canonicalName": "Cloud KMS", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://cloudkms.googleapis.com/", + "ownerDomain": "google.com", + "name": "cloudkms", + "batchPath": "batch", + "title": "Google Cloud Key Management Service (KMS) API" } diff --git a/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-gen.go b/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-gen.go index a1374fbf6..7d54e6eaf 100644 --- a/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-gen.go +++ b/vendor/google.golang.org/api/cloudkms/v1beta1/cloudkms-gen.go @@ -1,4 +1,4 @@ -// Package cloudkms provides access to the Google Cloud KMS API. +// Package cloudkms provides access to the Google Cloud Key Management Service (KMS) API. 
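For orientation, the schemas above (EncryptRequest, EncryptResponse, DecryptRequest, DecryptResponse, CryptoKey) map onto the generated Go client whose diff follows. Below is a minimal sketch of driving that client end to end; it assumes the standard accessor chain of google-api-go-client generated services (Projects.Locations.KeyRings.CryptoKeys), a placeholder CryptoKey resource name, and Application Default Credentials via golang.org/x/oauth2/google. The "format: byte" fields (plaintext, ciphertext) carry base64-encoded strings.

// Sketch only: encrypt and decrypt with the vendored cloudkms v1beta1 client.
// The key resource name and credential setup are illustrative assumptions.
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	cloudkms "google.golang.org/api/cloudkms/v1beta1"
)

func main() {
	ctx := context.Background()

	// OAuth2 client with the cloud-platform scope declared in the discovery doc.
	hc, err := google.DefaultClient(ctx, cloudkms.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := cloudkms.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder CryptoKey resource name, per the CryptoKey.name format above.
	key := "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"

	// EncryptRequest.plaintext is "format: byte", so it is base64-encoded.
	encResp, err := svc.Projects.Locations.KeyRings.CryptoKeys.Encrypt(key, &cloudkms.EncryptRequest{
		Plaintext: base64.StdEncoding.EncodeToString([]byte("hello")),
	}).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Decrypt round-trips the ciphertext returned by Encrypt.
	decResp, err := svc.Projects.Locations.KeyRings.CryptoKeys.Decrypt(key, &cloudkms.DecryptRequest{
		Ciphertext: encResp.Ciphertext,
	}).Do()
	if err != nil {
		log.Fatal(err)
	}

	plain, _ := base64.StdEncoding.DecodeString(decResp.Plaintext)
	fmt.Println(string(plain))
}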
// // See https://cloud.google.com/kms/ // @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Locations = NewProjectsLocationsService(s) @@ -320,6 +325,18 @@ type Condition struct { // "ATTRIBUTION" - The principal (even if an authority selector is // present), which // must only be used for attribution, not authorization. + // "APPROVER" - An approver (distinct from the requester) that has + // authorized this + // request. + // When used with IN, the condition indicates that one of the + // approvers + // associated with the request matches the specified principal, or is + // a + // member of the specified group. Approvers can only grant + // additional + // access, and are thus only used in a strictly positive context + // (e.g. ALLOW/IN or DENY/NOT_IN). + // See: go/rpc-security-policy-dynamicauth. Iam string `json:"iam,omitempty"` // Op: An operator to apply the subject with. @@ -1428,6 +1445,7 @@ func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1589,6 +1607,7 @@ func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1764,6 +1783,7 @@ func (c *ProjectsLocationsKeyRingsCreateCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.keyring) if err != nil { @@ -1913,6 +1933,7 @@ func (c *ProjectsLocationsKeyRingsGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2055,6 +2076,7 @@ func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2215,6 +2237,7 @@ func (c *ProjectsLocationsKeyRingsListCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2379,6 +2402,7 @@ func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) 
doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -2480,6 +2504,12 @@ type ProjectsLocationsKeyRingsTestIamPermissionsCall struct { // If the resource does not exist, this will return an empty set // of // permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware +// UIs and command-line tools, not for authorization checking. This +// operation +// may "fail open" without warning. func (r *ProjectsLocationsKeyRingsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsTestIamPermissionsCall { c := &ProjectsLocationsKeyRingsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2518,6 +2548,7 @@ func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) doRequest(alt string) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -2573,7 +2604,7 @@ func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.testIamPermissions", @@ -2663,6 +2694,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) doRequest(alt string) (* reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokey) if err != nil { @@ -2803,6 +2835,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) doRequest(alt string) ( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.decryptrequest) if err != nil { @@ -2939,6 +2972,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) doRequest(alt string) ( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.encryptrequest) if err != nil { @@ -3084,6 +3118,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3226,6 +3261,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) doRequest(alt stri reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3387,6 +3423,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3556,6 +3593,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) doRequest(alt string) (*h reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokey) if err != nil { @@ -3699,6 +3737,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) doRequest(alt stri reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -3800,6 +3839,12 @@ type ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall struct { // If the resource does not exist, this will return an empty set // of // permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware +// UIs and command-line tools, not for authorization checking. This +// operation +// may "fail open" without warning. 
func (r *ProjectsLocationsKeyRingsCryptoKeysService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { c := &ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3838,6 +3883,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) doRequest(al reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -3893,7 +3939,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Do(opts ...g } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", @@ -3974,6 +4020,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) doRequest( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatecryptokeyprimaryversionrequest) if err != nil { @@ -4113,6 +4160,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) doReque reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokeyversion) if err != nil { @@ -4260,6 +4308,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) doRequ reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.destroycryptokeyversionrequest) if err != nil { @@ -4404,6 +4453,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) doRequest( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4565,6 +4615,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) doRequest reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4741,6 +4792,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) doReques reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err 
:= googleapi.WithoutDataWrapper.JSONReader(c.cryptokeyversion) if err != nil { @@ -4889,6 +4941,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) doRequ reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.restorecryptokeyversionrequest) if err != nil { diff --git a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go index 57208fccd..0298195ae 100644 --- a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go +++ b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only MetricDescriptors *MetricDescriptorsService @@ -85,6 +86,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewMetricDescriptorsService(s *Service) *MetricDescriptorsService { rs := &MetricDescriptorsService{s: s} return rs @@ -1014,6 +1019,7 @@ func (c *MetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) if err != nil { @@ -1148,6 +1154,7 @@ func (c *MetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors/{metric}") @@ -1325,6 +1332,7 @@ func (c *MetricDescriptorsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1602,6 +1610,7 @@ func (c *TimeseriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1832,6 +1841,7 @@ func (c *TimeseriesWriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.writetimeseriesrequest) if err != nil { @@ -2072,6 +2082,7 @@ func (c *TimeseriesDescriptorsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index ea95815f0..639051f54 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -1,448 +1,521 @@ { + "batchPath": "batch", "id": "cloudresourcemanager:v1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" - } - } - } - }, - "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", - "protocol": "rest", + "documentationLink": "https://cloud.google.com/resource-manager", + "revision": "20170221", "title": "Google Cloud Resource Manager API", + "discoveryVersion": "v1", + "ownerName": "Google", "resources": { - "organizations": { + "projects": { "methods": { + "undelete": { + "httpMethod": "POST", + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "projectId": { + "description": "The project ID (for example, `foo-bar-123`).\n\nRequired.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}:undelete", + "id": "cloudresourcemanager.projects.undelete", + "path": "v1/projects/{projectId}:undelete", + "request": { + "$ref": "UndeleteProjectRequest" + }, + "description": "Restores the Project identified by the specified\n`project_id` (for example, `my-project-123`).\nYou can only use this method for a Project that has a lifecycle state of\nDELETE_REQUESTED.\nAfter deletion starts, the Project cannot be restored.\n\nThe caller must have modify permissions for this Project." + }, "get": { - "id": "cloudresourcemanager.organizations.get", "response": { - "$ref": "Organization" + "$ref": "Project" }, "parameterOrder": [ - "name" + "projectId" ], - "description": "Fetches an Organization resource identified by the specified resource name.", - "flatPath": "v1/organizations/{organizationsId}", "httpMethod": "GET", "parameters": { - "name": { - "description": "The resource name of the Organization to fetch, e.g. \"organizations/1234\".", - "required": true, - "pattern": "^organizations/[^/]+$", + "projectId": { "location": "path", + "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "required": true, "type": "string" } }, - "path": "v1/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + ], + "flatPath": "v1/projects/{projectId}", + "path": "v1/projects/{projectId}", + "id": "cloudresourcemanager.projects.get", + "description": "Retrieves the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project." 
}, - "testIamPermissions": { - "id": "cloudresourcemanager.organizations.testIamPermissions", + "getAncestry": { + "httpMethod": "POST", + "parameterOrder": [ + "projectId" + ], "response": { - "$ref": "TestIamPermissionsResponse" + "$ref": "GetAncestryResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "projectId": { + "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}:getAncestry", + "id": "cloudresourcemanager.projects.getAncestry", + "path": "v1/projects/{projectId}:getAncestry", + "description": "Gets a list of ancestors in the resource hierarchy for the Project\nidentified by the specified `project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", + "request": { + "$ref": "GetAncestryRequest" + } + }, + "update": { + "request": { + "$ref": "Project" + }, + "description": "Updates the attributes of the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have modify permissions for this Project.", + "response": { + "$ref": "Project" }, "parameterOrder": [ - "resource" + "projectId" ], - "description": "Returns permissions that a caller has on the specified Organization.\nThe `resource` field should be the organization's resource name,\ne.g. \"organizations/123\".", + "httpMethod": "PUT", + "parameters": { + "projectId": { + "description": "The project ID (for example, `my-project-123`).\n\nRequired.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}", + "path": "v1/projects/{projectId}", + "id": "cloudresourcemanager.projects.update" + }, + "testIamPermissions": { + "flatPath": "v1/projects/{resource}:testIamPermissions", + "id": "cloudresourcemanager.projects.testIamPermissions", + "path": "v1/projects/{resource}:testIamPermissions", + "description": "Returns permissions that a caller has on the specified Project.", "request": { "$ref": "TestIamPermissionsRequest" }, - "flatPath": "v1/organizations/{organizationsId}:testIamPermissions", "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], "parameters": { "resource": { "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "location": "path" } + } + }, + "delete": { + "flatPath": "v1/projects/{projectId}", + "path": "v1/projects/{projectId}", + "id": "cloudresourcemanager.projects.delete", + "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + "response": { + "$ref": "Empty" }, - "path": "v1/{+resource}:testIamPermissions", + "parameterOrder": [ + "projectId" + ], + "httpMethod": "DELETE", "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "description": "The Project ID (for example, `foo-bar-123`).\n\nRequired.", + "required": true, + "type": "string", + "location": "path" + } + } }, - "search": { - "id": "cloudresourcemanager.organizations.search", + "list": { + "description": "Lists Projects that are visible to the user and satisfy the\nspecified filter. This method returns Projects in an unspecified order.\nNew Projects do not necessarily appear at the end of the list.", "response": { - "$ref": "SearchOrganizationsResponse" + "$ref": "ListProjectsResponse" }, "parameterOrder": [], - "description": "Searches Organization resources that are visible to the user and satisfy\nthe specified filter. This method returns Organizations in an unspecified\norder. New Organizations do not necessarily appear at the end of the\nresults.", + "httpMethod": "GET", + "parameters": { + "filter": { + "location": "query", + "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. 
The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ \u003ccode\u003elabels.\u003cem\u003ekey\u003c/em\u003e\u003c/code\u003e where *key* is the name of a label\n\nSome examples of using labels as filters:\n\n|Filter|Description|\n|------|-----------|\n|name:*|The project has a name.|\n|name:Howl|The project's name is `Howl` or `howl`.|\n|name:HOWL|Equivalent to above.|\n|NAME:howl|Equivalent to above.|\n|labels.color:*|The project has the label `color`.|\n|labels.color:red|The project's label `color` has the value `red`.|\n|labels.color:red label.size:big|The project's label `color` has the\nvalue `red` and its label `size` has the value `big`.\n\nOptional.", + "type": "string" + }, + "pageToken": { + "description": "A pagination token returned from a previous call to ListProjects\nthat indicates from where listing should continue.\n\nOptional.", + "type": "string", + "location": "query" + }, + "pageSize": { + "description": "The maximum number of Projects to return in the response.\nThe server can return fewer Projects than requested.\nIf unspecified, server picks an appropriate default.\n\nOptional.", + "format": "int32", + "type": "integer", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1/projects", + "path": "v1/projects", + "id": "cloudresourcemanager.projects.list" + }, + "create": { "request": { - "$ref": "SearchOrganizationsRequest" + "$ref": "Project" }, - "flatPath": "v1/organizations:search", + "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. It is automatically deleted\nafter a few hours, so there is no need to call DeleteOperation.\n\nOur SLO permits Project creation to take up to 30 seconds at the 90th\npercentile. As of 2016-08-29, we are observing 6 seconds 50th percentile\nlatency. 95th percentile latency is around 11 seconds. We recommend\npolling at the 5th second with an exponential backoff.", "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "Operation" + }, "parameters": {}, - "path": "v1/organizations:search", "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects", + "id": "cloudresourcemanager.projects.create", + "path": "v1/projects" }, "setIamPolicy": { - "id": "cloudresourcemanager.organizations.setIamPolicy", + "flatPath": "v1/projects/{resource}:setIamPolicy", + "path": "v1/projects/{resource}:setIamPolicy", + "id": "cloudresourcemanager.projects.setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the IAM access control policy for the specified Project. Replaces\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted only to `user` and `serviceAccount`.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. 
The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ Invitations to grant the owner role cannot be sent using\n`setIamPolicy()`;\nthey must be sent only using the Cloud Platform Console.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ There must be at least one owner who has accepted the Terms of\nService (ToS) agreement in the policy. Calling `setIamPolicy()` to\nto remove the last ToS-accepted owner from the policy will fail. This\nrestriction also applies to legacy projects that no longer have owners\nwho have accepted the ToS. Edits to IAM policies will be rejected until\nthe lack of a ToS-accepting owner is rectified.\n\n+ Calling this method requires enabling the App Engine Admin API.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.", "response": { "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Sets the access control policy on an Organization resource. Replaces any\nexisting policy. The `resource` field should be the organization's resource\nname, e.g. \"organizations/123\".", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "flatPath": "v1/organizations/{organizationsId}:setIamPolicy", "httpMethod": "POST", "parameters": { "resource": { + "location": "path", "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path", "type": "string" } }, - "path": "v1/{+resource}:setIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "getIamPolicy": { - "id": "cloudresourcemanager.organizations.getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.", "response": { "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Gets the access control policy for an Organization resource. May be empty\nif no such policy or resource exists. The `resource` field should be the\norganization's resource name, e.g. \"organizations/123\".", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "flatPath": "v1/organizations/{organizationsId}:getIamPolicy", "httpMethod": "POST", "parameters": { "resource": { "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/{+resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + ], + "flatPath": "v1/projects/{resource}:getIamPolicy", + "path": "v1/projects/{resource}:getIamPolicy", + "id": "cloudresourcemanager.projects.getIamPolicy" } } }, - "projects": { + "organizations": { "methods": { - "getAncestry": { - "id": "cloudresourcemanager.projects.getAncestry", + "testIamPermissions": { + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that a caller has on the specified Organization.\nThe `resource` field should be the organization's resource name,\ne.g. \"organizations/123\".", "response": { - "$ref": "GetAncestryResponse" + "$ref": "TestIamPermissionsResponse" }, "parameterOrder": [ - "projectId" + "resource" ], - "description": "Gets a list of ancestors in the resource hierarchy for the Project\nidentified by the specified `project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", - "request": { - "$ref": "GetAncestryRequest" - }, - "flatPath": "v1/projects/{projectId}:getAncestry", "httpMethod": "POST", "parameters": { - "projectId": { - "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", - "required": true, + "resource": { "location": "path", - "type": "string" + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$" } }, - "path": "v1/projects/{projectId}:getAncestry", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + ], + "flatPath": "v1/organizations/{organizationsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "cloudresourcemanager.organizations.testIamPermissions" }, "getIamPolicy": { - "id": "cloudresourcemanager.projects.getIamPolicy", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.", + "description": "Gets the access control policy for an Organization resource. May be empty\nif no such policy or resource exists. The `resource` field should be the\norganization's resource name, e.g. \"organizations/123\".", "request": { "$ref": "GetIamPolicyRequest" }, - "flatPath": "v1/projects/{resource}:getIamPolicy", "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "location": "path", - "type": "string" - } + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" }, - "path": "v1/projects/{resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] - }, - "undelete": { - "id": "cloudresourcemanager.projects.undelete", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "projectId" ], - "description": "Restores the Project identified by the specified\n`project_id` (for example, `my-project-123`).\nYou can only use this method for a Project that has a lifecycle state of\nDELETE_REQUESTED.\nAfter deletion starts, the Project cannot be restored.\n\nThe caller must have modify permissions for this Project.", - "request": { - "$ref": "UndeleteProjectRequest" - }, - "flatPath": "v1/projects/{projectId}:undelete", - "httpMethod": "POST", "parameters": { - "projectId": { - "description": "The project ID (for example, `foo-bar-123`).\n\nRequired.", + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "pattern": "^organizations/[^/]+$", + "location": "path" } }, - "path": "v1/projects/{projectId}:undelete", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "flatPath": "v1/organizations/{organizationsId}:getIamPolicy", + "id": "cloudresourcemanager.organizations.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy" }, - "list": { - "id": "cloudresourcemanager.projects.list", + "search": { + "request": { + "$ref": "SearchOrganizationsRequest" + }, + "description": "Searches Organization resources that are visible to the user and satisfy\nthe specified filter. This method returns Organizations in an unspecified\norder. New Organizations do not necessarily appear at the end of the\nresults.", "response": { - "$ref": "ListProjectsResponse" + "$ref": "SearchOrganizationsResponse" }, "parameterOrder": [], - "description": "Lists Projects that are visible to the user and satisfy the\nspecified filter. This method returns Projects in an unspecified order.\nNew Projects do not necessarily appear at the end of the list.", - "flatPath": "v1/projects", - "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "The maximum number of Projects to return in the response.\nThe server can return fewer Projects than requested.\nIf unspecified, server picks an appropriate default.\n\nOptional.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "filter": { - "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. 
The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ \u003ccode\u003elabels.\u003cem\u003ekey\u003c/em\u003e\u003c/code\u003e where *key* is the name of a label\n\nSome examples of using labels as filters:\n\n|Filter|Description|\n|------|-----------|\n|name:*|The project has a name.|\n|name:Howl|The project's name is `Howl` or `howl`.|\n|name:HOWL|Equivalent to above.|\n|NAME:howl|Equivalent to above.|\n|labels.color:*|The project has the label `color`.|\n|labels.color:red|The project's label `color` has the value `red`.|\n|labels.color:red label.size:big|The project's label `color` has the\nvalue `red` and its label `size` has the value `big`.\n\nOptional.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "A pagination token returned from a previous call to ListProjects\nthat indicates from where listing should continue.\n\nOptional.", - "location": "query", - "type": "string" - } - }, - "path": "v1/projects", + "httpMethod": "POST", + "parameters": {}, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + ], + "flatPath": "v1/organizations:search", + "path": "v1/organizations:search", + "id": "cloudresourcemanager.organizations.search" }, "get": { - "id": "cloudresourcemanager.projects.get", - "response": { - "$ref": "Project" - }, + "httpMethod": "GET", "parameterOrder": [ - "projectId" + "name" ], - "description": "Retrieves the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", - "flatPath": "v1/projects/{projectId}", - "httpMethod": "GET", + "response": { + "$ref": "Organization" + }, "parameters": { - "projectId": { - "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "name": { + "description": "The resource name of the Organization to fetch, e.g. \"organizations/1234\".", "required": true, - "location": "path", - "type": "string" + "type": "string", + "pattern": "^organizations/[^/]+$", + "location": "path" } }, - "path": "v1/projects/{projectId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] - }, - "update": { - "id": "cloudresourcemanager.projects.update", - "response": { - "$ref": "Project" - }, - "parameterOrder": [ - "projectId" ], - "description": "Updates the attributes of the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have modify permissions for this Project.", - "request": { - "$ref": "Project" - }, - "flatPath": "v1/projects/{projectId}", - "httpMethod": "PUT", - "parameters": { - "projectId": { - "description": "The project ID (for example, `my-project-123`).\n\nRequired.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/projects/{projectId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "create": { - "id": "cloudresourcemanager.projects.create", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [], - "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. It is automatically deleted\nafter a few hours, so there is no need to call DeleteOperation.\n\nOur SLO permits Project creation to take up to 30 seconds at the 90th\npercentile. As of 2016-08-29, we are observing 6 seconds 50th percentile\nlatency. 95th percentile latency is around 11 seconds. 
We recommend\npolling at the 5th second with an exponential backoff.", - "request": { - "$ref": "Project" - }, - "flatPath": "v1/projects", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/projects", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "flatPath": "v1/organizations/{organizationsId}", + "id": "cloudresourcemanager.organizations.get", + "path": "v1/{+name}", + "description": "Fetches an Organization resource identified by the specified resource name." }, "setIamPolicy": { - "id": "cloudresourcemanager.projects.setIamPolicy", "response": { "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Sets the IAM access control policy for the specified Project. Replaces\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted only to `user` and `serviceAccount`.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ Invitations to grant the owner role cannot be sent using\n`setIamPolicy()`;\nthey must be sent only using the Cloud Platform Console.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ There must be at least one owner who has accepted the Terms of\nService (ToS) agreement in the policy. Calling `setIamPolicy()` to\nto remove the last ToS-accepted owner from the policy will fail. This\nrestriction also applies to legacy projects that no longer have owners\nwho have accepted the ToS. Edits to IAM policies will be rejected until\nthe lack of a ToS-accepting owner is rectified.\n\n+ Calling this method requires enabling the App Engine Admin API.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "flatPath": "v1/projects/{resource}:setIamPolicy", "httpMethod": "POST", "parameters": { "resource": { + "location": "path", "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "pattern": "^organizations/[^/]+$" } }, - "path": "v1/projects/{resource}:setIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] - }, + ], + "flatPath": "v1/organizations/{organizationsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "cloudresourcemanager.organizations.setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the access control policy on an Organization resource. Replaces any\nexisting policy. The `resource` field should be the organization's resource\nname, e.g. \"organizations/123\"." 
+ } + } + }, + "liens": { + "methods": { "delete": { - "id": "cloudresourcemanager.projects.delete", + "httpMethod": "DELETE", + "parameterOrder": [ + "name" + ], "response": { "$ref": "Empty" }, - "parameterOrder": [ - "projectId" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" ], - "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", - "flatPath": "v1/projects/{projectId}", - "httpMethod": "DELETE", "parameters": { - "projectId": { - "description": "The Project ID (for example, `foo-bar-123`).\n\nRequired.", - "required": true, + "name": { "location": "path", - "type": "string" + "description": "The name/identifier of the Lien to delete.", + "required": true, + "type": "string", + "pattern": "^liens/.+$" } }, - "path": "v1/projects/{projectId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "flatPath": "v1/liens/{liensId}", + "id": "cloudresourcemanager.liens.delete", + "path": "v1/{+name}", + "description": "Delete a Lien by `name`.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`." }, - "testIamPermissions": { - "id": "cloudresourcemanager.projects.testIamPermissions", + "list": { + "flatPath": "v1/liens", + "id": "cloudresourcemanager.liens.list", + "path": "v1/liens", + "description": "List all Liens applied to the `parent` resource.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.get`.", + "httpMethod": "GET", + "parameterOrder": [], "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "description": "Returns permissions that a caller has on the specified Project.", - "request": { - "$ref": "TestIamPermissionsRequest" + "$ref": "ListLiensResponse" }, - "flatPath": "v1/projects/{resource}:testIamPermissions", - "httpMethod": "POST", "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "location": "path", + "pageToken": { + "location": "query", + "description": "The `next_page_token` value returned from a previous List request, if any.", "type": "string" + }, + "pageSize": { + "location": "query", + "description": "The maximum number of items to return. 
This is a suggestion for the server.", + "format": "int32", + "type": "integer" + }, + "parent": { + "description": "The name of the resource to list all attached Liens.\nFor example, `projects/1234`.", + "type": "string", + "location": "query" } }, - "path": "v1/projects/{resource}:testIamPermissions", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" ] + }, + "create": { + "response": { + "$ref": "Lien" + }, + "parameterOrder": [], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": {}, + "flatPath": "v1/liens", + "path": "v1/liens", + "id": "cloudresourcemanager.liens.create", + "description": "Create a Lien which applies to the resource denoted by the `parent` field.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, applying to `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`.\n\nNOTE: Some resources may limit the number of Liens which may be applied.", + "request": { + "$ref": "Lien" + } } } }, "operations": { "methods": { "get": { + "flatPath": "v1/operations/{operationsId}", + "path": "v1/{+name}", "id": "cloudresourcemanager.operations.get", + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", "response": { "$ref": "Operation" }, "parameterOrder": [ "name" ], - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "parameters": { "name": { "description": "The name of the operation resource.", "required": true, + "type": "string", "pattern": "^operations/.+$", - "location": "path", - "type": "string" + "location": "path" } }, - "path": "v1/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" @@ -451,66 +524,151 @@ } } }, - "schemas": { - "FolderOperationError": { - "description": "A classification of the Folder Operation error.", + "parameters": { + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + } + }, + "schemas": { + "UndeleteProjectRequest": { + "description": "The request sent to the UndeleteProject\nmethod.", + "type": "object", + "properties": {}, + "id": "UndeleteProjectRequest" + }, + "ProjectCreationStatus": { + "description": "A status object which is used as the `metadata` field for the Operation\nreturned by CreateProject. It provides insight for when significant phases of\nProject creation have completed.", "type": "object", "properties": { - "errorMessageId": { - "description": "The type of operation error experienced.", - "enum": [ - "ERROR_TYPE_UNSPECIFIED", - "FOLDER_HEIGHT_VIOLATION", - "MAX_CHILD_FOLDERS_VIOLATION", - "FOLDER_NAME_UNIQUENESS_VIOLATION", - "RESOURCE_DELETED", - "PARENT_DELETED", - "CYCLE_INTRODUCED_ERROR", - "FOLDER_ALREADY_BEING_MOVED", - "FOLDER_TO_DELETE_NON_EMPTY" - ], - "enumDescriptions": [ - "The error type was unrecognized or unspecified.", - "The attempted action would violate the max folder depth constraint.", - "The attempted action would violate the max child folders constraint.", - "The attempted action would violate the locally-unique folder\ndisplay_name constraint.", - "The resource being moved has been deleted.", - "The resource a folder was being added to has been deleted.", - "The attempted action would introduce cycle in resource path.", - "The attempted action would move a folder that is already being moved.", - "The folder the caller is trying to delete contains active resources." - ], + "ready": { + "description": "True if the project creation process is complete.", + "type": "boolean" + }, + "createTime": { + "description": "Creation time of the project creation workflow.", + "format": "google-datetime", "type": "string" + }, + "gettable": { + "description": "True if the project can be retrieved using GetProject. 
No other operations\non the project are guaranteed to work until the project creation is\ncomplete.", + "type": "boolean" } }, - "id": "FolderOperationError" + "id": "ProjectCreationStatus" }, - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "GetIamPolicyRequest": { + "description": "Request message for `GetIamPolicy` method.", + "type": "object", + "properties": {}, + "id": "GetIamPolicyRequest" + }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", "type": "object", "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" - }, - "details": { - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", "type": "array", "items": { - "additionalProperties": { - "description": "Properties of the object. 
Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "type": "string" } - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + } + }, + "id": "TestIamPermissionsResponse" + }, + "OrganizationOwner": { + "description": "The entity that owns an Organization. The lifetime of the Organization and\nall of its descendants are bound to the `OrganizationOwner`. If the\n`OrganizationOwner` is deleted, the Organization and all its descendants will\nbe deleted.", + "type": "object", + "properties": { + "directoryCustomerId": { + "description": "The Google for Work customer id used in the Directory API.", "type": "string" } }, - "id": "Status" + "id": "OrganizationOwner" }, "ListProjectsResponse": { "description": "A page of the response received from the\nListProjects\nmethod.\n\nA paginated response where more pages are available has\n`next_page_token` set. This token can be used in a subsequent request to\nretrieve the next request page.", @@ -530,206 +688,243 @@ }, "id": "ListProjectsResponse" }, - "UndeleteProjectRequest": { - "description": "The request sent to the UndeleteProject\nmethod.", - "type": "object", - "properties": {}, - "id": "UndeleteProjectRequest" - }, - "FolderOperation": { - "description": "Metadata describing a long running folder operation", + "Project": { + "description": "A Project is a high-level Google Cloud Platform entity. It is a\ncontainer for ACLs, APIs, App Engine Apps, VMs, and other\nGoogle Cloud Platform resources.", "type": "object", "properties": { - "displayName": { - "description": "The display name of the folder.", + "labels": { + "description": "The labels associated with this Project.\n\nLabel keys must be between 1 and 63 characters long and must conform\nto the following regular expression: \\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?.\n\nLabel values must be between 0 and 63 characters long and must conform\nto the regular expression (\\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?)?.\n\nNo more than 256 labels can be associated with a given resource.\n\nClients should store labels in a representation such as JSON that does not\ndepend on specific characters being disallowed.\n\nExample: \u003ccode\u003e\"environment\" : \"dev\"\u003c/code\u003e\nRead-write.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "createTime": { + "description": "Creation time.\n\nRead-only.", + "format": "google-datetime", "type": "string" }, - "sourceParent": { - "description": "The resource name of the folder's parent.\nOnly applicable when the operation_type is MOVE.", + "name": { + "description": "The user-assigned display name of the Project.\nIt must be 4 to 30 characters.\nAllowed characters are: lowercase and uppercase letters, numbers,\nhyphen, single-quote, double-quote, space, and exclamation point.\n\nExample: \u003ccode\u003eMy Project\u003c/code\u003e\nRead-write.", "type": "string" }, - "destinationParent": { - "description": "The resource name of the folder or organization we are either creating\nthe folder under or moving the folder to.", + "projectId": { + "description": "The unique, user-assigned ID of the Project.\nIt must be 6 to 30 lowercase letters, digits, or hyphens.\nIt must start with a letter.\nTrailing hyphens are prohibited.\n\nExample: \u003ccode\u003etokyo-rain-123\u003c/code\u003e\nRead-only after creation.", "type": "string" 
}, - "operationType": { - "description": "The type of this operation.", - "enum": [ - "OPERATION_TYPE_UNSPECIFIED", - "CREATE", - "MOVE" + "lifecycleState": { + "description": "The Project lifecycle state.\n\nRead-only.", + "type": "string", + "enumDescriptions": [ + "Unspecified state. This is only used/useful for distinguishing\nunset values.", + "The normal and active state.", + "The project has been marked for deletion by the user\n(by invoking\nDeleteProject)\nor by the system (Google Cloud Platform).\nThis can generally be reversed by invoking UndeleteProject.", + "This lifecycle state is no longer used and not returned by the API." ], + "enum": [ + "LIFECYCLE_STATE_UNSPECIFIED", + "ACTIVE", + "DELETE_REQUESTED", + "DELETE_IN_PROGRESS" + ] + }, + "projectNumber": { + "description": "The number uniquely identifying the project.\n\nExample: \u003ccode\u003e415104041262\u003c/code\u003e\nRead-only.", + "format": "int64", + "type": "string" + }, + "parent": { + "$ref": "ResourceId", + "description": "An optional reference to a parent Resource.\n\nThe only supported parent type is \"organization\". Once set, the parent\ncannot be modified. The `parent` can be set on creation or using the\n`UpdateProject` method; the end user must have the\n`resourcemanager.projects.create` permission on the parent.\n\nRead-write." + } + }, + "id": "Project" + }, + "SearchOrganizationsResponse": { + "description": "The response returned from the `SearchOrganizations` method.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "A pagination token to be used to retrieve the next page of results. If the\nresult is too large to fit within the page size specified in the request,\nthis field will be set with a token that can be used to fetch the next page\nof results. If this field is empty, it indicates that this response\ncontains the last page of results.", + "type": "string" + }, + "organizations": { + "description": "The list of Organizations that matched the search query, possibly\npaginated.", + "type": "array", + "items": { + "$ref": "Organization" + } + } + }, + "id": "SearchOrganizationsResponse" + }, + "FolderOperationError": { + "description": "A classification of the Folder Operation error.", + "type": "object", + "properties": { + "errorMessageId": { "enumDescriptions": [ - "Operation type not specified.", - "A create folder operation.", - "A move folder operation." + "The error type was unrecognized or unspecified.", + "The attempted action would violate the max folder depth constraint.", + "The attempted action would violate the max child folders constraint.", + "The attempted action would violate the locally-unique folder\ndisplay_name constraint.", + "The resource being moved has been deleted.", + "The resource a folder was being added to has been deleted.", + "The attempted action would introduce cycle in resource path.", + "The attempted action would move a folder that is already being moved.", + "The folder the caller is trying to delete contains active resources." 
+ ], + "enum": [ + "ERROR_TYPE_UNSPECIFIED", + "FOLDER_HEIGHT_VIOLATION", + "MAX_CHILD_FOLDERS_VIOLATION", + "FOLDER_NAME_UNIQUENESS_VIOLATION", + "RESOURCE_DELETED", + "PARENT_DELETED", + "CYCLE_INTRODUCED_ERROR", + "FOLDER_ALREADY_BEING_MOVED", + "FOLDER_TO_DELETE_NON_EMPTY" ], + "description": "The type of operation error experienced.", "type": "string" } }, - "id": "FolderOperation" + "id": "FolderOperationError" }, - "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "Lien": { + "description": "A Lien represents an encumbrance on the actions that can be performed on a\nresource.", "type": "object", "properties": { - "error": { - "description": "The error result of the operation in case of failure or cancellation.", - "$ref": "Status" + "restrictions": { + "description": "The types of operations which should be blocked as a result of this Lien.\nEach value should correspond to an IAM permission. The server will\nvalidate the permissions against those for which Liens are supported.\n\nAn empty list is meaningless and will be rejected.\n\nExample: ['resourcemanager.projects.delete']", + "type": "array", + "items": { + "type": "string" + } }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" + "parent": { + "description": "A reference to the resource this Lien is attached to. The server will\nvalidate the parent against those for which Liens are supported.\n\nExample: `projects/1234`", + "type": "string" }, - "metadata": { - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "createTime": { + "description": "The creation time of this Lien.", + "format": "google-datetime", + "type": "string" }, - "response": { - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "origin": { + "description": "A stable, user-visible/meaningful string identifying the origin of the\nLien, intended to be inspected programmatically. Maximum length of 200\ncharacters.\n\nExample: 'compute.googleapis.com'", + "type": "string" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. 
If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "description": "A system-generated unique identifier for this Lien.\n\nExample: `liens/1234abcd`", + "type": "string" + }, + "reason": { + "description": "Concise user-visible strings indicating why an action cannot be performed\non a resource. Maximum lenth of 200 characters.\n\nExample: 'Holds production API key'", "type": "string" } }, - "id": "Operation" + "id": "Lien" }, - "GetIamPolicyRequest": { - "description": "Request message for `GetIamPolicy` method.", + "Ancestor": { + "description": "Identifying information for a single ancestor of a project.", "type": "object", - "properties": {}, - "id": "GetIamPolicyRequest" + "properties": { + "resourceId": { + "$ref": "ResourceId", + "description": "Resource id of the ancestor." + } + }, + "id": "Ancestor" }, "SetIamPolicyRequest": { "description": "Request message for `SetIamPolicy` method.", "type": "object", "properties": { "policy": { - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", - "$ref": "Policy" + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." } }, "id": "SetIamPolicyRequest" }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", "type": "object", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "TestIamPermissionsResponse" + "properties": {}, + "id": "Empty" }, "Organization": { "description": "The root node in the resource hierarchy to which a particular entity's\n(e.g., company) resources belong.", "type": "object", "properties": { - "displayName": { - "description": "A friendly string to be used to refer to the Organization in the UI.\nAssigned by the server, set to the firm name of the Google For Work\ncustomer that owns this organization.\n@OutputOnly", - "type": "string" - }, - "creationTime": { - "description": "Timestamp when the Organization was created. Assigned by the server.\n@OutputOnly", - "type": "string", - "format": "google-datetime" - }, "owner": { - "description": "The owner of this Organization. The owner should be specified on\ncreation. Once set, it cannot be changed.\nThis field is required.", - "$ref": "OrganizationOwner" + "$ref": "OrganizationOwner", + "description": "The owner of this Organization. The owner should be specified on\ncreation. Once set, it cannot be changed.\nThis field is required." }, "lifecycleState": { "description": "The organization's current lifecycle state. 
Assigned by the server.\n@OutputOnly", - "enum": [ - "LIFECYCLE_STATE_UNSPECIFIED", - "ACTIVE", - "DELETE_REQUESTED" - ], + "type": "string", "enumDescriptions": [ "Unspecified state. This is only useful for distinguishing unset values.", "The normal and active state.", "The organization has been marked for deletion by the user." ], - "type": "string" + "enum": [ + "LIFECYCLE_STATE_UNSPECIFIED", + "ACTIVE", + "DELETE_REQUESTED" + ] }, "name": { "description": "Output Only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", "type": "string" + }, + "displayName": { + "description": "A friendly string to be used to refer to the Organization in the UI.\nAssigned by the server, set to the primary domain of the G Suite\ncustomer that owns the organization.\n@OutputOnly", + "type": "string" + }, + "creationTime": { + "description": "Timestamp when the Organization was created. Assigned by the server.\n@OutputOnly", + "format": "google-datetime", + "type": "string" } }, "id": "Organization" }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "GetAncestryResponse": { + "description": "Response from the GetAncestry method.", "type": "object", "properties": { - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "ancestor": { + "description": "Ancestors are ordered from bottom to top of the resource hierarchy. The\nfirst ancestor is the project itself, followed by the project's parent,\netc.", "type": "array", "items": { - "$ref": "Binding" + "$ref": "Ancestor" } - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "type": "string", - "format": "byte" - }, - "version": { - "description": "Version of the `Policy`. 
The default version is 0.", - "type": "integer", - "format": "int32" } }, - "id": "Policy" + "id": "GetAncestryResponse" }, - "ProjectCreationStatus": { - "description": "A status object which is used as the `metadata` field for the Operation\nreturned by CreateProject. It provides insight for when significant phases of\nProject creation have completed.", + "SearchOrganizationsRequest": { + "description": "The request sent to the `SearchOrganizations` method.", "type": "object", "properties": { - "ready": { - "description": "True if the project creation process is complete.", - "type": "boolean" + "pageSize": { + "description": "The maximum number of Organizations to return in the response.\nThis field is optional.", + "format": "int32", + "type": "integer" }, - "gettable": { - "description": "True if the project can be retrieved using GetProject. No other operations\non the project are guaranteed to work until the project creation is\ncomplete.", - "type": "boolean" + "filter": { + "description": "An optional query string used to filter the Organizations to return in\nthe response. Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a Google for Work domain, for example:\n\n|Filter|Description|\n|------|-----------|\n|owner.directorycustomerid:123456789|Organizations with\n`owner.directory_customer_id` equal to `123456789`.|\n|domain:google.com|Organizations corresponding to the domain `google.com`.|\n\nThis field is optional.", + "type": "string" }, - "createTime": { - "description": "Creation time of the project creation workflow.", - "type": "string", - "format": "google-datetime" - } - }, - "id": "ProjectCreationStatus" - }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "array", - "items": { - "type": "string" - } + "pageToken": { + "description": "A pagination token returned from a previous call to `SearchOrganizations`\nthat indicates from where listing should continue.\nThis field is optional.", + "type": "string" } }, - "id": "TestIamPermissionsRequest" + "id": "SearchOrganizationsRequest" }, "GetAncestryRequest": { "description": "The request sent to the\nGetAncestry\nmethod.", @@ -737,77 +932,79 @@ "properties": {}, "id": "GetAncestryRequest" }, - "GetAncestryResponse": { - "description": "Response from the GetAncestry method.", + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", "type": "object", "properties": { - "ancestor": { - "description": "Ancestors are ordered from bottom to top of the resource hierarchy. The\nfirst ancestor is the project itself, followed by the project's parent,\netc.", + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "type": "array", "items": { - "$ref": "Ancestor" + "type": "string" } } }, - "id": "GetAncestryResponse" + "id": "TestIamPermissionsRequest" }, - "SearchOrganizationsResponse": { - "description": "The response returned from the `SearchOrganizations` method.", + "FolderOperation": { + "description": "Metadata describing a long running folder operation", "type": "object", "properties": { - "organizations": { - "description": "The list of Organizations that matched the search query, possibly\npaginated.", - "type": "array", - "items": { - "$ref": "Organization" - } + "operationType": { + "description": "The type of this operation.", + "type": "string", + "enumDescriptions": [ + "Operation type not specified.", + "A create folder operation.", + "A move folder operation." + ], + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "CREATE", + "MOVE" + ] }, - "nextPageToken": { - "description": "A pagination token to be used to retrieve the next page of results. If the\nresult is too large to fit within the page size specified in the request,\nthis field will be set with a token that can be used to fetch the next page\nof results. If this field is empty, it indicates that this response\ncontains the last page of results.", + "displayName": { + "description": "The display name of the folder.", "type": "string" - } - }, - "id": "SearchOrganizationsResponse" - }, - "SearchOrganizationsRequest": { - "description": "The request sent to the `SearchOrganizations` method.", - "type": "object", - "properties": { - "pageSize": { - "description": "The maximum number of Organizations to return in the response.\nThis field is optional.", - "type": "integer", - "format": "int32" }, - "filter": { - "description": "An optional query string used to filter the Organizations to return in\nthe response. Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a Google for Work domain, for example:\n\n|Filter|Description|\n|------|-----------|\n|owner.directorycustomerid:123456789|Organizations with\n`owner.directory_customer_id` equal to `123456789`.|\n|domain:google.com|Organizations corresponding to the domain `google.com`.|\n\nThis field is optional.", + "sourceParent": { + "description": "The resource name of the folder's parent.\nOnly applicable when the operation_type is MOVE.", "type": "string" - }, - "pageToken": { - "description": "A pagination token returned from a previous call to `SearchOrganizations`\nthat indicates from where listing should continue.\nThis field is optional.", + }, + "destinationParent": { + "description": "The resource name of the folder or organization we are either creating\nthe folder under or moving the folder to.", "type": "string" } }, - "id": "SearchOrganizationsRequest" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" + "id": "FolderOperation" }, - "Ancestor": { - "description": "Identifying information for a single ancestor of a project.", + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", "type": "object", "properties": { - "resourceId": { - "description": "Resource id of the ancestor.", - "$ref": "ResourceId" + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + }, + "version": { + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32", + "type": "integer" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } } }, - "id": "Ancestor" + "id": "Policy" }, "ResourceId": { - "description": "A container to reference an id for any resource type. A `resource` in Google\nCloud Platform is a generic term for something you (a developer) may want to\ninteract with through one of our API's. Some examples are an AppEngine app,\na Compute Engine instance, a Cloud SQL database, and so on.", + "description": "A container to reference an id for any resource type. A `resource` in Google\nCloud Platform is a generic term for something you (a developer) may want to\ninteract with through one of our API's. 
Some examples are an App Engine app,\na Compute Engine instance, a Cloud SQL database, and so on.", "type": "object", "properties": { "type": { @@ -821,192 +1018,130 @@ }, "id": "ResourceId" }, - "Binding": { - "description": "Associates `members` with a `role`.", + "Operation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", "type": "object", "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", - "type": "array", - "items": { - "type": "string" + "error": { + "description": "The error result of the operation in case of failure or cancellation.", + "$ref": "Status" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object" + }, + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" + }, + "response": { + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" } }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", "type": "string" } }, - "id": "Binding" + "id": "Operation" }, - "Project": { - "description": "A Project is a high-level Google Cloud Platform entity. 
It is a\ncontainer for ACLs, APIs, AppEngine Apps, VMs, and other\nGoogle Cloud Platform resources.", + "ListLiensResponse": { + "description": "The response message for Liens.ListLiens.", "type": "object", "properties": { - "parent": { - "description": "An optional reference to a parent Resource.\n\nThe only supported parent type is \"organization\". Once set, the parent\ncannot be modified. The `parent` can be set on creation or using the\n`UpdateProject` method; the end user must have the\n`resourcemanager.projects.create` permission on the parent.\n\nRead-write.", - "$ref": "ResourceId" - }, - "labels": { - "description": "The labels associated with this Project.\n\nLabel keys must be between 1 and 63 characters long and must conform\nto the following regular expression: \\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?.\n\nLabel values must be between 0 and 63 characters long and must conform\nto the regular expression (\\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?)?.\n\nNo more than 256 labels can be associated with a given resource.\n\nClients should store labels in a representation such as JSON that does not\ndepend on specific characters being disallowed.\n\nExample: \u003ccode\u003e\"environment\" : \"dev\"\u003c/code\u003e\nRead-write.", - "additionalProperties": { - "type": "string" - }, - "type": "object" + "liens": { + "description": "A list of Liens.", + "type": "array", + "items": { + "$ref": "Lien" + } }, - "lifecycleState": { - "description": "The Project lifecycle state.\n\nRead-only.", - "enum": [ - "LIFECYCLE_STATE_UNSPECIFIED", - "ACTIVE", - "DELETE_REQUESTED", - "DELETE_IN_PROGRESS" - ], - "enumDescriptions": [ - "Unspecified state. This is only used/useful for distinguishing\nunset values.", - "The normal and active state.", - "The project has been marked for deletion by the user\n(by invoking\nDeleteProject)\nor by the system (Google Cloud Platform).\nThis can generally be reversed by invoking UndeleteProject.", - "This lifecycle state is no longer used and not returned by the API." - ], + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more\nresults in the list.", "type": "string" + } + }, + "id": "ListLiensResponse" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. 
For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" }, - "createTime": { - "description": "Creation time.\n\nRead-only.", - "type": "string", - "format": "google-datetime" - }, - "name": { - "description": "The user-assigned display name of the Project.\nIt must be 4 to 30 characters.\nAllowed characters are: lowercase and uppercase letters, numbers,\nhyphen, single-quote, double-quote, space, and exclamation point.\n\nExample: \u003ccode\u003eMy Project\u003c/code\u003e\nRead-write.", + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", "type": "string" }, - "projectNumber": { - "description": "The number uniquely identifying the project.\n\nExample: \u003ccode\u003e415104041262\u003c/code\u003e\nRead-only.", - "type": "string", - "format": "int64" - }, - "projectId": { - "description": "The unique, user-assigned ID of the Project.\nIt must be 6 to 30 lowercase letters, digits, or hyphens.\nIt must start with a letter.\nTrailing hyphens are prohibited.\n\nExample: \u003ccode\u003etokyo-rain-123\u003c/code\u003e\nRead-only after creation.", - "type": "string" + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + } } }, - "id": "Project" + "id": "Status" }, - "OrganizationOwner": { - "description": "The entity that owns an Organization. The lifetime of the Organization and\nall of its descendants are bound to the `OrganizationOwner`. 
If the\n`OrganizationOwner` is deleted, the Organization and all its descendants will\nbe deleted.", + "Binding": { + "description": "Associates `members` with a `role`.", "type": "object", "properties": { - "directoryCustomerId": { - "description": "The Google for Work customer id used in the Directory API.", + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "type": "array", + "items": { + "type": "string" + } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", "type": "string" } }, - "id": "OrganizationOwner" + "id": "Binding" } }, - "revision": "20170122", - "basePath": "", + "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "canonicalName": "Cloud Resource Manager", - "discoveryVersion": "v1", + "version": "v1", "baseUrl": "https://cloudresourcemanager.googleapis.com/", - "name": "cloudresourcemanager", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "canonicalName": "Cloud Resource Manager", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } }, - "documentationLink": "https://cloud.google.com/resource-manager", - "ownerDomain": "google.com", - "batchPath": "batch", "servicePath": "", - "ownerName": "Google", - "version": "v1", + "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", + "kind": "discovery#restDescription", "rootUrl": "https://cloudresourcemanager.googleapis.com/", - "kind": "discovery#restDescription" + "basePath": "", + "ownerDomain": "google.com", + "name": "cloudresourcemanager" } diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index e21b76c8a..e8cfc8319 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -59,6 +59,7 @@ func New(client *http.Client) (*Service, error) { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} + s.Liens = NewLiensService(s) s.Operations = NewOperationsService(s) s.Organizations = NewOrganizationsService(s) s.Projects = NewProjectsService(s) @@ -66,9 +67,12 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + 
GoogleClientHeaderElement string // client header fragment, for Google use only + + Liens *LiensService Operations *OperationsService @@ -84,6 +88,19 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewLiensService(s *Service) *LiensService { + rs := &LiensService{s: s} + return rs +} + +type LiensService struct { + s *Service +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -363,6 +380,118 @@ func (s *GetAncestryResponse) MarshalJSON() ([]byte, error) { type GetIamPolicyRequest struct { } +// Lien: A Lien represents an encumbrance on the actions that can be +// performed on a +// resource. +type Lien struct { + // CreateTime: The creation time of this Lien. + CreateTime string `json:"createTime,omitempty"` + + // Name: A system-generated unique identifier for this Lien. + // + // Example: `liens/1234abcd` + Name string `json:"name,omitempty"` + + // Origin: A stable, user-visible/meaningful string identifying the + // origin of the + // Lien, intended to be inspected programmatically. Maximum length of + // 200 + // characters. + // + // Example: 'compute.googleapis.com' + Origin string `json:"origin,omitempty"` + + // Parent: A reference to the resource this Lien is attached to. The + // server will + // validate the parent against those for which Liens are + // supported. + // + // Example: `projects/1234` + Parent string `json:"parent,omitempty"` + + // Reason: Concise user-visible strings indicating why an action cannot + // be performed + // on a resource. Maximum lenth of 200 characters. + // + // Example: 'Holds production API key' + Reason string `json:"reason,omitempty"` + + // Restrictions: The types of operations which should be blocked as a + // result of this Lien. + // Each value should correspond to an IAM permission. The server + // will + // validate the permissions against those for which Liens are + // supported. + // + // An empty list is meaningless and will be rejected. + // + // Example: ['resourcemanager.projects.delete'] + Restrictions []string `json:"restrictions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Lien) MarshalJSON() ([]byte, error) { + type noMethod Lien + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListLiensResponse: The response message for Liens.ListLiens. 
+type ListLiensResponse struct { + // Liens: A list of Liens. + Liens []*Lien `json:"liens,omitempty"` + + // NextPageToken: Token to retrieve the next page of results, or empty + // if there are no more + // results in the list. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Liens") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Liens") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListLiensResponse) MarshalJSON() ([]byte, error) { + type noMethod ListLiensResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListProjectsResponse: A page of the response received from // the // ListProjects @@ -509,9 +638,9 @@ type Organization struct { // DisplayName: A friendly string to be used to refer to the // Organization in the UI. - // Assigned by the server, set to the firm name of the Google For - // Work - // customer that owns this organization. + // Assigned by the server, set to the primary domain of the G + // Suite + // customer that owns the organization. // @OutputOnly DisplayName string `json:"displayName,omitempty"` @@ -698,7 +827,7 @@ func (s *Policy) MarshalJSON() ([]byte, error) { // Project: A Project is a high-level Google Cloud Platform entity. It // is a -// container for ACLs, APIs, AppEngine Apps, VMs, and other +// container for ACLs, APIs, App Engine Apps, VMs, and other // Google Cloud Platform resources. type Project struct { // CreateTime: Creation time. @@ -860,8 +989,8 @@ func (s *ProjectCreationStatus) MarshalJSON() ([]byte, error) { // `resource` in Google // Cloud Platform is a generic term for something you (a developer) may // want to -// interact with through one of our API's. Some examples are an -// AppEngine app, +// interact with through one of our API's. Some examples are an App +// Engine app, // a Compute Engine instance, a Cloud SQL database, and so on. type ResourceId struct { // Id: Required field for the type-specific id. This should correspond @@ -1230,6 +1359,464 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type UndeleteProjectRequest struct { } +// method id "cloudresourcemanager.liens.create": + +type LiensCreateCall struct { + s *Service + lien *Lien + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Create a Lien which applies to the resource denoted by the +// `parent` field. +// +// Callers of this method will require permission on the `parent` +// resource. +// For example, applying to `projects/1234` requires +// permission +// `resourcemanager.projects.updateLiens`. 
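[Editor's note — not part of the vendored file.] The update above adds a Liens client to the generated cloudresourcemanager package (Service.Liens with Create, Delete, and List calls, plus the Lien and ListLiensResponse types). A minimal usage sketch follows; it only relies on the types and call signatures visible in this diff, and assumes application-default credentials. The project ID, origin, and reason values are hypothetical.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
)

func main() {
	ctx := context.Background()

	// Application-default credentials with the cloud-platform scope this API declares.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	svc, err := cloudresourcemanager.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Attach a lien that blocks project deletion; Parent and Origin are hypothetical.
	lien, err := svc.Liens.Create(&cloudresourcemanager.Lien{
		Parent:       "projects/1234",
		Origin:       "my-broker.example.com",
		Reason:       "Holds production service instances",
		Restrictions: []string{"resourcemanager.projects.delete"},
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created lien:", lien.Name) // server-assigned, e.g. "liens/1234abcd"
}

As the Create documentation in this file notes, the caller needs the resourcemanager.projects.updateLiens permission on the parent resource for this call to succeed.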
+// +// NOTE: Some resources may limit the number of Liens which may be +// applied. +func (r *LiensService) Create(lien *Lien) *LiensCreateCall { + c := &LiensCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.lien = lien + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiensCreateCall) Fields(s ...googleapi.Field) *LiensCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiensCreateCall) Context(ctx context.Context) *LiensCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiensCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.lien) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/liens") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.liens.create" call. +// Exactly one of *Lien or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Lien.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *LiensCreateCall) Do(opts ...googleapi.CallOption) (*Lien, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Lien{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create a Lien which applies to the resource denoted by the `parent` field.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, applying to `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`.\n\nNOTE: Some resources may limit the number of Liens which may be applied.", + // "flatPath": "v1/liens", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.liens.create", + // "parameterOrder": [], + // "parameters": {}, + // "path": "v1/liens", + // "request": { + // "$ref": "Lien" + // }, + // "response": { + // "$ref": "Lien" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "cloudresourcemanager.liens.delete": + +type LiensDeleteCall struct { + s *Service + nameid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Delete a Lien by `name`. +// +// Callers of this method will require permission on the `parent` +// resource. +// For example, a Lien with a `parent` of `projects/1234` requires +// permission +// `resourcemanager.projects.updateLiens`. +func (r *LiensService) Delete(nameid string) *LiensDeleteCall { + c := &LiensDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiensDeleteCall) Fields(s ...googleapi.Field) *LiensDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiensDeleteCall) Context(ctx context.Context) *LiensDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiensDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.liens.delete" call. 
+// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LiensDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Delete a Lien by `name`.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`.", + // "flatPath": "v1/liens/{liensId}", + // "httpMethod": "DELETE", + // "id": "cloudresourcemanager.liens.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name/identifier of the Lien to delete.", + // "location": "path", + // "pattern": "^liens/.+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "cloudresourcemanager.liens.list": + +type LiensListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List all Liens applied to the `parent` resource. +// +// Callers of this method will require permission on the `parent` +// resource. +// For example, a Lien with a `parent` of `projects/1234` requires +// permission +// `resourcemanager.projects.get`. +func (r *LiensService) List() *LiensListCall { + c := &LiensListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return. This is a suggestion for the server. +func (c *LiensListCall) PageSize(pageSize int64) *LiensListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// `next_page_token` value returned from a previous List request, if +// any. +func (c *LiensListCall) PageToken(pageToken string) *LiensListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": The name of the resource +// to list all attached Liens. +// For example, `projects/1234`. +func (c *LiensListCall) Parent(parent string) *LiensListCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
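[Editor's note — not part of the vendored file.] A companion sketch for enumerating and removing liens on a parent resource, using only the LiensService.List, Pages, and Delete calls added in this file. It assumes the same imports and svc setup as the Create sketch above; the parent value is illustrative.

// cleanupLiens lists every Lien attached to parent (e.g. "projects/1234") and deletes it,
// letting the generated Pages helper drive pagination via NextPageToken.
func cleanupLiens(ctx context.Context, svc *cloudresourcemanager.Service, parent string) error {
	return svc.Liens.List().Parent(parent).PageSize(50).Pages(ctx, func(page *cloudresourcemanager.ListLiensResponse) error {
		for _, l := range page.Liens {
			// Delete takes the lien's resource name, e.g. "liens/1234abcd".
			if _, err := svc.Liens.Delete(l.Name).Context(ctx).Do(); err != nil {
				return err
			}
		}
		return nil
	})
}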
+func (c *LiensListCall) Fields(s ...googleapi.Field) *LiensListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LiensListCall) IfNoneMatch(entityTag string) *LiensListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiensListCall) Context(ctx context.Context) *LiensListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiensListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/liens") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.liens.list" call. +// Exactly one of *ListLiensResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLiensResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LiensListCall) Do(opts ...googleapi.CallOption) (*ListLiensResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListLiensResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all Liens applied to the `parent` resource.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.get`.", + // "flatPath": "v1/liens", + // "httpMethod": "GET", + // "id": "cloudresourcemanager.liens.list", + // "parameterOrder": [], + // "parameters": { + // "pageSize": { + // "description": "The maximum number of items to return. 
This is a suggestion for the server.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The `next_page_token` value returned from a previous List request, if any.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The name of the resource to list all attached Liens.\nFor example, `projects/1234`.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/liens", + // "response": { + // "$ref": "ListLiensResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *LiensListCall) Pages(ctx context.Context, f func(*ListLiensResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "cloudresourcemanager.operations.get": type OperationsGetCall struct { @@ -1293,6 +1880,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1434,6 +2022,7 @@ func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1569,6 +2158,7 @@ func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { @@ -1709,6 +2299,7 @@ func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchorganizationsrequest) if err != nil { @@ -1857,6 +2448,7 @@ func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -1995,6 +2587,7 @@ func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -2141,6 +2734,7 @@ func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.project) if err != nil { @@ -2286,6 +2880,7 @@ func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}") @@ -2424,6 +3019,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2559,6 +3155,7 @@ func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getancestryrequest) if err != nil { @@ -2696,6 +3293,7 @@ func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { @@ -2890,6 +3488,7 @@ func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3099,6 +3698,7 @@ func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -3234,6 +3834,7 @@ func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -3377,6 +3978,7 @@ func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteprojectrequest) if err != nil { @@ -3515,6 +4117,7 @@ func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.project) if err != nil { diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json index 7f1cd8877..373989174 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json +++ 
b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json @@ -1,530 +1,602 @@ { - "id": "cloudresourcemanager:v1beta1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" - } - } - } - }, - "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", - "protocol": "rest", "title": "Google Cloud Resource Manager API", + "ownerName": "Google", + "discoveryVersion": "v1", "resources": { - "projects": { + "organizations": { "methods": { - "getAncestry": { - "id": "cloudresourcemanager.projects.getAncestry", + "get": { + "description": "Fetches an Organization resource identified by the specified resource name.", "response": { - "$ref": "GetAncestryResponse" + "$ref": "Organization" }, "parameterOrder": [ - "projectId" + "name" ], - "description": "Gets a list of ancestors in the resource hierarchy for the Project\nidentified by the specified `project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", - "request": { - "$ref": "GetAncestryRequest" - }, - "flatPath": "v1beta1/projects/{projectId}:getAncestry", - "httpMethod": "POST", + "httpMethod": "GET", "parameters": { - "projectId": { - "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", - "required": true, - "location": "path", + "organizationId": { + "location": "query", + "description": "The id of the Organization resource to fetch.\nThis field is deprecated and will be removed in v1. Use name instead.", "type": "string" + }, + "name": { + "location": "path", + "description": "The resource name of the Organization to fetch, e.g. \"organizations/1234\".", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$" } }, - "path": "v1beta1/projects/{projectId}:getAncestry", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + ], + "flatPath": "v1beta1/organizations/{organizationsId}", + "path": "v1beta1/{+name}", + "id": "cloudresourcemanager.organizations.get" }, - "getIamPolicy": { - "id": "cloudresourcemanager.projects.getIamPolicy", + "update": { + "description": "Updates an Organization resource identified by the specified resource name.", + "request": { + "$ref": "Organization" + }, "response": { - "$ref": "Policy" + "$ref": "Organization" }, "parameterOrder": [ - "resource" + "name" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" ], - "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "flatPath": "v1beta1/projects/{resource}:getIamPolicy", - "httpMethod": "POST", "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, + "name": { "location": "path", - "type": "string" + "description": "Output Only. The resource name of the organization. This is the\norganization's relative path in the API. 
Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$" } }, - "path": "v1beta1/projects/{resource}:getIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + "flatPath": "v1beta1/organizations/{organizationsId}", + "path": "v1beta1/{+name}", + "id": "cloudresourcemanager.organizations.update" }, - "undelete": { - "id": "cloudresourcemanager.projects.undelete", - "response": { - "$ref": "Empty" + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified Organization.\nThe `resource` field should be the organization's resource name,\ne.g. \"organizations/123\".", + "request": { + "$ref": "TestIamPermissionsRequest" }, + "httpMethod": "POST", "parameterOrder": [ - "projectId" + "resource" ], - "description": "Restores the Project identified by the specified\n`project_id` (for example, `my-project-123`).\nYou can only use this method for a Project that has a lifecycle state of\nDELETE_REQUESTED.\nAfter deletion starts, the Project cannot be restored.\n\nThe caller must have modify permissions for this Project.", - "request": { - "$ref": "UndeleteProjectRequest" + "response": { + "$ref": "TestIamPermissionsResponse" }, - "flatPath": "v1beta1/projects/{projectId}:undelete", - "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], "parameters": { - "projectId": { - "description": "The project ID (for example, `foo-bar-123`).\n\nRequired.", + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "pattern": "^organizations/[^/]+$", + "location": "path" } }, - "path": "v1beta1/projects/{projectId}:undelete", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "flatPath": "v1beta1/organizations/{organizationsId}:testIamPermissions", + "id": "cloudresourcemanager.organizations.testIamPermissions", + "path": "v1beta1/{+resource}:testIamPermissions" }, "list": { - "id": "cloudresourcemanager.projects.list", + "flatPath": "v1beta1/organizations", + "path": "v1beta1/organizations", + "id": "cloudresourcemanager.organizations.list", + "description": "Lists Organization resources that are visible to the user and satisfy\nthe specified filter. This method returns Organizations in an unspecified\norder. New Organizations do not necessarily appear at the end of the list.", "response": { - "$ref": "ListProjectsResponse" + "$ref": "ListOrganizationsResponse" }, "parameterOrder": [], - "description": "Lists Projects that are visible to the user and satisfy the\nspecified filter. 
This method returns Projects in an unspecified order.\nNew Projects do not necessarily appear at the end of the list.", - "flatPath": "v1beta1/projects", "httpMethod": "GET", "parameters": { "pageSize": { - "description": "The maximum number of Projects to return in the response.\nThe server can return fewer Projects than requested.\nIf unspecified, server picks an appropriate default.\n\nOptional.", "location": "query", - "type": "integer", - "format": "int32" + "description": "The maximum number of Organizations to return in the response.\nThis field is optional.", + "format": "int32", + "type": "integer" }, "filter": { - "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ \u003ccode\u003elabels.\u003cem\u003ekey\u003c/em\u003e\u003c/code\u003e where *key* is the name of a label\n\nSome examples of using labels as filters:\n\n|Filter|Description|\n|------|-----------|\n|name:*|The project has a name.|\n|name:Howl|The project's name is `Howl` or `howl`.|\n|name:HOWL|Equivalent to above.|\n|NAME:howl|Equivalent to above.|\n|labels.color:*|The project has the label `color`.|\n|labels.color:red|The project's label `color` has the value `red`.|\n|labels.color:red label.size:big|The project's label `color` has the value `red` and its label `size` has the value `big`.\n\nOptional.", - "location": "query", - "type": "string" + "description": "An optional query string used to filter the Organizations to return in\nthe response. Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a Google for Work domain, for example:\n\n|Filter|Description|\n|------|-----------|\n|owner.directorycustomerid:123456789|Organizations with `owner.directory_customer_id` equal to `123456789`.|\n|domain:google.com|Organizations corresponding to the domain `google.com`.|\n\nThis field is optional.", + "type": "string", + "location": "query" }, "pageToken": { - "description": "A pagination token returned from a previous call to ListProjects\nthat indicates from where listing should continue.\n\nOptional.", - "location": "query", - "type": "string" + "description": "A pagination token returned from a previous call to `ListOrganizations`\nthat indicates from where listing should continue.\nThis field is optional.", + "type": "string", + "location": "query" } }, - "path": "v1beta1/projects", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" ] }, - "get": { - "id": "cloudresourcemanager.projects.get", - "response": { - "$ref": "Project" + "setIamPolicy": { + "flatPath": "v1beta1/organizations/{organizationsId}:setIamPolicy", + "id": "cloudresourcemanager.organizations.setIamPolicy", + "path": "v1beta1/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" }, + "description": "Sets the access control policy on an Organization resource. Replaces any\nexisting policy. The `resource` field should be the organization's resource\nname, e.g. 
\"organizations/123\".", + "httpMethod": "POST", "parameterOrder": [ - "projectId" + "resource" ], - "description": "Retrieves the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", - "flatPath": "v1beta1/projects/{projectId}", - "httpMethod": "GET", + "response": { + "$ref": "Policy" + }, "parameters": { - "projectId": { - "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "pattern": "^organizations/[^/]+$", + "location": "path" } }, - "path": "v1beta1/projects/{projectId}", "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only" + "https://www.googleapis.com/auth/cloud-platform" ] }, - "update": { - "id": "cloudresourcemanager.projects.update", + "getIamPolicy": { + "request": { + "$ref": "GetIamPolicyRequest" + }, + "description": "Gets the access control policy for an Organization resource. May be empty\nif no such policy or resource exists. The `resource` field should be the\norganization's resource name, e.g. \"organizations/123\".", "response": { - "$ref": "Project" + "$ref": "Policy" }, "parameterOrder": [ - "projectId" + "resource" ], - "description": "Updates the attributes of the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have modify permissions for this Project.", - "request": { - "$ref": "Project" - }, - "flatPath": "v1beta1/projects/{projectId}", - "httpMethod": "PUT", + "httpMethod": "POST", "parameters": { - "projectId": { - "description": "The project ID (for example, `my-project-123`).\n\nRequired.", - "required": true, + "resource": { "location": "path", - "type": "string" + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$" } }, - "path": "v1beta1/projects/{projectId}", "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "create": { - "id": "cloudresourcemanager.projects.create", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "flatPath": "v1beta1/organizations/{organizationsId}:getIamPolicy", + "path": "v1beta1/{+resource}:getIamPolicy", + "id": "cloudresourcemanager.organizations.getIamPolicy" + } + } + }, + "projects": { + "methods": { + "list": { + "httpMethod": "GET", "response": { - "$ref": "Project" + "$ref": "ListProjectsResponse" }, "parameterOrder": [], - "description": "Creates a Project resource.\n\nInitially, the Project resource is owned by its creator exclusively.\nThe creator can later grant permission to others to read or update the\nProject.\n\nSeveral APIs are activated automatically for the Project, including\nGoogle Cloud Storage.", - "request": { - "$ref": "Project" - }, - "flatPath": "v1beta1/projects", - "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], "parameters": { - "useLegacyStack": { - "description": "A safety hatch to opt out of the new reliable project creation process.", + "pageSize": { + "description": "The maximum number of Projects to return in the response.\nThe server can return fewer Projects than requested.\nIf unspecified, server picks an appropriate default.\n\nOptional.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "filter": { + "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ \u003ccode\u003elabels.\u003cem\u003ekey\u003c/em\u003e\u003c/code\u003e where *key* is the name of a label\n\nSome examples of using labels as filters:\n\n|Filter|Description|\n|------|-----------|\n|name:*|The project has a name.|\n|name:Howl|The project's name is `Howl` or `howl`.|\n|name:HOWL|Equivalent to above.|\n|NAME:howl|Equivalent to above.|\n|labels.color:*|The project has the label `color`.|\n|labels.color:red|The project's label `color` has the value `red`.|\n|labels.color:red label.size:big|The project's label `color` has the value `red` and its label `size` has the value `big`.\n\nOptional.", + "type": "string", + "location": "query" + }, + "pageToken": { "location": "query", - "type": "boolean" + "description": "A pagination token returned from a previous call to ListProjects\nthat indicates from where listing should continue.\n\nOptional.", + "type": "string" } }, + "flatPath": "v1beta1/projects", + "id": "cloudresourcemanager.projects.list", "path": "v1beta1/projects", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "description": "Lists Projects that are visible to the user and satisfy the\nspecified filter. This method returns Projects in an unspecified order.\nNew Projects do not necessarily appear at the end of the list." }, "setIamPolicy": { - "id": "cloudresourcemanager.projects.setIamPolicy", - "response": { - "$ref": "Policy" - }, + "httpMethod": "POST", "parameterOrder": [ "resource" ], - "description": "Sets the IAM access control policy for the specified Project. 
Replaces\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted only to `user` and `serviceAccount`.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ Invitations to grant the owner role cannot be sent using `setIamPolicy()`;\nthey must be sent only using the Cloud Platform Console.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ There must be at least one owner who has accepted the Terms of\nService (ToS) agreement in the policy. Calling `setIamPolicy()` to\nto remove the last ToS-accepted owner from the policy will fail. This\nrestriction also applies to legacy projects that no longer have owners\nwho have accepted the ToS. Edits to IAM policies will be rejected until\nthe lack of a ToS-accepting owner is rectified.\n\n+ Calling this method requires enabling the App Engine Admin API.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its roles.", - "request": { - "$ref": "SetIamPolicyRequest" + "response": { + "$ref": "Policy" }, - "flatPath": "v1beta1/projects/{resource}:setIamPolicy", - "httpMethod": "POST", - "parameters": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { "resource": { + "location": "path", "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "location": "path", "type": "string" } }, + "flatPath": "v1beta1/projects/{resource}:setIamPolicy", + "id": "cloudresourcemanager.projects.setIamPolicy", "path": "v1beta1/projects/{resource}:setIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "description": "Sets the IAM access control policy for the specified Project. Replaces\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted only to `user` and `serviceAccount`.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. 
The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ Invitations to grant the owner role cannot be sent using `setIamPolicy()`;\nthey must be sent only using the Cloud Platform Console.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ There must be at least one owner who has accepted the Terms of\nService (ToS) agreement in the policy. Calling `setIamPolicy()` to\nto remove the last ToS-accepted owner from the policy will fail. This\nrestriction also applies to legacy projects that no longer have owners\nwho have accepted the ToS. Edits to IAM policies will be rejected until\nthe lack of a ToS-accepting owner is rectified.\n\n+ Calling this method requires enabling the App Engine Admin API.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its roles.", + "request": { + "$ref": "SetIamPolicyRequest" + } }, - "delete": { - "id": "cloudresourcemanager.projects.delete", + "create": { + "flatPath": "v1beta1/projects", + "path": "v1beta1/projects", + "id": "cloudresourcemanager.projects.create", + "request": { + "$ref": "Project" + }, + "description": "Creates a Project resource.\n\nInitially, the Project resource is owned by its creator exclusively.\nThe creator can later grant permission to others to read or update the\nProject.\n\nSeveral APIs are activated automatically for the Project, including\nGoogle Cloud Storage.", "response": { - "$ref": "Empty" + "$ref": "Project" }, - "parameterOrder": [ - "projectId" - ], - "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time, at which point the project is\nno longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", - "flatPath": "v1beta1/projects/{projectId}", - "httpMethod": "DELETE", + "parameterOrder": [], + "httpMethod": "POST", "parameters": { - "projectId": { - "description": "The Project ID (for example, `foo-bar-123`).\n\nRequired.", - "required": true, - "location": "path", - "type": "string" + "useLegacyStack": { + "description": "A safety hatch to opt out of the new reliable project creation process.", + "type": "boolean", + "location": "query" } }, - "path": "v1beta1/projects/{projectId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, - "testIamPermissions": { - "id": "cloudresourcemanager.projects.testIamPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "description": "Returns permissions that a caller has on the specified Project.", + "getIamPolicy": { + "flatPath": 
"v1beta1/projects/{resource}:getIamPolicy", + "path": "v1beta1/projects/{resource}:getIamPolicy", + "id": "cloudresourcemanager.projects.getIamPolicy", "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1beta1/projects/{resource}:testIamPermissions", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "location": "path", - "type": "string" - } + "$ref": "GetIamPolicyRequest" }, - "path": "v1beta1/projects/{resource}:testIamPermissions", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only" - ] - } - } - }, - "organizations": { - "methods": { - "getIamPolicy": { - "id": "cloudresourcemanager.organizations.getIamPolicy", + "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.", "response": { "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Gets the access control policy for an Organization resource. May be empty\nif no such policy or resource exists. The `resource` field should be the\norganization's resource name, e.g. \"organizations/123\".", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "flatPath": "v1beta1/organizations/{organizationsId}:getIamPolicy", "httpMethod": "POST", "parameters": { "resource": { + "location": "path", "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path", "type": "string" } }, - "path": "v1beta1/{+resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" ] }, - "list": { - "id": "cloudresourcemanager.organizations.list", - "response": { - "$ref": "ListOrganizationsResponse" - }, - "parameterOrder": [], - "description": "Lists Organization resources that are visible to the user and satisfy\nthe specified filter. This method returns Organizations in an unspecified\norder. New Organizations do not necessarily appear at the end of the list.", - "flatPath": "v1beta1/organizations", + "get": { + "description": "Retrieves the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "The maximum number of Organizations to return in the response.\nThis field is optional.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "filter": { - "description": "An optional query string used to filter the Organizations to return in\nthe response. 
Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a Google for Work domain, for example:\n\n|Filter|Description|\n|------|-----------|\n|owner.directorycustomerid:123456789|Organizations with `owner.directory_customer_id` equal to `123456789`.|\n|domain:google.com|Organizations corresponding to the domain `google.com`.|\n\nThis field is optional.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "A pagination token returned from a previous call to `ListOrganizations`\nthat indicates from where listing should continue.\nThis field is optional.", - "location": "query", - "type": "string" - } + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "Project" }, - "path": "v1beta1/organizations", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] + ], + "parameters": { + "projectId": { + "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectId}", + "id": "cloudresourcemanager.projects.get", + "path": "v1beta1/projects/{projectId}" }, - "get": { - "id": "cloudresourcemanager.organizations.get", + "undelete": { + "flatPath": "v1beta1/projects/{projectId}:undelete", + "path": "v1beta1/projects/{projectId}:undelete", + "id": "cloudresourcemanager.projects.undelete", + "request": { + "$ref": "UndeleteProjectRequest" + }, + "description": "Restores the Project identified by the specified\n`project_id` (for example, `my-project-123`).\nYou can only use this method for a Project that has a lifecycle state of\nDELETE_REQUESTED.\nAfter deletion starts, the Project cannot be restored.\n\nThe caller must have modify permissions for this Project.", "response": { - "$ref": "Organization" + "$ref": "Empty" }, "parameterOrder": [ - "name" + "projectId" ], - "description": "Fetches an Organization resource identified by the specified resource name.", - "flatPath": "v1beta1/organizations/{organizationsId}", - "httpMethod": "GET", + "httpMethod": "POST", "parameters": { - "name": { - "description": "The resource name of the Organization to fetch, e.g. \"organizations/1234\".", + "projectId": { + "description": "The project ID (for example, `foo-bar-123`).\n\nRequired.", "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path", - "type": "string" - }, - "organizationId": { - "description": "The id of the Organization resource to fetch.\nThis field is deprecated and will be removed in v1. 
Use name instead.", - "location": "query", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1beta1/{+name}", "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only" + "https://www.googleapis.com/auth/cloud-platform" ] }, "update": { - "id": "cloudresourcemanager.organizations.update", + "description": "Updates the attributes of the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have modify permissions for this Project.", + "request": { + "$ref": "Project" + }, "response": { - "$ref": "Organization" + "$ref": "Project" }, "parameterOrder": [ - "name" + "projectId" ], - "description": "Updates an Organization resource identified by the specified resource name.", - "request": { - "$ref": "Organization" - }, - "flatPath": "v1beta1/organizations/{organizationsId}", "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { - "name": { - "description": "Output Only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", + "projectId": { + "description": "The project ID (for example, `my-project-123`).\n\nRequired.", "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1beta1/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "flatPath": "v1beta1/projects/{projectId}", + "path": "v1beta1/projects/{projectId}", + "id": "cloudresourcemanager.projects.update" }, - "setIamPolicy": { - "id": "cloudresourcemanager.organizations.setIamPolicy", - "response": { - "$ref": "Policy" - }, + "getAncestry": { + "httpMethod": "POST", "parameterOrder": [ - "resource" + "projectId" ], - "description": "Sets the access control policy on an Organization resource. Replaces any\nexisting policy. The `resource` field should be the organization's resource\nname, e.g. \"organizations/123\".", - "request": { - "$ref": "SetIamPolicyRequest" + "response": { + "$ref": "GetAncestryResponse" }, - "flatPath": "v1beta1/organizations/{organizationsId}:setIamPolicy", - "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^organizations/[^/]+$", + "projectId": { "location": "path", + "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "required": true, "type": "string" } }, - "path": "v1beta1/{+resource}:setIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "flatPath": "v1beta1/projects/{projectId}:getAncestry", + "id": "cloudresourcemanager.projects.getAncestry", + "path": "v1beta1/projects/{projectId}:getAncestry", + "description": "Gets a list of ancestors in the resource hierarchy for the Project\nidentified by the specified `project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", + "request": { + "$ref": "GetAncestryRequest" + } }, "testIamPermissions": { - "id": "cloudresourcemanager.organizations.testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that a caller has on the specified Project.", "response": { "$ref": "TestIamPermissionsResponse" }, "parameterOrder": [ "resource" ], - "description": "Returns permissions that a caller has on the specified Organization.\nThe `resource` field should be the organization's resource name,\ne.g. \"organizations/123\".", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1beta1/organizations/{organizationsId}:testIamPermissions", "httpMethod": "POST", "parameters": { "resource": { + "location": "path", "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path", "type": "string" } }, - "path": "v1beta1/{+resource}:testIamPermissions", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" - ] - } - } - } - }, - "schemas": { - "FolderOperationError": { - "description": "A classification of the Folder Operation error.", - "type": "object", - "properties": { - "errorMessageId": { - "description": "The type of operation error experienced.", - "enum": [ - "ERROR_TYPE_UNSPECIFIED", - "FOLDER_HEIGHT_VIOLATION", - "MAX_CHILD_FOLDERS_VIOLATION", - "FOLDER_NAME_UNIQUENESS_VIOLATION", - "RESOURCE_DELETED", - "PARENT_DELETED", - "CYCLE_INTRODUCED_ERROR", - "FOLDER_ALREADY_BEING_MOVED", - "FOLDER_TO_DELETE_NON_EMPTY" ], - "enumDescriptions": [ - "The error type was unrecognized or unspecified.", - "The attempted action would violate the max folder depth constraint.", - "The attempted action would violate the max child folders constraint.", - "The attempted action would violate the locally-unique folder\ndisplay_name constraint.", - "The resource being moved has been deleted.", - "The resource a folder was being added to has been deleted.", - "The attempted action would introduce cycle in resource path.", - "The attempted action would move a folder that is already being moved.", - "The folder the caller is trying to delete contains active resources." 
+ "flatPath": "v1beta1/projects/{resource}:testIamPermissions", + "path": "v1beta1/projects/{resource}:testIamPermissions", + "id": "cloudresourcemanager.projects.testIamPermissions" + }, + "delete": { + "flatPath": "v1beta1/projects/{projectId}", + "path": "v1beta1/projects/{projectId}", + "id": "cloudresourcemanager.projects.delete", + "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time, at which point the project is\nno longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "projectId" ], - "type": "string" + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "description": "The Project ID (for example, `foo-bar-123`).\n\nRequired.", + "required": true, + "type": "string", + "location": "path" + } + } } - }, - "id": "FolderOperationError" + } + } + }, + "parameters": { + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" }, - "ListProjectsResponse": { - "description": "A page of the response received from the\nListProjects\nmethod.\n\nA paginated response where more pages are available has\n`next_page_token` set. This token can be used in a subsequent request to\nretrieve the next request page.", + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + } + }, + "schemas": { + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "Organization": { + "description": "The root node in the resource hierarchy to which a particular entity's\n(e.g., company) resources belong.", "type": "object", "properties": { - "projects": { - "description": "The list of Projects that matched the list filter. This list can\nbe paginated.", - "type": "array", - "items": { - "$ref": "Project" - } + "owner": { + "description": "The owner of this Organization. The owner should be specified on\ncreation. Once set, it cannot be changed.\nThis field is required.", + "$ref": "OrganizationOwner" }, - "nextPageToken": { - "description": "Pagination token.\n\nIf the result set is too large to fit in a single response, this token\nis returned. It encodes the position of the current result cursor.\nFeeding this value into a new list request with the `page_token` parameter\ngives the next page of the results.\n\nWhen `next_page_token` is not filled in, there is no next page and\nthe list returned is the last page in the result set.\n\nPagination tokens have a limited lifetime.", + "name": { + "description": "Output Only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", + "type": "string" + }, + "organizationId": { + "description": "An immutable id for the Organization that is assigned on creation. This\nshould be omitted when creating a new Organization.\nThis field is read-only.\nThis field is deprecated and will be removed in v1. Use name instead.", + "type": "string" + }, + "lifecycleState": { + "enumDescriptions": [ + "Unspecified state. This is only useful for distinguishing unset values.", + "The normal and active state.", + "The organization has been marked for deletion by the user." 
+ ], + "enum": [ + "LIFECYCLE_STATE_UNSPECIFIED", + "ACTIVE", + "DELETE_REQUESTED" + ], + "description": "The organization's current lifecycle state. Assigned by the server.\n@OutputOnly", + "type": "string" + }, + "displayName": { + "description": "A friendly string to be used to refer to the Organization in the UI.\nAssigned by the server, set to the primary domain of the G Suite\ncustomer that owns the organization.\n@OutputOnly", + "type": "string" + }, + "creationTime": { + "description": "Timestamp when the Organization was created. Assigned by the server.\n@OutputOnly", + "format": "google-datetime", "type": "string" } }, - "id": "ListProjectsResponse" + "id": "Organization" }, "UndeleteProjectRequest": { "description": "The request sent to the UndeleteProject\nmethod.", @@ -532,38 +604,25 @@ "properties": {}, "id": "UndeleteProjectRequest" }, - "FolderOperation": { - "description": "Metadata describing a long running folder operation", + "ProjectCreationStatus": { + "description": "A status object which is used as the `metadata` field for the Operation\nreturned by CreateProject. It provides insight for when significant phases of\nProject creation have completed.", "type": "object", "properties": { - "displayName": { - "description": "The display name of the folder.", - "type": "string" - }, - "sourceParent": { - "description": "The resource name of the folder's parent.\nOnly applicable when the operation_type is MOVE.", + "createTime": { + "description": "Creation time of the project creation workflow.", + "format": "google-datetime", "type": "string" }, - "destinationParent": { - "description": "The resource name of the folder or organization we are either creating\nthe folder under or moving the folder to.", - "type": "string" + "gettable": { + "description": "True if the project can be retrieved using GetProject. No other operations\non the project are guaranteed to work until the project creation is\ncomplete.", + "type": "boolean" }, - "operationType": { - "description": "The type of this operation.", - "enum": [ - "OPERATION_TYPE_UNSPECIFIED", - "CREATE", - "MOVE" - ], - "enumDescriptions": [ - "Operation type not specified.", - "A create folder operation.", - "A move folder operation." - ], - "type": "string" + "ready": { + "description": "True if the project creation process is complete.", + "type": "boolean" } }, - "id": "FolderOperation" + "id": "ProjectCreationStatus" }, "GetIamPolicyRequest": { "description": "Request message for `GetIamPolicy` method.", @@ -571,17 +630,6 @@ "properties": {}, "id": "GetIamPolicyRequest" }, - "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "type": "object", - "properties": { - "policy": { - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. 
An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", - "$ref": "Policy" - } - }, - "id": "SetIamPolicyRequest" - }, "TestIamPermissionsResponse": { "description": "Response message for `TestIamPermissions` method.", "type": "object", @@ -596,149 +644,225 @@ }, "id": "TestIamPermissionsResponse" }, - "Organization": { - "description": "The root node in the resource hierarchy to which a particular entity's\n(e.g., company) resources belong.", + "GetAncestryResponse": { + "description": "Response from the GetAncestry method.", + "type": "object", + "properties": { + "ancestor": { + "description": "Ancestors are ordered from bottom to top of the resource hierarchy. The\nfirst ancestor is the project itself, followed by the project's parent,\netc.", + "type": "array", + "items": { + "$ref": "Ancestor" + } + } + }, + "id": "GetAncestryResponse" + }, + "OrganizationOwner": { + "description": "The entity that owns an Organization. The lifetime of the Organization and\nall of its descendants are bound to the `OrganizationOwner`. If the\n`OrganizationOwner` is deleted, the Organization and all its descendants will\nbe deleted.", + "type": "object", + "properties": { + "directoryCustomerId": { + "description": "The Google for Work customer id used in the Directory API.", + "type": "string" + } + }, + "id": "OrganizationOwner" + }, + "ListProjectsResponse": { + "description": "A page of the response received from the\nListProjects\nmethod.\n\nA paginated response where more pages are available has\n`next_page_token` set. This token can be used in a subsequent request to\nretrieve the next request page.", + "type": "object", + "properties": { + "projects": { + "description": "The list of Projects that matched the list filter. This list can\nbe paginated.", + "type": "array", + "items": { + "$ref": "Project" + } + }, + "nextPageToken": { + "description": "Pagination token.\n\nIf the result set is too large to fit in a single response, this token\nis returned. It encodes the position of the current result cursor.\nFeeding this value into a new list request with the `page_token` parameter\ngives the next page of the results.\n\nWhen `next_page_token` is not filled in, there is no next page and\nthe list returned is the last page in the result set.\n\nPagination tokens have a limited lifetime.", + "type": "string" + } + }, + "id": "ListProjectsResponse" + }, + "GetAncestryRequest": { + "description": "The request sent to the\nGetAncestry\nmethod.", + "type": "object", + "properties": {}, + "id": "GetAncestryRequest" + }, + "Project": { + "description": "A Project is a high-level Google Cloud Platform entity. It is a\ncontainer for ACLs, APIs, App Engine Apps, VMs, and other\nGoogle Cloud Platform resources.", "type": "object", "properties": { + "projectId": { + "description": "The unique, user-assigned ID of the Project.\nIt must be 6 to 30 lowercase letters, digits, or hyphens.\nIt must start with a letter.\nTrailing hyphens are prohibited.\n\nExample: \u003ccode\u003etokyo-rain-123\u003c/code\u003e\nRead-only after creation.", + "type": "string" + }, "lifecycleState": { - "description": "The organization's current lifecycle state. Assigned by the server.\n@OutputOnly", + "enumDescriptions": [ + "Unspecified state. 
This is only used/useful for distinguishing\nunset values.", + "The normal and active state.", + "The project has been marked for deletion by the user\n(by invoking DeleteProject)\nor by the system (Google Cloud Platform).\nThis can generally be reversed by invoking UndeleteProject.", + "This lifecycle state is no longer used and is not returned by the API." + ], "enum": [ "LIFECYCLE_STATE_UNSPECIFIED", "ACTIVE", - "DELETE_REQUESTED" - ], - "enumDescriptions": [ - "Unspecified state. This is only useful for distinguishing unset values.", - "The normal and active state.", - "The organization has been marked for deletion by the user." + "DELETE_REQUESTED", + "DELETE_IN_PROGRESS" ], + "description": "The Project lifecycle state.\n\nRead-only.", "type": "string" }, - "displayName": { - "description": "A friendly string to be used to refer to the Organization in the UI.\nAssigned by the server, set to the firm name of the Google For Work\ncustomer that owns this organization.\n@OutputOnly", + "projectNumber": { + "description": "The number uniquely identifying the project.\n\nExample: \u003ccode\u003e415104041262\u003c/code\u003e\nRead-only.", + "format": "int64", "type": "string" }, - "organizationId": { - "description": "An immutable id for the Organization that is assigned on creation. This\nshould be omitted when creating a new Organization.\nThis field is read-only.\nThis field is deprecated and will be removed in v1. Use name instead.", + "parent": { + "$ref": "ResourceId", + "description": "An optional reference to a parent Resource.\n\nThe only supported parent type is \"organization\". Once set, the parent\ncannot be modified. The `parent` can be set on creation or using the\n`UpdateProject` method; the end user must have the\n`resourcemanager.projects.create` permission on the parent.\n\nRead-write." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The labels associated with this Project.\n\nLabel keys must be between 1 and 63 characters long and must conform\nto the following regular expression: \\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?.\n\nLabel values must be between 0 and 63 characters long and must conform\nto the regular expression (\\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?)?.\n\nNo more than 256 labels can be associated with a given resource.\n\nClients should store labels in a representation such as JSON that does not\ndepend on specific characters being disallowed.\n\nExample: \u003ccode\u003e\"environment\" : \"dev\"\u003c/code\u003e\nRead-write.", + "type": "object" + }, + "createTime": { + "description": "Creation time.\n\nRead-only.", + "format": "google-datetime", "type": "string" }, "name": { - "description": "Output Only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", + "description": "The user-assigned display name of the Project.\nIt must be 4 to 30 characters.\nAllowed characters are: lowercase and uppercase letters, numbers,\nhyphen, single-quote, double-quote, space, and exclamation point.\n\nExample: \u003ccode\u003eMy Project\u003c/code\u003e\nRead-write.", "type": "string" - }, - "creationTime": { - "description": "Timestamp when the Organization was created. Assigned by the server.\n@OutputOnly", - "type": "string", - "format": "google-datetime" - }, - "owner": { - "description": "The owner of this Organization. The owner should be specified on\ncreation. 
Once set, it cannot be changed.\nThis field is required.", - "$ref": "OrganizationOwner" } }, - "id": "Organization" + "id": "Project" }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", "type": "object", "properties": { - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "type": "array", "items": { - "$ref": "Binding" + "type": "string" } - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + } + }, + "id": "TestIamPermissionsRequest" + }, + "FolderOperationError": { + "description": "A classification of the Folder Operation error.", + "type": "object", + "properties": { + "errorMessageId": { + "description": "The type of operation error experienced.", "type": "string", - "format": "byte" - }, - "version": { - "description": "Version of the `Policy`. The default version is 0.", - "type": "integer", - "format": "int32" + "enumDescriptions": [ + "The error type was unrecognized or unspecified.", + "The attempted action would violate the max folder depth constraint.", + "The attempted action would violate the max child folders constraint.", + "The attempted action would violate the locally-unique folder\ndisplay_name constraint.", + "The resource being moved has been deleted.", + "The resource a folder was being added to has been deleted.", + "The attempted action would introduce cycle in resource path.", + "The attempted action would move a folder that is already being moved.", + "The folder the caller is trying to delete contains active resources." 
+ ], + "enum": [ + "ERROR_TYPE_UNSPECIFIED", + "FOLDER_HEIGHT_VIOLATION", + "MAX_CHILD_FOLDERS_VIOLATION", + "FOLDER_NAME_UNIQUENESS_VIOLATION", + "RESOURCE_DELETED", + "PARENT_DELETED", + "CYCLE_INTRODUCED_ERROR", + "FOLDER_ALREADY_BEING_MOVED", + "FOLDER_TO_DELETE_NON_EMPTY" + ] } }, - "id": "Policy" + "id": "FolderOperationError" }, - "ProjectCreationStatus": { - "description": "A status object which is used as the `metadata` field for the Operation\nreturned by CreateProject. It provides insight for when significant phases of\nProject creation have completed.", + "FolderOperation": { + "description": "Metadata describing a long running folder operation", "type": "object", "properties": { - "ready": { - "description": "True if the project creation process is complete.", - "type": "boolean" + "displayName": { + "description": "The display name of the folder.", + "type": "string" }, - "gettable": { - "description": "True if the project can be retrieved using GetProject. No other operations\non the project are guaranteed to work until the project creation is\ncomplete.", - "type": "boolean" + "sourceParent": { + "description": "The resource name of the folder's parent.\nOnly applicable when the operation_type is MOVE.", + "type": "string" }, - "createTime": { - "description": "Creation time of the project creation workflow.", - "type": "string", - "format": "google-datetime" + "destinationParent": { + "description": "The resource name of the folder or organization we are either creating\nthe folder under or moving the folder to.", + "type": "string" + }, + "operationType": { + "enumDescriptions": [ + "Operation type not specified.", + "A create folder operation.", + "A move folder operation." + ], + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "CREATE", + "MOVE" + ], + "description": "The type of this operation.", + "type": "string" } }, - "id": "ProjectCreationStatus" + "id": "FolderOperation" }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", "type": "object", "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + }, + "version": { + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32", + "type": "integer" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", "type": "array", "items": { - "type": "string" + "$ref": "Binding" } } }, - "id": "TestIamPermissionsRequest" + "id": "Policy" }, - "ListOrganizationsResponse": { - "description": "The response returned from the `ListOrganizations` method.", + "ResourceId": { + "description": "A container to reference an id for any resource type. A `resource` in Google\nCloud Platform is a generic term for something you (a developer) may want to\ninteract with through one of our API's. Some examples are an App Engine app,\na Compute Engine instance, a Cloud SQL database, and so on.", "type": "object", "properties": { - "organizations": { - "description": "The list of Organizations that matched the list query, possibly paginated.", - "type": "array", - "items": { - "$ref": "Organization" - } + "type": { + "description": "Required field representing the resource type this id is for.\nAt present, the valid types are \"project\" and \"organization\".", + "type": "string" }, - "nextPageToken": { - "description": "A pagination token to be used to retrieve the next page of results. If the\nresult is too large to fit within the page size specified in the request,\nthis field will be set with a token that can be used to fetch the next page\nof results. If this field is empty, it indicates that this response\ncontains the last page of results.", + "id": { + "description": "Required field for the type-specific id. This should correspond to the id\nused in the type-specific API's.", "type": "string" } }, - "id": "ListOrganizationsResponse" - }, - "GetAncestryRequest": { - "description": "The request sent to the\nGetAncestry\nmethod.", - "type": "object", - "properties": {}, - "id": "GetAncestryRequest" - }, - "GetAncestryResponse": { - "description": "Response from the GetAncestry method.", - "type": "object", - "properties": { - "ancestor": { - "description": "Ancestors are ordered from bottom to top of the resource hierarchy. The\nfirst ancestor is the project itself, followed by the project's parent,\netc.", - "type": "array", - "items": { - "$ref": "Ancestor" - } - } - }, - "id": "GetAncestryResponse" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" + "id": "ResourceId" }, "Ancestor": { "description": "Identifying information for a single ancestor of a project.", @@ -751,72 +875,34 @@ }, "id": "Ancestor" }, - "ResourceId": { - "description": "A container to reference an id for any resource type. A `resource` in Google\nCloud Platform is a generic term for something you (a developer) may want to\ninteract with through one of our API's. Some examples are an AppEngine app,\na Compute Engine instance, a Cloud SQL database, and so on.", + "ListOrganizationsResponse": { + "description": "The response returned from the `ListOrganizations` method.", "type": "object", "properties": { - "type": { - "description": "Required field representing the resource type this id is for.\nAt present, the valid types are \"project\" and \"organization\".", - "type": "string" + "organizations": { + "description": "The list of Organizations that matched the list query, possibly paginated.", + "type": "array", + "items": { + "$ref": "Organization" + } }, - "id": { - "description": "Required field for the type-specific id. This should correspond to the id\nused in the type-specific API's.", + "nextPageToken": { + "description": "A pagination token to be used to retrieve the next page of results. If the\nresult is too large to fit within the page size specified in the request,\nthis field will be set with a token that can be used to fetch the next page\nof results. If this field is empty, it indicates that this response\ncontains the last page of results.", "type": "string" } }, - "id": "ResourceId" + "id": "ListOrganizationsResponse" }, - "Project": { - "description": "A Project is a high-level Google Cloud Platform entity. It is a\ncontainer for ACLs, APIs, AppEngine Apps, VMs, and other\nGoogle Cloud Platform resources.", + "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", "type": "object", "properties": { - "parent": { - "description": "An optional reference to a parent Resource.\n\nThe only supported parent type is \"organization\". Once set, the parent\ncannot be modified. The `parent` can be set on creation or using the\n`UpdateProject` method; the end user must have the\n`resourcemanager.projects.create` permission on the parent.\n\nRead-write.", - "$ref": "ResourceId" - }, - "labels": { - "description": "The labels associated with this Project.\n\nLabel keys must be between 1 and 63 characters long and must conform\nto the following regular expression: \\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?.\n\nLabel values must be between 0 and 63 characters long and must conform\nto the regular expression (\\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?)?.\n\nNo more than 256 labels can be associated with a given resource.\n\nClients should store labels in a representation such as JSON that does not\ndepend on specific characters being disallowed.\n\nExample: \u003ccode\u003e\"environment\" : \"dev\"\u003c/code\u003e\nRead-write.", - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - "lifecycleState": { - "description": "The Project lifecycle state.\n\nRead-only.", - "enum": [ - "LIFECYCLE_STATE_UNSPECIFIED", - "ACTIVE", - "DELETE_REQUESTED", - "DELETE_IN_PROGRESS" - ], - "enumDescriptions": [ - "Unspecified state. 
This is only used/useful for distinguishing\nunset values.", - "The normal and active state.", - "The project has been marked for deletion by the user\n(by invoking DeleteProject)\nor by the system (Google Cloud Platform).\nThis can generally be reversed by invoking UndeleteProject.", - "This lifecycle state is no longer used and is not returned by the API." - ], - "type": "string" - }, - "createTime": { - "description": "Creation time.\n\nRead-only.", - "type": "string", - "format": "google-datetime" - }, - "name": { - "description": "The user-assigned display name of the Project.\nIt must be 4 to 30 characters.\nAllowed characters are: lowercase and uppercase letters, numbers,\nhyphen, single-quote, double-quote, space, and exclamation point.\n\nExample: \u003ccode\u003eMy Project\u003c/code\u003e\nRead-write.", - "type": "string" - }, - "projectNumber": { - "description": "The number uniquely identifying the project.\n\nExample: \u003ccode\u003e415104041262\u003c/code\u003e\nRead-only.", - "type": "string", - "format": "int64" - }, - "projectId": { - "description": "The unique, user-assigned ID of the Project.\nIt must be 6 to 30 lowercase letters, digits, or hyphens.\nIt must start with a letter.\nTrailing hyphens are prohibited.\n\nExample: \u003ccode\u003etokyo-rain-123\u003c/code\u003e\nRead-only after creation.", - "type": "string" + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." } }, - "id": "Project" + "id": "SetIamPolicyRequest" }, "Binding": { "description": "Associates `members` with a `role`.", @@ -835,123 +921,37 @@ } }, "id": "Binding" - }, - "OrganizationOwner": { - "description": "The entity that owns an Organization. The lifetime of the Organization and\nall of its descendants are bound to the `OrganizationOwner`. If the\n`OrganizationOwner` is deleted, the Organization and all its descendants will\nbe deleted.", - "type": "object", - "properties": { - "directoryCustomerId": { - "description": "The Google for Work customer id used in the Directory API.", - "type": "string" - } - }, - "id": "OrganizationOwner" } }, - "revision": "20170122", - "basePath": "", + "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "canonicalName": "Cloud Resource Manager", - "discoveryVersion": "v1", + "version": "v1beta1", "baseUrl": "https://cloudresourcemanager.googleapis.com/", - "name": "cloudresourcemanager", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "canonicalName": "Cloud Resource Manager", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } }, - "documentationLink": "https://cloud.google.com/resource-manager", - "ownerDomain": "google.com", - "batchPath": "batch", + "kind": "discovery#restDescription", + "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", "servicePath": "", - "ownerName": "Google", - "version": "v1beta1", "rootUrl": "https://cloudresourcemanager.googleapis.com/", - "kind": "discovery#restDescription" + "basePath": "", + "ownerDomain": "google.com", + "name": "cloudresourcemanager", + "batchPath": "batch", + "id": "cloudresourcemanager:v1beta1", + "documentationLink": "https://cloud.google.com/resource-manager", + "revision": "20170221" } diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go index 5163131b2..20f473534 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Organizations *OrganizationsService @@ -81,6 +82,10 @@ func 
(s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOrganizationsService(s *Service) *OrganizationsService { rs := &OrganizationsService{s: s} return rs @@ -467,9 +472,9 @@ type Organization struct { // DisplayName: A friendly string to be used to refer to the // Organization in the UI. - // Assigned by the server, set to the firm name of the Google For - // Work - // customer that owns this organization. + // Assigned by the server, set to the primary domain of the G + // Suite + // customer that owns the organization. // @OutputOnly DisplayName string `json:"displayName,omitempty"` @@ -663,7 +668,7 @@ func (s *Policy) MarshalJSON() ([]byte, error) { // Project: A Project is a high-level Google Cloud Platform entity. It // is a -// container for ACLs, APIs, AppEngine Apps, VMs, and other +// container for ACLs, APIs, App Engine Apps, VMs, and other // Google Cloud Platform resources. type Project struct { // CreateTime: Creation time. @@ -824,8 +829,8 @@ func (s *ProjectCreationStatus) MarshalJSON() ([]byte, error) { // `resource` in Google // Cloud Platform is a generic term for something you (a developer) may // want to -// interact with through one of our API's. Some examples are an -// AppEngine app, +// interact with through one of our API's. Some examples are an App +// Engine app, // a Compute Engine instance, a Cloud SQL database, and so on. type ResourceId struct { // Id: Required field for the type-specific id. This should correspond @@ -1038,6 +1043,7 @@ func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1178,6 +1184,7 @@ func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { @@ -1367,6 +1374,7 @@ func (c *OrganizationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1527,6 +1535,7 @@ func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -1665,6 +1674,7 @@ func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -1802,6 +1812,7 @@ func (c *OrganizationsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.organization) if err != nil { @@ -1952,6 +1963,7 @@ func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.project) if err != nil { @@ -2104,6 +2116,7 @@ func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}") @@ -2242,6 +2255,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2377,6 +2391,7 @@ func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getancestryrequest) if err != nil { @@ -2514,6 +2529,7 @@ func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { @@ -2707,6 +2723,7 @@ func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2914,6 +2931,7 @@ func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -3049,6 +3067,7 @@ func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -3192,6 +3211,7 @@ func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteprojectrequest) if err != nil { @@ -3330,6 +3350,7 @@ func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.project) if err != nil { diff --git a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json index 
fd8cc0f1a..97bca1c6a 100644 --- a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json +++ b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json @@ -1,362 +1,383 @@ { - "kind": "discovery#restDescription", - "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/avS4leRd3oaImhwLi14P2pt74p0\"", - "discoveryVersion": "v1", - "id": "cloudtrace:v1", - "name": "cloudtrace", - "canonicalName": "Cloud Trace", - "version": "v1", - "revision": "20160518", - "title": "Google Cloud Trace API", - "description": "Send and retrieve trace data from Google Cloud Trace. Data is generated and available by default for all App Engine applications. Data from other applications can be written to Cloud Trace for display, reporting, and analysis.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/tools/cloud-trace", - "protocol": "rest", - "baseUrl": "https://cloudtrace.googleapis.com/", - "basePath": "", - "rootUrl": "https://cloudtrace.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "version_module": true, - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" + "id": "cloudtrace:v1", + "revision": "20170208", + "documentationLink": "https://cloud.google.com/trace", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "TraceSpan": { + "description": "A span represents a single timed event within a trace. Spans can be nested\nand form a trace tree. Often, a trace contains a root span that describes the\nend-to-end latency of an operation and, optionally, one or more subspans for\nits suboperations. Spans do not need to be contiguous. There may be gaps\nbetween spans in a trace.", + "type": "object", + "properties": { + "spanId": { + "type": "string", + "description": "Identifier for the span. Must be a 64-bit integer other than 0 and\nunique within a trace.", + "format": "uint64" + }, + "parentSpanId": { + "description": "ID of the parent span, if any. 
Optional.", + "format": "uint64", + "type": "string" + }, + "endTime": { + "type": "string", + "description": "End time of the span in nanoseconds from the UNIX epoch.", + "format": "google-datetime" + }, + "startTime": { + "description": "Start time of the span in nanoseconds from the UNIX epoch.", + "format": "google-datetime", + "type": "string" + }, + "kind": { + "description": "Distinguishes between spans generated in a particular context. For example,\ntwo spans with the same name may be distinguished using `RPC_CLIENT`\nand `RPC_SERVER` to identify queueing latency associated with the span.", + "type": "string", + "enumDescriptions": [ + "Unspecified.", + "Indicates that the span covers server-side handling of an RPC or other\nremote network request.", + "Indicates that the span covers the client-side wrapper around an RPC or\nother remote request." + ], + "enum": [ + "SPAN_KIND_UNSPECIFIED", + "RPC_SERVER", + "RPC_CLIENT" + ] + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Collection of labels associated with the span. Label keys must be less than\n128 bytes. Label values must be less than 16 kilobytes." + }, + "name": { + "description": "Name of the span. Must be less than 128 bytes. The span name is sanitized\nand displayed in the Stackdriver Trace tool in the\n{% dynamic print site_values.console_name %}.\nThe name may be a method name or some other per-call site name.\nFor the same executable and the same call point, a best practice is\nto use a consistent name, which makes it easier to correlate\ncross-trace spans.", + "type": "string" + } + }, + "id": "TraceSpan" + }, + "ListTracesResponse": { + "description": "The response message for the `ListTraces` method.", + "type": "object", + "properties": { + "traces": { + "description": "List of trace records returned.", + "type": "array", + "items": { + "$ref": "Trace" + } + }, + "nextPageToken": { + "description": "If defined, indicates that there are more traces that match the request\nand that this value should be passed to the next request to continue\nretrieving additional traces.", + "type": "string" + } + }, + "id": "ListTracesResponse" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "Trace": { + "description": "A trace describes how long it takes for an application to perform an\noperation. It consists of a set of spans, each of which represent a single\ntimed event within the operation.", + "type": "object", + "properties": { + "projectId": { + "description": "Project ID of the Cloud project where the trace data is stored.", + "type": "string" + }, + "spans": { + "description": "Collection of spans in the trace.", + "type": "array", + "items": { + "$ref": "TraceSpan" + } + }, + "traceId": { + "description": "Globally unique identifier for the trace. 
This identifier is a 128-bit\nnumeric value formatted as a 32-byte hex string.", + "type": "string" + } + }, + "id": "Trace" + }, + "Traces": { + "description": "List of new or updated traces.", + "type": "object", + "properties": { + "traces": { + "type": "array", + "items": { + "$ref": "Trace" + }, + "description": "List of traces." + } + }, + "id": "Traces" + } }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query" + "protocol": "rest", + "canonicalName": "Cloud Trace", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/trace.readonly": { + "description": "Read Trace data for a project or application" + }, + "https://www.googleapis.com/auth/trace.append": { + "description": "Write Trace data for a project or application" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query" + "rootUrl": "https://cloudtrace.googleapis.com/", + "ownerDomain": "google.com", + "name": "cloudtrace", + "batchPath": "batch", + "title": "Stackdriver Trace API", + "ownerName": "Google", + "resources": { + "projects": { + "methods": { + "patchTraces": { + "description": "Sends new traces to Stackdriver Trace or updates existing traces. If the ID\nof a trace that you send matches that of an existing trace, any fields\nin the existing trace and its spans are overwritten by the provided values,\nand any new fields provided are merged with the existing trace data. If the\nID does not match, a new trace is created.", + "request": { + "$ref": "Traces" + }, + "httpMethod": "PATCH", + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append" + ], + "parameters": { + "projectId": { + "required": true, + "type": "string", + "location": "path", + "description": "ID of the Cloud project where the trace data is stored." + } + }, + "flatPath": "v1/projects/{projectId}/traces", + "id": "cloudtrace.projects.patchTraces", + "path": "v1/projects/{projectId}/traces" + } + }, + "resources": { + "traces": { + "methods": { + "list": { + "response": { + "$ref": "ListTracesResponse" + }, + "parameterOrder": [ + "projectId" + ], + "httpMethod": "GET", + "parameters": { + "orderBy": { + "location": "query", + "description": "Field used to sort the returned traces. 
Optional.\nCan be one of the following:\n\n* `trace_id`\n* `name` (`name` field of root span in the trace)\n* `duration` (difference between `end_time` and `start_time` fields of\n the root span)\n* `start` (`start_time` field of the root span)\n\nDescending order can be specified by appending `desc` to the sort field\n(for example, `name desc`).\n\nOnly one sort field is permitted.", + "type": "string" + }, + "projectId": { + "location": "path", + "description": "ID of the Cloud project where the trace data is stored.", + "required": true, + "type": "string" + }, + "filter": { + "description": "An optional filter for the request.", + "type": "string", + "location": "query" + }, + "endTime": { + "location": "query", + "description": "Start of the time interval (inclusive) during which the trace data was\ncollected from the application.", + "format": "google-datetime", + "type": "string" + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "Token identifying the page of results to return. If provided, use the\nvalue of the `next_page_token` field from a previous request. Optional." + }, + "startTime": { + "location": "query", + "description": "End of the time interval (inclusive) during which the trace data was\ncollected from the application.", + "format": "google-datetime", + "type": "string" + }, + "pageSize": { + "description": "Maximum number of traces to return. If not specified or \u003c= 0, the\nimplementation selects a reasonable value. The implementation may\nreturn fewer traces than the requested page size. Optional.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "view": { + "location": "query", + "enum": [ + "VIEW_TYPE_UNSPECIFIED", + "MINIMAL", + "ROOTSPAN", + "COMPLETE" + ], + "description": "Type of data returned for traces in the list. Optional. Default is\n`MINIMAL`.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.readonly" + ], + "flatPath": "v1/projects/{projectId}/traces", + "path": "v1/projects/{projectId}/traces", + "id": "cloudtrace.projects.traces.list", + "description": "Returns of a list of traces that match the specified filter conditions." + }, + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "projectId", + "traceId" + ], + "response": { + "$ref": "Trace" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.readonly" + ], + "parameters": { + "projectId": { + "description": "ID of the Cloud project where the trace data is stored.", + "required": true, + "type": "string", + "location": "path" + }, + "traceId": { + "required": true, + "type": "string", + "location": "path", + "description": "ID of the trace to return." + } + }, + "flatPath": "v1/projects/{projectId}/traces/{traceId}", + "id": "cloudtrace.projects.traces.get", + "path": "v1/projects/{projectId}/traces/{traceId}", + "description": "Gets a single trace by its ID." 
+ } + } + } + } + } }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "parameters": { + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" }, - "https://www.googleapis.com/auth/trace.append": { - "description": "Write Trace data for a project or application" + "bearer_token": { + "type": "string", + "location": "query", + "description": "OAuth bearer token." }, - "https://www.googleapis.com/auth/trace.readonly": { - "description": "Read Trace data for a project or application" - } - } - } - }, - "schemas": { - "ListTracesResponse": { - "id": "ListTracesResponse", - "type": "object", - "description": "The response message for the `ListTraces` method.", - "properties": { - "traces": { - "type": "array", - "description": "List of trace records returned.", - "items": { - "$ref": "Trace" - } + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" }, - "nextPageToken": { - "type": "string", - "description": "If defined, indicates that there are more traces that match the request and that this value should be passed to the next request to continue retrieving additional traces." - } - } - }, - "Trace": { - "id": "Trace", - "type": "object", - "description": "A trace describes how long it takes for an application to perform an operation. It consists of a set of spans, each of which represent a single timed event within the operation.", - "properties": { - "projectId": { - "type": "string", - "description": "Project ID of the Cloud project where the trace data is stored." + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" }, - "traceId": { - "type": "string", - "description": "Globally unique identifier for the trace. This identifier is a 128-bit numeric value formatted as a 32-byte hex string." + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "spans": { - "type": "array", - "description": "Collection of spans in the trace.", - "items": { - "$ref": "TraceSpan" - } - } - } - }, - "TraceSpan": { - "id": "TraceSpan", - "type": "object", - "description": "A span represents a single timed event within a trace. Spans can be nested and form a trace tree. Often, a trace contains a root span that describes the end-to-end latency of an operation and, optionally, one or more subspans for its suboperations. Spans do not need to be contiguous. There may be gaps between spans in a trace.", - "properties": { - "spanId": { - "type": "string", - "description": "Identifier for the span. This identifier must be unique within a trace.", - "format": "uint64" + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, - "kind": { - "type": "string", - "description": "Distinguishes between spans generated in a particular context. 
For example, two spans with the same name may be distinguished using `RPC_CLIENT` and `RPC_SERVER` to identify queueing latency associated with the span.", - "enum": [ - "SPAN_KIND_UNSPECIFIED", - "RPC_SERVER", - "RPC_CLIENT" - ] + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" }, - "name": { - "type": "string", - "description": "Name of the trace. The trace name is sanitized and displayed in the Stackdriver Trace tool in the {% dynamic print site_values.console_name %}. The name may be a method name or some other per-call site name. For the same executable and the same call point, a best practice is to use a consistent name, which makes it easier to correlate cross-trace spans." + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" }, - "startTime": { - "type": "string", - "description": "Start time of the span in nanoseconds from the UNIX epoch." + "$.xgafv": { + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query" }, - "endTime": { - "type": "string", - "description": "End time of the span in nanoseconds from the UNIX epoch." + "alt": { + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" }, - "parentSpanId": { - "type": "string", - "description": "ID of the parent span, if any. Optional.", - "format": "uint64" + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" }, - "labels": { - "type": "object", - "description": "Collection of labels associated with the span.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "Traces": { - "id": "Traces", - "type": "object", - "description": "List of new or updated traces.", - "properties": { - "traces": { - "type": "array", - "description": "List of traces.", - "items": { - "$ref": "Trace" - } + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" } - } }, - "Empty": { - "id": "Empty", - "type": "object", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`." - } - }, - "resources": { - "projects": { - "methods": { - "patchTraces": { - "id": "cloudtrace.projects.patchTraces", - "path": "v1/projects/{projectId}/traces", - "httpMethod": "PATCH", - "description": "Sends new traces to Stackdriver Trace or updates existing traces. 
If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.", - "parameters": { - "projectId": { - "type": "string", - "description": "ID of the Cloud project where the trace data is stored.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "projectId" - ], - "request": { - "$ref": "Traces" - }, - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/trace.append" - ] - } - }, - "resources": { - "traces": { - "methods": { - "list": { - "id": "cloudtrace.projects.traces.list", - "path": "v1/projects/{projectId}/traces", - "httpMethod": "GET", - "description": "Returns of a list of traces that match the specified filter conditions.", - "parameters": { - "projectId": { - "type": "string", - "description": "ID of the Cloud project where the trace data is stored.", - "required": true, - "location": "path" - }, - "view": { - "type": "string", - "description": "Type of data returned for traces in the list. Optional. Default is `MINIMAL`.", - "enum": [ - "VIEW_TYPE_UNSPECIFIED", - "MINIMAL", - "ROOTSPAN", - "COMPLETE" - ], - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "Maximum number of traces to return. If not specified or \u003c= 0, the implementation selects a reasonable value. The implementation may return fewer traces than the requested page size. Optional.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Token identifying the page of results to return. If provided, use the value of the `next_page_token` field from a previous request. Optional.", - "location": "query" - }, - "startTime": { - "type": "string", - "description": "End of the time interval (inclusive) during which the trace data was collected from the application.", - "location": "query" - }, - "endTime": { - "type": "string", - "description": "Start of the time interval (inclusive) during which the trace data was collected from the application.", - "location": "query" - }, - "filter": { - "type": "string", - "description": "An optional filter for the request.", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Field used to sort the returned traces. Optional. Can be one of the following: * `trace_id` * `name` (`name` field of root span in the trace) * `duration` (difference between `end_time` and `start_time` fields of the root span) * `start` (`start_time` field of the root span) Descending order can be specified by appending `desc` to the sort field (for example, `name desc`). 
Only one sort field is permitted.", - "location": "query" - } - }, - "parameterOrder": [ - "projectId" - ], - "response": { - "$ref": "ListTracesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/trace.readonly" - ] - }, - "get": { - "id": "cloudtrace.projects.traces.get", - "path": "v1/projects/{projectId}/traces/{traceId}", - "httpMethod": "GET", - "description": "Gets a single trace by its ID.", - "parameters": { - "projectId": { - "type": "string", - "description": "ID of the Cloud project where the trace data is stored.", - "required": true, - "location": "path" - }, - "traceId": { - "type": "string", - "description": "ID of the trace to return.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "projectId", - "traceId" - ], - "response": { - "$ref": "Trace" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/trace.readonly" - ] - } - } - } - } - } - } + "version": "v1", + "baseUrl": "https://cloudtrace.googleapis.com/", + "servicePath": "", + "kind": "discovery#restDescription", + "description": "Send and retrieve trace data from Stackdriver Trace. Data is generated and available by default for all App Engine applications. Data from other applications can be written to Stackdriver Trace for display, reporting, and analysis.\n", + "basePath": "" } diff --git a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go index 14c42d383..3d8313be0 100644 --- a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go +++ b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go @@ -1,6 +1,6 @@ -// Package cloudtrace provides access to the Google Cloud Trace API. +// Package cloudtrace provides access to the Stackdriver Trace API. // -// See https://cloud.google.com/tools/cloud-trace +// See https://cloud.google.com/trace // // Usage example: // @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Traces = NewProjectsTracesService(s) @@ -103,11 +108,17 @@ type ProjectsTracesService struct { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated empty messages in your APIs. A typical example is to use -// it as the request or the response type of an API method. For -// instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for `Empty` is -// empty JSON object `{}`. +// duplicated +// empty messages in your APIs. A typical example is to use it as the +// request +// or the response type of an API method. 
For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -117,8 +128,10 @@ type Empty struct { // ListTracesResponse: The response message for the `ListTraces` method. type ListTracesResponse struct { // NextPageToken: If defined, indicates that there are more traces that - // match the request and that this value should be passed to the next - // request to continue retrieving additional traces. + // match the request + // and that this value should be passed to the next request to + // continue + // retrieving additional traces. NextPageToken string `json:"nextPageToken,omitempty"` // Traces: List of trace records returned. @@ -152,8 +165,10 @@ func (s *ListTracesResponse) MarshalJSON() ([]byte, error) { } // Trace: A trace describes how long it takes for an application to -// perform an operation. It consists of a set of spans, each of which -// represent a single timed event within the operation. +// perform an +// operation. It consists of a set of spans, each of which represent a +// single +// timed event within the operation. type Trace struct { // ProjectId: Project ID of the Cloud project where the trace data is // stored. @@ -163,7 +178,8 @@ type Trace struct { Spans []*TraceSpan `json:"spans,omitempty"` // TraceId: Globally unique identifier for the trace. This identifier is - // a 128-bit numeric value formatted as a 32-byte hex string. + // a 128-bit + // numeric value formatted as a 32-byte hex string. TraceId string `json:"traceId,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -194,41 +210,58 @@ func (s *Trace) MarshalJSON() ([]byte, error) { } // TraceSpan: A span represents a single timed event within a trace. -// Spans can be nested and form a trace tree. Often, a trace contains a -// root span that describes the end-to-end latency of an operation and, -// optionally, one or more subspans for its suboperations. Spans do not -// need to be contiguous. There may be gaps between spans in a trace. +// Spans can be nested +// and form a trace tree. Often, a trace contains a root span that +// describes the +// end-to-end latency of an operation and, optionally, one or more +// subspans for +// its suboperations. Spans do not need to be contiguous. There may be +// gaps +// between spans in a trace. type TraceSpan struct { // EndTime: End time of the span in nanoseconds from the UNIX epoch. EndTime string `json:"endTime,omitempty"` // Kind: Distinguishes between spans generated in a particular context. - // For example, two spans with the same name may be distinguished using - // `RPC_CLIENT` and `RPC_SERVER` to identify queueing latency associated - // with the span. + // For example, + // two spans with the same name may be distinguished using + // `RPC_CLIENT` + // and `RPC_SERVER` to identify queueing latency associated with the + // span. // // Possible values: - // "SPAN_KIND_UNSPECIFIED" - // "RPC_SERVER" - // "RPC_CLIENT" + // "SPAN_KIND_UNSPECIFIED" - Unspecified. + // "RPC_SERVER" - Indicates that the span covers server-side handling + // of an RPC or other + // remote network request. + // "RPC_CLIENT" - Indicates that the span covers the client-side + // wrapper around an RPC or + // other remote request. 
Kind string `json:"kind,omitempty"` - // Labels: Collection of labels associated with the span. + // Labels: Collection of labels associated with the span. Label keys + // must be less than + // 128 bytes. Label values must be less than 16 kilobytes. Labels map[string]string `json:"labels,omitempty"` - // Name: Name of the trace. The trace name is sanitized and displayed in - // the Stackdriver Trace tool in the {% dynamic print - // site_values.console_name %}. The name may be a method name or some - // other per-call site name. For the same executable and the same call - // point, a best practice is to use a consistent name, which makes it - // easier to correlate cross-trace spans. + // Name: Name of the span. Must be less than 128 bytes. The span name is + // sanitized + // and displayed in the Stackdriver Trace tool in the + // {% dynamic print site_values.console_name %}. + // The name may be a method name or some other per-call site name. + // For the same executable and the same call point, a best practice + // is + // to use a consistent name, which makes it easier to + // correlate + // cross-trace spans. Name string `json:"name,omitempty"` // ParentSpanId: ID of the parent span, if any. Optional. ParentSpanId uint64 `json:"parentSpanId,omitempty,string"` - // SpanId: Identifier for the span. This identifier must be unique - // within a trace. + // SpanId: Identifier for the span. Must be a 64-bit integer other than + // 0 and + // unique within a trace. SpanId uint64 `json:"spanId,omitempty,string"` // StartTime: Start time of the span in nanoseconds from the UNIX epoch. @@ -297,11 +330,14 @@ type ProjectsPatchTracesCall struct { } // PatchTraces: Sends new traces to Stackdriver Trace or updates -// existing traces. If the ID of a trace that you send matches that of -// an existing trace, any fields in the existing trace and its spans are -// overwritten by the provided values, and any new fields provided are -// merged with the existing trace data. If the ID does not match, a new -// trace is created. +// existing traces. If the ID +// of a trace that you send matches that of an existing trace, any +// fields +// in the existing trace and its spans are overwritten by the provided +// values, +// and any new fields provided are merged with the existing trace data. +// If the +// ID does not match, a new trace is created. func (r *ProjectsService) PatchTraces(projectId string, traces *Traces) *ProjectsPatchTracesCall { c := &ProjectsPatchTracesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -340,6 +376,7 @@ func (c *ProjectsPatchTracesCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.traces) if err != nil { @@ -395,7 +432,8 @@ func (c *ProjectsPatchTracesCall) Do(opts ...googleapi.CallOption) (*Empty, erro } return ret, nil // { - // "description": "Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.", + // "description": "Sends new traces to Stackdriver Trace or updates existing traces. 
If the ID\nof a trace that you send matches that of an existing trace, any fields\nin the existing trace and its spans are overwritten by the provided values,\nand any new fields provided are merged with the existing trace data. If the\nID does not match, a new trace is created.", + // "flatPath": "v1/projects/{projectId}/traces", // "httpMethod": "PATCH", // "id": "cloudtrace.projects.patchTraces", // "parameterOrder": [ @@ -485,6 +523,7 @@ func (c *ProjectsTracesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -540,6 +579,7 @@ func (c *ProjectsTracesGetCall) Do(opts ...googleapi.CallOption) (*Trace, error) return ret, nil // { // "description": "Gets a single trace by its ID.", + // "flatPath": "v1/projects/{projectId}/traces/{traceId}", // "httpMethod": "GET", // "id": "cloudtrace.projects.traces.get", // "parameterOrder": [ @@ -592,8 +632,8 @@ func (r *ProjectsTracesService) List(projectId string) *ProjectsTracesListCall { } // EndTime sets the optional parameter "endTime": Start of the time -// interval (inclusive) during which the trace data was collected from -// the application. +// interval (inclusive) during which the trace data was +// collected from the application. func (c *ProjectsTracesListCall) EndTime(endTime string) *ProjectsTracesListCall { c.urlParams_.Set("endTime", endTime) return c @@ -607,44 +647,55 @@ func (c *ProjectsTracesListCall) Filter(filter string) *ProjectsTracesListCall { } // OrderBy sets the optional parameter "orderBy": Field used to sort the -// returned traces. Can be one of the following: * `trace_id` * `name` -// (`name` field of root span in the trace) * `duration` (difference -// between `end_time` and `start_time` fields of the root span) * -// `start` (`start_time` field of the root span) Descending order can be -// specified by appending `desc` to the sort field (for example, `name -// desc`). Only one sort field is permitted. +// returned traces. +// Can be one of the following: +// +// * `trace_id` +// * `name` (`name` field of root span in the trace) +// * `duration` (difference between `end_time` and `start_time` fields +// of +// the root span) +// * `start` (`start_time` field of the root span) +// +// Descending order can be specified by appending `desc` to the sort +// field +// (for example, `name desc`). +// +// Only one sort field is permitted. func (c *ProjectsTracesListCall) OrderBy(orderBy string) *ProjectsTracesListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": Maximum number of -// traces to return. If not specified or <= 0, the implementation -// selects a reasonable value. The implementation may return fewer -// traces than the requested page size. +// traces to return. If not specified or <= 0, the +// implementation selects a reasonable value. The implementation +// may +// return fewer traces than the requested page size. func (c *ProjectsTracesListCall) PageSize(pageSize int64) *ProjectsTracesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Token identifying -// the page of results to return. If provided, use the value of the -// `next_page_token` field from a previous request. +// the page of results to return. 
If provided, use the +// value of the `next_page_token` field from a previous request. func (c *ProjectsTracesListCall) PageToken(pageToken string) *ProjectsTracesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // StartTime sets the optional parameter "startTime": End of the time -// interval (inclusive) during which the trace data was collected from -// the application. +// interval (inclusive) during which the trace data was +// collected from the application. func (c *ProjectsTracesListCall) StartTime(startTime string) *ProjectsTracesListCall { c.urlParams_.Set("startTime", startTime) return c } // View sets the optional parameter "view": Type of data returned for -// traces in the list. Default is `MINIMAL`. +// traces in the list. Default is +// `MINIMAL`. // // Possible values: // "VIEW_TYPE_UNSPECIFIED" @@ -697,6 +748,7 @@ func (c *ProjectsTracesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -751,6 +803,7 @@ func (c *ProjectsTracesListCall) Do(opts ...googleapi.CallOption) (*ListTracesRe return ret, nil // { // "description": "Returns of a list of traces that match the specified filter conditions.", + // "flatPath": "v1/projects/{projectId}/traces", // "httpMethod": "GET", // "id": "cloudtrace.projects.traces.list", // "parameterOrder": [ @@ -758,7 +811,8 @@ func (c *ProjectsTracesListCall) Do(opts ...googleapi.CallOption) (*ListTracesRe // ], // "parameters": { // "endTime": { - // "description": "Start of the time interval (inclusive) during which the trace data was collected from the application.", + // "description": "Start of the time interval (inclusive) during which the trace data was\ncollected from the application.", + // "format": "google-datetime", // "location": "query", // "type": "string" // }, @@ -768,18 +822,18 @@ func (c *ProjectsTracesListCall) Do(opts ...googleapi.CallOption) (*ListTracesRe // "type": "string" // }, // "orderBy": { - // "description": "Field used to sort the returned traces. Optional. Can be one of the following: * `trace_id` * `name` (`name` field of root span in the trace) * `duration` (difference between `end_time` and `start_time` fields of the root span) * `start` (`start_time` field of the root span) Descending order can be specified by appending `desc` to the sort field (for example, `name desc`). Only one sort field is permitted.", + // "description": "Field used to sort the returned traces. Optional.\nCan be one of the following:\n\n* `trace_id`\n* `name` (`name` field of root span in the trace)\n* `duration` (difference between `end_time` and `start_time` fields of\n the root span)\n* `start` (`start_time` field of the root span)\n\nDescending order can be specified by appending `desc` to the sort field\n(for example, `name desc`).\n\nOnly one sort field is permitted.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Maximum number of traces to return. If not specified or \u003c= 0, the implementation selects a reasonable value. The implementation may return fewer traces than the requested page size. Optional.", + // "description": "Maximum number of traces to return. If not specified or \u003c= 0, the\nimplementation selects a reasonable value. The implementation may\nreturn fewer traces than the requested page size. 
Optional.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying the page of results to return. If provided, use the value of the `next_page_token` field from a previous request. Optional.", + // "description": "Token identifying the page of results to return. If provided, use the\nvalue of the `next_page_token` field from a previous request. Optional.", // "location": "query", // "type": "string" // }, @@ -790,12 +844,13 @@ func (c *ProjectsTracesListCall) Do(opts ...googleapi.CallOption) (*ListTracesRe // "type": "string" // }, // "startTime": { - // "description": "End of the time interval (inclusive) during which the trace data was collected from the application.", + // "description": "End of the time interval (inclusive) during which the trace data was\ncollected from the application.", + // "format": "google-datetime", // "location": "query", // "type": "string" // }, // "view": { - // "description": "Type of data returned for traces in the list. Optional. Default is `MINIMAL`.", + // "description": "Type of data returned for traces in the list. Optional. Default is\n`MINIMAL`.", // "enum": [ // "VIEW_TYPE_UNSPECIFIED", // "MINIMAL", diff --git a/vendor/google.golang.org/api/clouduseraccounts/v0.alpha/clouduseraccounts-gen.go b/vendor/google.golang.org/api/clouduseraccounts/v0.alpha/clouduseraccounts-gen.go index 69c6533e1..146811c38 100644 --- a/vendor/google.golang.org/api/clouduseraccounts/v0.alpha/clouduseraccounts-gen.go +++ b/vendor/google.golang.org/api/clouduseraccounts/v0.alpha/clouduseraccounts-gen.go @@ -73,9 +73,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only GlobalAccountsOperations *GlobalAccountsOperationsService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGlobalAccountsOperationsService(s *Service) *GlobalAccountsOperationsService { rs := &GlobalAccountsOperationsService{s: s} return rs @@ -1471,6 +1476,7 @@ func (c *GlobalAccountsOperationsDeleteCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") @@ -1590,6 +1596,7 @@ func (c *GlobalAccountsOperationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1811,6 +1818,7 @@ func (c *GlobalAccountsOperationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1990,6 +1998,7 @@ func (c *GroupsAddMemberCall) doRequest(alt string) (*http.Response, 
error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsaddmemberrequest) if err != nil { @@ -2134,6 +2143,7 @@ func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/groups/{groupName}") @@ -2281,6 +2291,7 @@ func (c *GroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2434,6 +2445,7 @@ func (c *GroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2576,6 +2588,7 @@ func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -2791,6 +2804,7 @@ func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2970,6 +2984,7 @@ func (c *GroupsRemoveMemberCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsremovememberrequest) if err != nil { @@ -3117,6 +3132,7 @@ func (c *GroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { @@ -3266,6 +3282,7 @@ func (c *GroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { @@ -3423,6 +3440,7 @@ func (c *LinuxGetAuthorizedKeysViewCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/authorizedKeysView/{user}") @@ -3655,6 +3673,7 @@ func (c *LinuxGetLinuxAccountViewsCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := 
googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/linuxAccountViews") @@ -3828,6 +3847,7 @@ func (c *UsersAddPublicKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publickey) if err != nil { @@ -3972,6 +3992,7 @@ func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}") @@ -4119,6 +4140,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4272,6 +4294,7 @@ func (c *UsersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4414,6 +4437,7 @@ func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -4629,6 +4653,7 @@ func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4807,6 +4832,7 @@ func (c *UsersRemovePublicKeyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}/removePublicKey") @@ -4954,6 +4980,7 @@ func (c *UsersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { @@ -5103,6 +5130,7 @@ func (c *UsersTestIamPermissionsCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { diff --git a/vendor/google.golang.org/api/clouduseraccounts/v0.beta/clouduseraccounts-gen.go b/vendor/google.golang.org/api/clouduseraccounts/v0.beta/clouduseraccounts-gen.go index 7a9896e73..05b52c288 100644 --- a/vendor/google.golang.org/api/clouduseraccounts/v0.beta/clouduseraccounts-gen.go +++ b/vendor/google.golang.org/api/clouduseraccounts/v0.beta/clouduseraccounts-gen.go @@ -73,9 +73,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // 
API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only GlobalAccountsOperations *GlobalAccountsOperationsService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGlobalAccountsOperationsService(s *Service) *GlobalAccountsOperationsService { rs := &GlobalAccountsOperationsService{s: s} return rs @@ -1045,6 +1050,7 @@ func (c *GlobalAccountsOperationsDeleteCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") @@ -1164,6 +1170,7 @@ func (c *GlobalAccountsOperationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1385,6 +1392,7 @@ func (c *GlobalAccountsOperationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1564,6 +1572,7 @@ func (c *GroupsAddMemberCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsaddmemberrequest) if err != nil { @@ -1708,6 +1717,7 @@ func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/groups/{groupName}") @@ -1855,6 +1865,7 @@ func (c *GroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1997,6 +2008,7 @@ func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -2212,6 +2224,7 @@ func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2391,6 +2404,7 @@ func (c *GroupsRemoveMemberCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) 
var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsremovememberrequest) if err != nil { @@ -2546,6 +2560,7 @@ func (c *LinuxGetAuthorizedKeysViewCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/authorizedKeysView/{user}") @@ -2778,6 +2793,7 @@ func (c *LinuxGetLinuxAccountViewsCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/linuxAccountViews") @@ -2951,6 +2967,7 @@ func (c *UsersAddPublicKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publickey) if err != nil { @@ -3095,6 +3112,7 @@ func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}") @@ -3242,6 +3260,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3384,6 +3403,7 @@ func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -3599,6 +3619,7 @@ func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3777,6 +3798,7 @@ func (c *UsersRemovePublicKeyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}/removePublicKey") diff --git a/vendor/google.golang.org/api/clouduseraccounts/vm_alpha/clouduseraccounts-gen.go b/vendor/google.golang.org/api/clouduseraccounts/vm_alpha/clouduseraccounts-gen.go index ccd9a8d04..929d7798a 100644 --- a/vendor/google.golang.org/api/clouduseraccounts/vm_alpha/clouduseraccounts-gen.go +++ b/vendor/google.golang.org/api/clouduseraccounts/vm_alpha/clouduseraccounts-gen.go @@ -73,9 +73,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional 
additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only GlobalAccountsOperations *GlobalAccountsOperationsService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGlobalAccountsOperationsService(s *Service) *GlobalAccountsOperationsService { rs := &GlobalAccountsOperationsService{s: s} return rs @@ -1471,6 +1476,7 @@ func (c *GlobalAccountsOperationsDeleteCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") @@ -1590,6 +1596,7 @@ func (c *GlobalAccountsOperationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1811,6 +1818,7 @@ func (c *GlobalAccountsOperationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1990,6 +1998,7 @@ func (c *GroupsAddMemberCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsaddmemberrequest) if err != nil { @@ -2134,6 +2143,7 @@ func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/groups/{groupName}") @@ -2281,6 +2291,7 @@ func (c *GroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2434,6 +2445,7 @@ func (c *GroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2576,6 +2588,7 @@ func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -2791,6 +2804,7 @@ func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2970,6 +2984,7 @@ func (c *GroupsRemoveMemberCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsremovememberrequest) if err != nil { @@ -3117,6 +3132,7 @@ func (c *GroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { @@ -3266,6 +3282,7 @@ func (c *GroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { @@ -3423,6 +3440,7 @@ func (c *LinuxGetAuthorizedKeysViewCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/authorizedKeysView/{user}") @@ -3655,6 +3673,7 @@ func (c *LinuxGetLinuxAccountViewsCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/linuxAccountViews") @@ -3828,6 +3847,7 @@ func (c *UsersAddPublicKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publickey) if err != nil { @@ -3972,6 +3992,7 @@ func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}") @@ -4119,6 +4140,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4272,6 +4294,7 @@ func (c *UsersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4414,6 +4437,7 @@ func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -4629,6 +4653,7 @@ func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4807,6 +4832,7 @@ func (c *UsersRemovePublicKeyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}/removePublicKey") @@ -4954,6 +4980,7 @@ func (c *UsersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { @@ -5103,6 +5130,7 @@ func (c *UsersTestIamPermissionsCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { diff --git a/vendor/google.golang.org/api/clouduseraccounts/vm_beta/clouduseraccounts-gen.go b/vendor/google.golang.org/api/clouduseraccounts/vm_beta/clouduseraccounts-gen.go index 5f16d8981..53ef49a4a 100644 --- a/vendor/google.golang.org/api/clouduseraccounts/vm_beta/clouduseraccounts-gen.go +++ b/vendor/google.golang.org/api/clouduseraccounts/vm_beta/clouduseraccounts-gen.go @@ -73,9 +73,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only GlobalAccountsOperations *GlobalAccountsOperationsService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGlobalAccountsOperationsService(s *Service) *GlobalAccountsOperationsService { rs := &GlobalAccountsOperationsService{s: s} return rs @@ -1045,6 +1050,7 @@ func (c *GlobalAccountsOperationsDeleteCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") @@ -1164,6 +1170,7 @@ func (c *GlobalAccountsOperationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1385,6 +1392,7 @@ func (c *GlobalAccountsOperationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1564,6 +1572,7 @@ func (c *GroupsAddMemberCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) 
var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsaddmemberrequest) if err != nil { @@ -1708,6 +1717,7 @@ func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/groups/{groupName}") @@ -1855,6 +1865,7 @@ func (c *GroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1997,6 +2008,7 @@ func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -2212,6 +2224,7 @@ func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2391,6 +2404,7 @@ func (c *GroupsRemoveMemberCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupsremovememberrequest) if err != nil { @@ -2546,6 +2560,7 @@ func (c *LinuxGetAuthorizedKeysViewCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/authorizedKeysView/{user}") @@ -2778,6 +2793,7 @@ func (c *LinuxGetLinuxAccountViewsCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/linuxAccountViews") @@ -2951,6 +2967,7 @@ func (c *UsersAddPublicKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publickey) if err != nil { @@ -3095,6 +3112,7 @@ func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}") @@ -3242,6 +3260,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3384,6 +3403,7 @@ func (c *UsersInsertCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -3599,6 +3619,7 @@ func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3777,6 +3798,7 @@ func (c *UsersRemovePublicKeyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/users/{user}/removePublicKey") diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index 5b0e3cad5..327416ca4 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/2elvVtUis8Edx7anV3MuaVUCiBY\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/x9AVZb0DFub7qYBAESo32Be1F4A\"", "discoveryVersion": "v1", "id": "compute:alpha", "name": "compute", "version": "alpha", - "revision": "20170118", + "revision": "20170124", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -107,6 +107,197 @@ } } }, + "AcceleratorType": { + "id": "AcceleratorType", + "type": "object", + "description": "An Accelerator Type resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this accelerator type." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional textual description of the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#acceleratorType for accelerator types.", + "default": "compute#acceleratorType" + }, + "maximumCardsPerInstance": { + "type": "integer", + "description": "[Output Only] Maximum accelerator cards allowed per instance.", + "format": "int32" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined fully-qualified URL for this resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] The name of the zone where the accelerator type resides, such as us-central1-a." + } + } + }, + "AcceleratorTypeAggregatedList": { + "id": "AcceleratorTypeAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
+ }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped accelerator type lists.", + "additionalProperties": { + "$ref": "AcceleratorTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of accelerator types." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#acceleratorTypeAggregatedList for aggregated lists of accelerator types.", + "default": "compute#acceleratorTypeAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AcceleratorTypeList": { + "id": "AcceleratorTypeList", + "type": "object", + "description": "Contains a list of accelerator types.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of AcceleratorType resources.", + "items": { + "$ref": "AcceleratorType" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#acceleratorTypeList for lists of accelerator types.", + "default": "compute#acceleratorTypeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] A token used to continue a truncated list request." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AcceleratorTypesScopedList": { + "id": "AcceleratorTypesScopedList", + "type": "object", + "properties": { + "acceleratorTypes": { + "type": "array", + "description": "[Output Only] List of accelerator types contained in this scope.", + "items": { + "$ref": "AcceleratorType" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that appears when the accelerator types list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, "AccessConfig": { "id": "AccessConfig", "type": "object", @@ -127,10 +318,10 @@ }, "networkTier": { "type": "string", - "description": "This signifies the networking tier used for configuring this access configuration and can only take the following values: CLOUD_NETWORK_PREMIUM , CLOUD_NETWORK_SELECT. If this field is not specified, it is assumed to be CLOUD_NETWORK_PREMIUM.", + "description": "This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM , SELECT. If this field is not specified, it is assumed to be PREMIUM.", "enum": [ - "CLOUD_NETWORK_PREMIUM", - "CLOUD_NETWORK_SELECT" + "PREMIUM", + "SELECT" ], "enumDescriptions": [ "", @@ -188,6 +379,20 @@ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, + "ipVersion": { + "type": "string", + "description": "The IP Version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.", + "enum": [ + "IPV4", + "IPV6", + "UNSPECIFIED_VERSION" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "kind": { "type": "string", "description": "[Output Only] Type of the resource. Always compute#address for addresses.", @@ -217,10 +422,10 @@ }, "networkTier": { "type": "string", - "description": "This signifies the networking tier used for configuring this Address and can only take the following values: CLOUD_NETWORK_PREMIUM , CLOUD_NETWORK_SELECT. If this field is not specified, it is assumed to be CLOUD_NETWORK_PREMIUM.", + "description": "This signifies the networking tier used for configuring this Address and can only take the following values: PREMIUM , SELECT. If this field is not specified, it is assumed to be PREMIUM.", "enum": [ - "CLOUD_NETWORK_PREMIUM", - "CLOUD_NETWORK_SELECT" + "PREMIUM", + "SELECT" ], "enumDescriptions": [ "", @@ -350,6 +555,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -370,6 +576,7 @@ "", "", "", + "", "" ] }, @@ -550,18 +757,18 @@ "AuditConfig": { "id": "AuditConfig", "type": "object", - "description": "Provides the configuration for non-admin_activity logging for a service. Controls exemptions and specific log sub-types.", + "description": "Specifies the audit configuration for a service. It consists of which permission types are logged, and what identities, if any, are exempted from logging. 
An AuditConifg must have one or more AuditLogConfigs.", "properties": { "auditLogConfigs": { "type": "array", - "description": "The configuration for each type of logging", + "description": "The configuration for logging of each type of permission.", "items": { "$ref": "AuditLogConfig" } }, "exemptedMembers": { "type": "array", - "description": "Specifies the identities that are exempted from \"data access\" audit logging for the `service` specified above. Follows the same format of Binding.members.", + "description": "Specifies the identities that are exempted from \"data access\" audit logging for the `service` specified above. Follows the same format of Binding.members. This field is deprecated in favor of per-permission-type exemptions.", "items": { "type": "string" } @@ -575,11 +782,11 @@ "AuditLogConfig": { "id": "AuditLogConfig", "type": "object", - "description": "Provides the configuration for a sub-type of logging.", + "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting foo@gmail.com from DATA_READ logging.", "properties": { "exemptedMembers": { "type": "array", - "description": "Specifies the identities that are exempted from this type of logging Follows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of [Binding.members][].", "items": { "type": "string" } @@ -818,6 +1025,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -838,6 +1046,7 @@ "", "", "", + "", "" ] }, @@ -1133,20 +1342,6 @@ } } }, - "BackendSSLPolicy": { - "id": "BackendSSLPolicy", - "type": "object", - "description": "Message containing backend SSL policies.", - "properties": { - "pinnedPeerCertificates": { - "type": "array", - "description": "List of PEM-encoded peer certificates, from which the public keys are extracted for authenticating the backend service.", - "items": { - "type": "string" - } - } - } - }, "BackendService": { "id": "BackendService", "type": "object", @@ -1157,10 +1352,6 @@ "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", "format": "int32" }, - "backendSslPolicy": { - "$ref": "BackendSSLPolicy", - "description": "Backend SSL policies to enforce." 
- }, "backends": { "type": "array", "description": "The list of backends that serve this BackendService.", @@ -1441,6 +1632,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -1461,6 +1653,7 @@ "", "", "", + "", "" ] }, @@ -1739,6 +1932,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -1759,6 +1953,7 @@ "", "", "", + "", "" ] }, @@ -2140,11 +2335,11 @@ "properties": { "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] Unique identifier for the resource; defined by the server." }, "items": { "type": "array", - "description": "[Output Only] A list of persistent disks.", + "description": "A list of Disk resources.", "items": { "$ref": "Disk" } @@ -2156,7 +2351,7 @@ }, "nextPageToken": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] A token used to continue a truncated list request." }, "selfLink": { "type": "string", @@ -2323,6 +2518,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -2343,6 +2539,7 @@ "", "", "", + "", "" ] }, @@ -2414,6 +2611,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -2434,6 +2632,7 @@ "", "", "", + "", "" ] }, @@ -2575,6 +2774,13 @@ "type": "string" } }, + "sourceServiceAccounts": { + "type": "array", + "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", + "items": { + "type": "string" + } + }, "sourceTags": { "type": "array", "description": "If source tags are specified, the firewall will apply only to traffic with source IP that belongs to a tag listed in source tags. Source tags cannot be used to control traffic to an instance's external IP address. Because tags are associated with an instance, not an IP address. One or both of sourceRanges and sourceTags may be set. 
If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", @@ -2658,11 +2864,11 @@ "properties": { "IPAddress": { "type": "string", - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IP from the same scope (global or regional) will be assigned.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule. Only IPv4 is supported." + "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP. For regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IP from the same scope (global or regional) will be assigned.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule. Only IPv4 is supported." }, "IPProtocol": { "type": "string", - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL\u003c/code, only TCP and UDP are valid.", + "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL, only TCP and UDP are valid.", "enum": [ "AH", "ESP", @@ -2697,6 +2903,20 @@ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, + "ipVersion": { + "type": "string", + "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6. This can only be specified for a global forwarding rule.", + "enum": [ + "IPV4", + "IPV6", + "UNSPECIFIED_VERSION" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "kind": { "type": "string", "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", @@ -2716,7 +2936,7 @@ }, "loadBalancingScheme": { "type": "string", - "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL EXTERNAL The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)", + "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL, EXTERNAL The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). 
The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)", "enum": [ "EXTERNAL", "INTERNAL", @@ -2739,10 +2959,10 @@ }, "networkTier": { "type": "string", - "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: CLOUD_NETWORK_PREMIUM , CLOUD_NETWORK_SELECT. If this field is not specified, it is assumed to be CLOUD_NETWORK_PREMIUM.", + "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: PREMIUM , SELECT. If this field is not specified, it is assumed to be PREMIUM.", "enum": [ - "CLOUD_NETWORK_PREMIUM", - "CLOUD_NETWORK_SELECT" + "PREMIUM", + "SELECT" ], "enumDescriptions": [ "", @@ -2881,6 +3101,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -2901,6 +3122,7 @@ "", "", "", + "", "" ] }, @@ -3267,7 +3489,7 @@ }, "hostType": { "type": "string", - "description": "Full or partial URL of the host type resource to use for this host, in the format: zones/zone/hostTypes/host-type. This is provided by the client when the host is created. For example, the following is a valid partial url to a predefined host type:\n\nzones/us-central1-f/hostTypes/n1-host-64-416" + "description": "Full or partial URL of the host type resource to use for this host, in the format: zones/zone/hostTypes/host-type. This is provided by the client when the host is created. For example, the following is a valid partial url to a predefined host type:\n\nzones/us-central1-b/hostTypes/n1-host-64-416" }, "id": { "type": "string", @@ -3406,58 +3628,265 @@ } } }, - "HostsScopedList": { - "id": "HostsScopedList", + "HostType": { + "id": "HostType", "type": "object", + "description": "A Host Type resource.", "properties": { - "hosts": { - "type": "array", - "description": "[Output Only] List of hosts contained in this scope.", - "items": { - "$ref": "Host" - } + "cpuPlatform": { + "type": "string", + "description": "[Output Only] The CPU platform used by this host type." }, - "warning": { - "type": "object", - "description": "[Output Only] An informational warning that appears when the host list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this host type." 
+ }, + "description": { + "type": "string", + "description": "[Output Only] An optional textual description of the resource." + }, + "guestCpus": { + "type": "integer", + "description": "[Output Only] The number of virtual CPUs that are available to the host type.", + "format": "int32" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#hostType for host types.", + "default": "compute#hostType" + }, + "localSsdGb": { + "type": "integer", + "description": "[Output Only] Local SSD available to the host type, defined in GB.", + "format": "int32" + }, + "memoryMb": { + "type": "integer", + "description": "[Output Only] The amount of physical memory available to the host type, defined in MB.", + "format": "int32" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] The name of the zone where the host type resides, such as us-central1-a." + } + } + }, + "HostTypeAggregatedList": { + "id": "HostTypeAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped host type lists.", + "additionalProperties": { + "$ref": "HostTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of host types." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource.Always compute#hostTypeAggregatedList for aggregated lists of host types.", + "default": "compute#hostTypeAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "HostTypeList": { + "id": "HostTypeList", + "type": "object", + "description": "Contains a list of host types.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Host Type resources.", + "items": { + "$ref": "HostType" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource.Always compute#hostTypeList for lists of host types.", + "default": "compute#hostTypeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "HostTypesScopedList": { + "id": "HostTypesScopedList", + "type": "object", + "properties": { + "hostTypes": { + "type": "array", + "description": "[Output Only] List of host types contained in this scope.", + "items": { + "$ref": "HostType" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that appears when the host types list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "HostsScopedList": { + "id": "HostsScopedList", + "type": "object", + "properties": { + "hosts": { + "type": "array", + "description": "[Output Only] List of hosts contained in this scope.", + "items": { + "$ref": "Host" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that appears when the host list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3944,6 +4373,10 @@ "$ref": "Metadata", "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." }, + "minCpuPlatform": { + "type": "string", + "description": "Minimum cpu/platform to be used by this instance. We may schedule on the specified or later cpu/platform." + }, "name": { "type": "string", "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." @@ -4519,9 +4952,13 @@ "instanceTemplate": { "type": "string" }, + "name": { + "type": "string", + "description": "Name of the version. Unique among all versions in the scope of this managed instance group." + }, "tag": { "type": "string", - "description": "Tag describing the version. Used to trigger rollout of a target version even if instance_template remains unchanged." + "description": "Tag describing the version. Used to trigger rollout of a target version even if instance_template remains unchanged. Deprecated in favor of 'name'." }, "targetSize": { "$ref": "FixedOrPercent", @@ -4565,6 +5002,10 @@ "items": { "$ref": "ManagedInstance" } + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." 
} } }, @@ -4628,6 +5069,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -4648,6 +5090,7 @@ "", "", "", + "", "" ] }, @@ -4822,6 +5265,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -4842,6 +5286,7 @@ "", "", "", + "", "" ] }, @@ -5158,6 +5603,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -5178,6 +5624,7 @@ "", "", "", + "", "" ] }, @@ -5224,6 +5671,18 @@ } } }, + "InstancesSetMachineResourcesRequest": { + "id": "InstancesSetMachineResourcesRequest", + "type": "object", + "properties": { + "guestAccelerators": { + "type": "array", + "items": { + "$ref": "AcceleratorConfig" + } + } + } + }, "InstancesSetMachineTypeRequest": { "id": "InstancesSetMachineTypeRequest", "type": "object", @@ -5493,6 +5952,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -5513,6 +5973,7 @@ "", "", "", + "", "" ] }, @@ -5625,6 +6086,10 @@ "tag": { "type": "string", "description": "[Output Only] Tag describing the version." + }, + "version": { + "$ref": "ManagedInstanceVersion", + "description": "[Output Only] Intended version of this instance." } } }, @@ -5661,6 +6126,20 @@ } } }, + "ManagedInstanceVersion": { + "id": "ManagedInstanceVersion", + "type": "object", + "properties": { + "instanceTemplate": { + "type": "string", + "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }." + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the version." + } + } + }, "Metadata": { "id": "Metadata", "type": "object", @@ -5845,12 +6324,7 @@ }, "network": { "type": "string", - "description": "URL of the network resource for this instance. This is required for creating an instance but optional when creating a firewall rule. If not specified when creating a firewall rule, the default network is used:\n\nglobal/networks/default \n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default", - "annotations": { - "required": [ - "compute.instances.insert" - ] - } + "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. 
For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default" }, "networkIP": { "type": "string", @@ -6106,6 +6580,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -6126,6 +6601,7 @@ "", "", "", + "", "" ] }, @@ -6253,6 +6729,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -6273,6 +6750,7 @@ "", "", "", + "", "" ] }, @@ -6352,7 +6830,7 @@ "properties": { "auditConfigs": { "type": "array", - "description": "Specifies audit logging configs for \"data access\". \"data access\": generally refers to data reads/writes and admin reads. \"admin activity\": generally refers to admin writes.\n\nNote: `AuditConfig` doesn't apply to \"admin activity\", which always enables audit logging.", + "description": "Specifies cloud audit logging configuration for this policy.", "items": { "$ref": "AuditConfig" } @@ -6538,6 +7016,7 @@ "BACKEND_BUCKETS", "BACKEND_SERVICES", "CPUS", + "CPUS_ALL_REGIONS", "DISKS_TOTAL_GB", "FIREWALLS", "FORWARDING_RULES", @@ -6568,7 +7047,6 @@ "TARGET_SSL_PROXIES", "TARGET_TCP_PROXIES", "TARGET_VPN_GATEWAYS", - "TOTAL_CPUS", "URL_MAPS", "VPN_TUNNELS" ], @@ -6852,6 +7330,10 @@ "items": { "$ref": "ManagedInstance" } + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." } } }, @@ -7069,7 +7551,7 @@ "Route": { "id": "Route", "type": "object", - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving a instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, a instance gateway or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving a instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. 
If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", "properties": { "creationTimestamp": { "type": "string", @@ -7190,6 +7672,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -7210,6 +7693,7 @@ "", "", "", + "", "" ] }, @@ -7477,6 +7961,13 @@ "$ref": "Route" } }, + "bestRoutesForRouter": { + "type": "array", + "description": "Best routes learned by this router.", + "items": { + "$ref": "Route" + } + }, "bgpPeerStatus": { "type": "array", "items": { @@ -7605,6 +8096,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -7625,6 +8117,7 @@ "", "", "", + "", "" ] }, @@ -8244,6 +8737,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -8264,6 +8758,7 @@ "", "", "", + "", "" ] }, @@ -8663,6 +9158,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -8683,6 +9179,7 @@ "", "", "", + "", "" ] }, @@ -8953,6 +9450,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -8973,6 +9471,7 @@ "", "", "", + "", "" ] }, @@ -9428,6 +9927,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -9448,6 +9948,7 @@ "", "", "", + "", "" ] }, @@ -9959,6 +10460,7 @@ "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", "UNREACHABLE" @@ -9979,6 +10481,7 @@ "", "", "", + "", "" ] }, @@ -10065,6 +10568,13 @@ "type": "object", "description": "A Zone resource.", "properties": { + "availableCpuPlatforms": { + "type": "array", + "description": "[Output Only] Available cpu/platform selections for the zone.", + "items": { + "type": "string" + } + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -10106,64 +10616,216 @@ "DOWN", "UP" ], - "enumDescriptions": [ - "", - "" + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "ZoneList": { + "id": "ZoneList", + "type": "object", + "description": "Contains a list of zone resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." 
+ }, + "items": { + "type": "array", + "description": "[Output Only] A list of Zone resources.", + "items": { + "$ref": "Zone" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#zoneList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ZoneSetLabelsRequest": { + "id": "ZoneSetLabelsRequest", + "type": "object", + "properties": { + "labelFingerprint": { + "type": "string", + "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "The labels to set for this resource.", + "additionalProperties": { + "type": "string" + } + } + } + } + }, + "resources": { + "acceleratorTypes": { + "methods": { + "aggregatedList": { + "id": "compute.acceleratorTypes.aggregatedList", + "path": "{project}/aggregated/acceleratorTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of accelerator types.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AcceleratorTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.acceleratorTypes.get", + "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + "httpMethod": "GET", + "description": "Returns the specified accelerator type. Get a list of available accelerator types by making a list() request.", + "parameters": { + "acceleratorType": { + "type": "string", + "description": "Name of the accelerator type to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "acceleratorType" + ], + "response": { + "$ref": "AcceleratorType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "ZoneList": { - "id": "ZoneList", - "type": "object", - "description": "Contains a list of zone resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Zone resources.", - "items": { - "$ref": "Zone" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#zoneList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "list": { + "id": "compute.acceleratorTypes.list", + "path": "{project}/zones/{zone}/acceleratorTypes", + "httpMethod": "GET", + "description": "Retrieves a list of accelerator types available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "AcceleratorTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, - "ZoneSetLabelsRequest": { - "id": "ZoneSetLabelsRequest", - "type": "object", - "properties": { - "labelFingerprint": { - "type": "string", - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "The labels to set for this resource.", - "additionalProperties": { - "type": "string" - } - } - } - } - }, - "resources": { "addresses": { "methods": { "aggregatedList": { @@ -10183,7 +10845,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -10352,7 +11013,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -10393,6 +11053,50 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setLabels": { + "id": "compute.addresses.setLabels", + "path": "{project}/regions/{region}/addresses/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on an Address. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.addresses.testIamPermissions", "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", @@ -10459,7 +11163,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -10628,7 +11331,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -10950,7 +11652,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -11150,7 +11851,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -11331,7 +12031,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -11535,7 +12234,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -11705,7 +12403,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -11857,7 +12554,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -11949,7 +12645,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -12011,7 +12706,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -12232,7 +12926,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -12522,7 +13215,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -12685,7 +13377,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -12854,7 +13545,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -12895,6 +13585,50 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setLabels": { + "id": "compute.forwardingRules.setLabels", + "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on the specified resource. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setTarget": { "id": "compute.forwardingRules.setTarget", "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", @@ -13100,7 +13834,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -13133,6 +13866,42 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setLabels": { + "id": "compute.globalAddresses.setLabels", + "path": "{project}/global/addresses/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling or Tagging Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.globalAddresses.testIamPermissions", "path": "{project}/global/addresses/{resource}/testIamPermissions", @@ -13286,7 +14055,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -13308,15 +14076,51 @@ } }, "parameterOrder": [ - "project" + "project" + ], + "response": { + "$ref": "ForwardingRuleList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "id": "compute.globalForwardingRules.setLabels", + "path": "{project}/global/forwardingRules/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on the specified resource. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" ], + "request": { + "$ref": "GlobalSetLabelsRequest" + }, "response": { - "$ref": "ForwardingRuleList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "setTarget": { @@ -13413,7 +14217,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -13527,7 +14330,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -13676,7 +14478,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -13820,6 +14621,158 @@ } } }, + "hostTypes": { + "methods": { + "aggregatedList": { + "id": "compute.hostTypes.aggregatedList", + "path": "{project}/aggregated/hostTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of host types.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "HostTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.hostTypes.get", + "path": "{project}/zones/{zone}/hostTypes/{hostType}", + "httpMethod": "GET", + "description": "Returns the specified host type. Get a list of available host types by making a list() request.", + "parameters": { + "hostType": { + "type": "string", + "description": "Name of the host type to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "hostType" + ], + "response": { + "$ref": "HostType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.hostTypes.list", + "path": "{project}/zones/{zone}/hostTypes", + "httpMethod": "GET", + "description": "Retrieves a list of host types available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "HostTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "hosts": { "methods": { "aggregatedList": { @@ -13839,7 +14792,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -14050,7 +15002,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -14297,7 +15248,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -14338,7 +15288,7 @@ "parameters": { "httpHealthCheck": { "type": "string", - "description": "Name of the HttpHealthCheck resource to update.", + "description": "Name of the HttpHealthCheck resource to patch.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -14363,7 +15313,8 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "testIamPermissions": { @@ -14555,7 +15506,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -14844,6 +15794,11 @@ "httpMethod": "POST", "description": "Creates an image in the specified project using the data included in the request.", "parameters": { + "forceCreation": { + "type": "boolean", + "description": "Force image creation if true.", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -14886,7 +15841,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -15055,7 +16009,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -15261,7 +16214,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -15322,7 +16274,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "order_by": { @@ -15365,7 +16316,7 @@ "id": "compute.instanceGroupManagers.patch", "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", "httpMethod": "PATCH", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports patch semantics.", + "description": "Updates a managed instance group using the information that you specify in the request. 
The field statefulPolicy is updated using PATCH semantics. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports patch semantics.", "parameters": { "instanceGroupManager": { "type": "string", @@ -15709,7 +16660,7 @@ "id": "compute.instanceGroupManagers.update", "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", "httpMethod": "PUT", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", + "description": "Updates a managed instance group using the information that you specify in the request. The field statefulPolicy is updated using PATCH semantics. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", "parameters": { "instanceGroupManager": { "type": "string", @@ -15810,7 +16761,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -15974,7 +16924,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -16037,7 +16986,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -16326,7 +17274,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -16468,7 +17415,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -16888,7 +17834,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -17115,6 +18060,50 @@ "https://www.googleapis.com/auth/compute" ] }, + "setMachineResources": { + "id": "compute.instances.setMachineResources", + "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + "httpMethod": "POST", + "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesSetMachineResourcesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setMachineType": { "id": "compute.instances.setMachineType", 
"path": "{project}/zones/{zone}/instances/{instance}/setMachineType", @@ -17667,7 +18656,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -17759,7 +18747,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -17952,7 +18939,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -18268,7 +19254,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "order_by": { @@ -18305,6 +19290,25 @@ "httpMethod": "POST", "description": "List all XPN host projects visible to the user in an organization.", "parameters": { + "filter": { + "type": "string", + "location": "query" + }, + "maxResults": { + "type": "integer", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "order_by": { + "type": "string", + "location": "query" + }, + "pageToken": { + "type": "string", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -18611,7 +19615,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -18967,7 +19970,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -19204,7 +20206,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -19437,7 +20438,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -19830,7 +20830,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -19891,7 +20890,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "order_by": { @@ -20336,7 +21334,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -20399,7 +21396,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -20631,7 +21627,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -20727,7 +21722,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -20781,7 +21775,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -20992,7 +21985,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -21327,7 +22319,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -21485,7 +22476,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -21707,7 +22697,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -21798,7 +22787,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -22053,7 +23041,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -22344,7 +23331,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, 
"orderBy": { @@ -22566,7 +23552,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -22729,7 +23714,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -22898,7 +23882,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -23093,7 +24076,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -23307,7 +24289,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -23647,7 +24628,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -23941,7 +24921,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -24104,7 +25083,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -24273,7 +25251,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -24511,7 +25488,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -24710,7 +25686,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -24879,7 +25854,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -24920,6 +25894,50 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setLabels": { + "id": "compute.vpnTunnels.setLabels", + "path": "{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on a VpnTunnel. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.vpnTunnels.testIamPermissions", "path": "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions", @@ -25066,7 +26084,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -25162,7 +26179,6 @@ "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 595ab8c82..10c85e11c 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -71,6 +71,7 @@ func New(client *http.Client) (*Service, error) { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} + s.AcceleratorTypes = NewAcceleratorTypesService(s) s.Addresses = NewAddressesService(s) s.Autoscalers = NewAutoscalersService(s) s.BackendBuckets = NewBackendBucketsService(s) @@ -85,6 +86,7 @@ func New(client *http.Client) (*Service, error) { s.GlobalForwardingRules = NewGlobalForwardingRulesService(s) s.GlobalOperations = NewGlobalOperationsService(s) s.HealthChecks = NewHealthChecksService(s) + s.HostTypes = NewHostTypesService(s) s.Hosts = NewHostsService(s) s.HttpHealthChecks = NewHttpHealthChecksService(s) s.HttpsHealthChecks = NewHttpsHealthChecksService(s) @@ -125,9 +127,12 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + AcceleratorTypes *AcceleratorTypesService Addresses *AddressesService @@ -157,6 +162,8 @@ type Service struct { HealthChecks *HealthChecksService + HostTypes *HostTypesService + Hosts *HostsService HttpHealthChecks *HttpHealthChecksService @@ -237,6 +244,19 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewAcceleratorTypesService(s *Service) *AcceleratorTypesService { + rs := &AcceleratorTypesService{s: s} + return rs +} + +type AcceleratorTypesService 
struct { + s *Service +} + func NewAddressesService(s *Service) *AddressesService { rs := &AddressesService{s: s} return rs @@ -363,6 +383,15 @@ type HealthChecksService struct { s *Service } +func NewHostTypesService(s *Service) *HostTypesService { + rs := &HostTypesService{s: s} + return rs +} + +type HostTypesService struct { + s *Service +} + func NewHostsService(s *Service) *HostsService { rs := &HostsService{s: s} return rs @@ -722,6 +751,298 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AcceleratorType: An Accelerator Type resource. +type AcceleratorType struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // accelerator type. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] An optional textual description of the + // resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The type of the resource. Always + // compute#acceleratorType for accelerator types. + Kind string `json:"kind,omitempty"` + + // MaximumCardsPerInstance: [Output Only] Maximum accelerator cards + // allowed per instance. + MaximumCardsPerInstance int64 `json:"maximumCardsPerInstance,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] The name of the zone where the accelerator type + // resides, such as us-central1-a. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorType) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorType + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped accelerator type lists. 
+ Items map[string]AcceleratorTypesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#acceleratorTypeAggregatedList for aggregated lists of + // accelerator types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AcceleratorTypeList: Contains a list of accelerator types. +type AcceleratorTypeList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of AcceleratorType resources. + Items []*AcceleratorType `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#acceleratorTypeList for lists of accelerator types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] A token used to continue a truncated + // list request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypesScopedList struct { + // AcceleratorTypes: [Output Only] List of accelerator types contained + // in this scope. + AcceleratorTypes []*AcceleratorType `json:"acceleratorTypes,omitempty"` + + // Warning: [Output Only] An informational warning that appears when the + // accelerator types list is empty. + Warning *AcceleratorTypesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AcceleratorTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AcceleratorTypes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypesScopedList) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AcceleratorTypesScopedListWarning: [Output Only] An informational +// warning that appears when the accelerator types list is empty. +type AcceleratorTypesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AccessConfig: An access configuration attached to an instance's // network interface. Only one access config per instance is supported. type AccessConfig struct { @@ -741,12 +1062,12 @@ type AccessConfig struct { // NetworkTier: This signifies the networking tier used for configuring // this access configuration and can only take the following values: - // CLOUD_NETWORK_PREMIUM , CLOUD_NETWORK_SELECT. If this field is not - // specified, it is assumed to be CLOUD_NETWORK_PREMIUM. + // PREMIUM , SELECT. If this field is not specified, it is assumed to be + // PREMIUM. 
// // Possible values: - // "CLOUD_NETWORK_PREMIUM" - // "CLOUD_NETWORK_SELECT" + // "PREMIUM" + // "SELECT" NetworkTier string `json:"networkTier,omitempty"` // PublicDnsName: [Output Only] The public DNS domain name for the @@ -814,6 +1135,16 @@ type Address struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // IpVersion: The IP Version that will be used by this address. Valid + // options are IPV4 or IPV6. This can only be specified for a global + // address. + // + // Possible values: + // "IPV4" + // "IPV6" + // "UNSPECIFIED_VERSION" + IpVersion string `json:"ipVersion,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#address for // addresses. Kind string `json:"kind,omitempty"` @@ -844,13 +1175,12 @@ type Address struct { Name string `json:"name,omitempty"` // NetworkTier: This signifies the networking tier used for configuring - // this Address and can only take the following values: - // CLOUD_NETWORK_PREMIUM , CLOUD_NETWORK_SELECT. If this field is not - // specified, it is assumed to be CLOUD_NETWORK_PREMIUM. + // this Address and can only take the following values: PREMIUM , + // SELECT. If this field is not specified, it is assumed to be PREMIUM. // // Possible values: - // "CLOUD_NETWORK_PREMIUM" - // "CLOUD_NETWORK_SELECT" + // "PREMIUM" + // "SELECT" NetworkTier string `json:"networkTier,omitempty"` // Region: [Output Only] URL of the region where the regional address @@ -1054,6 +1384,7 @@ type AddressesScopedListWarning struct { // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" // "UNREACHABLE" @@ -1406,16 +1737,19 @@ func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Provides the configuration for non-admin_activity -// logging for a service. Controls exemptions and specific log -// sub-types. +// AuditConfig: Specifies the audit configuration for a service. It +// consists of which permission types are logged, and what identities, +// if any, are exempted from logging. An AuditConifg must have one or +// more AuditLogConfigs. type AuditConfig struct { - // AuditLogConfigs: The configuration for each type of logging + // AuditLogConfigs: The configuration for logging of each type of + // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` // ExemptedMembers: Specifies the identities that are exempted from // "data access" audit logging for the `service` specified above. - // Follows the same format of Binding.members. + // Follows the same format of Binding.members. This field is deprecated + // in favor of per-permission-type exemptions. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // Service: Specifies a service that will be enabled for audit logging. @@ -1447,10 +1781,19 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditLogConfig: Provides the configuration for a sub-type of logging. +// AuditLogConfig: Provides the configuration for logging a type of +// permissions. Example: +// +// { "audit_log_configs": [ { "log_type": "DATA_READ", +// "exempted_members": [ "user:foo@gmail.com" ] }, { "log_type": +// "DATA_WRITE", } ] } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting +// foo@gmail.com from DATA_READ logging. 
type AuditLogConfig struct { - // ExemptedMembers: Specifies the identities that are exempted from this - // type of logging Follows the same format of Binding.members. + // ExemptedMembers: Specifies the identities that do not cause logging + // for this type of permission. Follows the same format of + // [Binding.members][]. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -1781,6 +2124,7 @@ type AutoscalersScopedListWarning struct { // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" // "UNREACHABLE" @@ -2436,38 +2780,6 @@ func (s *BackendBucketList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BackendSSLPolicy: Message containing backend SSL policies. -type BackendSSLPolicy struct { - // PinnedPeerCertificates: List of PEM-encoded peer certificates, from - // which the public keys are extracted for authenticating the backend - // service. - PinnedPeerCertificates []string `json:"pinnedPeerCertificates,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "PinnedPeerCertificates") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PinnedPeerCertificates") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BackendSSLPolicy) MarshalJSON() ([]byte, error) { - type noMethod BackendSSLPolicy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // BackendService: A BackendService resource. This resource defines a // group of backend virtual machines and their serving capacity. type BackendService struct { @@ -2479,9 +2791,6 @@ type BackendService struct { // When the load balancing scheme is INTERNAL, this field is not used. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` - // BackendSslPolicy: Backend SSL policies to enforce. - BackendSslPolicy *BackendSSLPolicy `json:"backendSslPolicy,omitempty"` - // Backends: The list of backends that serve this BackendService. 
Backends []*Backend `json:"backends,omitempty"` @@ -2882,6 +3191,7 @@ type BackendServicesScopedListWarning struct { // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" // "UNREACHABLE" @@ -3351,6 +3661,7 @@ type CommitmentsScopedListWarning struct { // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" // "UNREACHABLE" @@ -3930,23 +4241,19 @@ func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { // DiskList: A list of Disk resources. type DiskList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of persistent disks. + // Items: A list of Disk resources. Items []*Disk `json:"items,omitempty"` // Kind: [Output Only] Type of resource. Always compute#diskList for // lists of disks. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. + // NextPageToken: [Output Only] A token used to continue a truncated + // list request. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: [Output Only] Server-defined URL for this resource. @@ -4241,6 +4548,7 @@ type DiskTypesScopedListWarning struct { // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" // "UNREACHABLE" @@ -4395,6 +4703,7 @@ type DisksScopedListWarning struct { // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" // "UNREACHABLE" @@ -4556,6 +4865,20 @@ type Firewall struct { // the firewall to apply. Only IPv4 is supported. SourceRanges []string `json:"sourceRanges,omitempty"` + // SourceServiceAccounts: If source service accounts are specified, the + // firewall will apply only to traffic originating from an instance with + // a service account in this list. Source service accounts cannot be + // used to control traffic to an instance's external IP address because + // service accounts are associated with an instance, not an IP address. + // sourceRanges can be set at the same time as sourceServiceAccounts. If + // both are set, the firewall will apply to traffic that has source IP + // address within sourceRanges OR the source IP belongs to an instance + // with service account listed in sourceServiceAccount. The connection + // does not need to match both properties for the firewall to apply. + // sourceServiceAccounts cannot be used at the same time as sourceTags + // or targetTags. + SourceServiceAccounts []string `json:"sourceServiceAccounts,omitempty"` + // SourceTags: If source tags are specified, the firewall will apply // only to traffic with source IP that belongs to a tag listed in source // tags. 
Source tags cannot be used to control traffic to an instance's @@ -4779,7 +5102,7 @@ type ForwardingRule struct { // IPAddress: The IP address that this forwarding rule is serving on // behalf of. // - // For global forwarding rules, the address must be a global IP; for + // For global forwarding rules, the address must be a global IP. For // regional forwarding rules, the address must live in the same region // as the forwarding rule. By default, this field is empty and an // ephemeral IP from the same scope (global or regional) will be @@ -4796,8 +5119,8 @@ type ForwardingRule struct { // IPProtocol: The IP protocol to which this rule applies. Valid options // are TCP, UDP, ESP, AH, SCTP or ICMP. // - // When the load balancing scheme is INTERNAL (name, metadata) mapping is constant for the lifetime of + // ShortId: The service-generated short identifier for this counter. + // The short_id -> (name, metadata) mapping is constant for the lifetime + // of // a job. ShortId int64 `json:"shortId,omitempty,string"` @@ -738,9 +909,10 @@ type CreateJobFromTemplateRequest struct { // Environment: The runtime environment for the job. Environment *RuntimeEnvironment `json:"environment,omitempty"` - // GcsPath: Required. A Cloud Storage path to the template from which to - // create the job. Must be a valid Cloud Storage URL, beginning with - // `gs://`. + // GcsPath: Required. A Cloud Storage path to the template from which + // to + // create the job. + // Must be a valid Cloud Storage URL, beginning with `gs://`. GcsPath string `json:"gcsPath,omitempty"` // JobName: Required. The job name to use for the created job. @@ -803,13 +975,16 @@ func (s *CustomSourceLocation) MarshalJSON() ([]byte, error) { // DataDiskAssignment: Data disk assignment for a given VM instance. type DataDiskAssignment struct { // DataDisks: Mounted data disks. The order is important a data disk's - // 0-based index in this list defines which persistent directory the - // disk is mounted to, for example the list of { - // "myproject-1014-104817-4c2-harness-0-disk-0" }, { - // "myproject-1014-104817-4c2-harness-0-disk-1" }. + // 0-based index in + // this list defines which persistent directory the disk is mounted to, + // for + // example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" + // }, + // { "myproject-1014-104817-4c2-harness-0-disk-1" }. DataDisks []string `json:"dataDisks,omitempty"` - // VmInstance: VM instance name the data disks mounted to, for example + // VmInstance: VM instance name the data disks mounted to, for + // example // "myproject-1014-104817-4c2-harness-0". VmInstance string `json:"vmInstance,omitempty"` @@ -837,18 +1012,23 @@ func (s *DataDiskAssignment) MarshalJSON() ([]byte, error) { } // DerivedSource: Specification of one of the bundles produced as a -// result of splitting a Source (e.g. when executing a -// SourceSplitRequest, or when splitting an active task using -// WorkItemStatus.dynamic_source_split), relative to the source being -// split. +// result of splitting +// a Source (e.g. when executing a SourceSplitRequest, or when +// splitting an active task using +// WorkItemStatus.dynamic_source_split), +// relative to the source being split. type DerivedSource struct { // DerivationMode: What source to base the produced source on (if any). 
// // Possible values: - // "SOURCE_DERIVATION_MODE_UNKNOWN" - // "SOURCE_DERIVATION_MODE_INDEPENDENT" - // "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" - // "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" + // "SOURCE_DERIVATION_MODE_UNKNOWN" - The source derivation is + // unknown, or unspecified. + // "SOURCE_DERIVATION_MODE_INDEPENDENT" - Produce a completely + // independent Source with no base. + // "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" - Produce a Source based + // on the Source being split. + // "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" - Produce a Source + // based on the base of the Source being split. DerivationMode string `json:"derivationMode,omitempty"` // Source: Specification of the source. @@ -881,26 +1061,38 @@ func (s *DerivedSource) MarshalJSON() ([]byte, error) { // Disk: Describes the data disk used by a workflow job. type Disk struct { // DiskType: Disk storage type, as defined by Google Compute Engine. - // This must be a disk type appropriate to the project and zone in which - // the workers will run. If unknown or unspecified, the service will - // attempt to choose a reasonable default. For example, the standard - // persistent disk type is a resource name typically ending in - // "pd-standard". If SSD persistent disks are available, the resource - // name typically ends with "pd-ssd". The actual valid values are - // defined the Google Compute Engine API, not by the Cloud Dataflow API; - // consult the Google Compute Engine documentation for more information - // about determining the set of available disk types for a particular - // project and zone. Google Compute Engine Disk types are local to a - // particular project in a particular zone, and so the resource name - // will typically look something like this: - // compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-sta - // ndard + // This + // must be a disk type appropriate to the project and zone in which + // the workers will run. If unknown or unspecified, the service + // will attempt to choose a reasonable default. + // + // For example, the standard persistent disk type is a resource + // name + // typically ending in "pd-standard". If SSD persistent disks + // are + // available, the resource name typically ends with "pd-ssd". + // The + // actual valid values are defined the Google Compute Engine API, + // not by the Cloud Dataflow API; consult the Google Compute + // Engine + // documentation for more information about determining the set + // of + // available disk types for a particular project and zone. + // + // Google Compute Engine Disk types are local to a particular + // project in a particular zone, and so the resource name will + // typically look something like + // this: + // + // compute.googleapis.com/projects/project-id/zones/zone/diskTypes + // /pd-standard DiskType string `json:"diskType,omitempty"` // MountPoint: Directory in a VM where disk is mounted. MountPoint string `json:"mountPoint,omitempty"` - // SizeGb: Size of disk in GB. If zero or unspecified, the service will + // SizeGb: Size of disk in GB. If zero or unspecified, the service + // will // attempt to choose a reasonable default. SizeGb int64 `json:"sizeGb,omitempty"` @@ -927,6 +1119,95 @@ func (s *Disk) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DisplayData: Data provided with a pipeline or transform to provide +// descriptive info. +type DisplayData struct { + // BoolValue: Contains value if the data is of a boolean type. 
+ BoolValue bool `json:"boolValue,omitempty"` + + // DurationValue: Contains value if the data is of duration type. + DurationValue string `json:"durationValue,omitempty"` + + // FloatValue: Contains value if the data is of float type. + FloatValue float64 `json:"floatValue,omitempty"` + + // Int64Value: Contains value if the data is of int64 type. + Int64Value int64 `json:"int64Value,omitempty,string"` + + // JavaClassValue: Contains value if the data is of java class type. + JavaClassValue string `json:"javaClassValue,omitempty"` + + // Key: The key identifying the display data. + // This is intended to be used as a label for the display data + // when viewed in a dax monitoring system. + Key string `json:"key,omitempty"` + + // Label: An optional label to display in a dax UI for the element. + Label string `json:"label,omitempty"` + + // Namespace: The namespace for the key. This is usually a class name or + // programming + // language namespace (i.e. python module) which defines the display + // data. + // This allows a dax monitoring system to specially handle the data + // and perform custom rendering. + Namespace string `json:"namespace,omitempty"` + + // ShortStrValue: A possible additional shorter value to display. + // For example a java_class_name_value of com.mypackage.MyDoFn + // will be stored with MyDoFn as the short_str_value + // and + // com.mypackage.MyDoFn as the java_class_name value. + // short_str_value can be displayed and java_class_name_value + // will be displayed as a tooltip. + ShortStrValue string `json:"shortStrValue,omitempty"` + + // StrValue: Contains value if the data is of string type. + StrValue string `json:"strValue,omitempty"` + + // TimestampValue: Contains value if the data is of timestamp type. + TimestampValue string `json:"timestampValue,omitempty"` + + // Url: An optional full URL. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BoolValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BoolValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DisplayData) MarshalJSON() ([]byte, error) { + type noMethod DisplayData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *DisplayData) UnmarshalJSON(data []byte) error { + type noMethod DisplayData + var s1 struct { + FloatValue gensupport.JSONFloat64 `json:"floatValue"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FloatValue = float64(s1.FloatValue) + return nil +} + // DistributionUpdate: A metric value representing a distribution. 
type DistributionUpdate struct { // Count: The count of the number of elements present in the @@ -940,8 +1221,8 @@ type DistributionUpdate struct { Min *SplitInt64 `json:"min,omitempty"` // Sum: Use an int64 since we'd prefer the added precision. If overflow - // is a common problem we can detect it and use an additional int64 or a - // double. + // is a common + // problem we can detect it and use an additional int64 or a double. Sum *SplitInt64 `json:"sum,omitempty"` // SumOfSquares: Use a double since the sum of squares is likely to @@ -986,16 +1267,19 @@ func (s *DistributionUpdate) UnmarshalJSON(data []byte) error { } // DynamicSourceSplit: When a task splits using -// WorkItemStatus.dynamic_source_split, this message describes the two -// parts of the split relative to the description of the current task's -// input. +// WorkItemStatus.dynamic_source_split, this +// message describes the two parts of the split relative to +// the +// description of the current task's input. type DynamicSourceSplit struct { - // Primary: Primary part (continued to be processed by worker). - // Specified relative to the previously-current source. Becomes current. + // Primary: Primary part (continued to be processed by + // worker). + // Specified relative to the previously-current source. + // Becomes current. Primary *DerivedSource `json:"primary,omitempty"` - // Residual: Residual part (returned to the pool of work). Specified - // relative to the previously-current source. + // Residual: Residual part (returned to the pool of work). + // Specified relative to the previously-current source. Residual *DerivedSource `json:"residual,omitempty"` // ForceSendFields is a list of field names (e.g. "Primary") to @@ -1023,15 +1307,21 @@ func (s *DynamicSourceSplit) MarshalJSON() ([]byte, error) { // Environment: Describes the environment in which a Dataflow Job runs. type Environment struct { - // ClusterManagerApiService: The type of cluster manager API to use. If - // unknown or unspecified, the service will attempt to choose a - // reasonable default. This should be in the form of the API service - // name, e.g. "compute.googleapis.com". + // ClusterManagerApiService: The type of cluster manager API to use. If + // unknown or + // unspecified, the service will attempt to choose a reasonable + // default. This should be in the form of the API service name, + // e.g. "compute.googleapis.com". ClusterManagerApiService string `json:"clusterManagerApiService,omitempty"` - // Dataset: The dataset for the current project where various workflow - // related tables are stored. The supported resource type is: Google - // BigQuery: bigquery.googleapis.com/{dataset} + // Dataset: The dataset for the current project where various + // workflow + // related tables are stored. + // + // The supported resource type is: + // + // Google BigQuery: + // bigquery.googleapis.com/{dataset} Dataset string `json:"dataset,omitempty"` // Experiments: The list of experiments to enable. @@ -1041,9 +1331,12 @@ type Environment struct { InternalExperiments googleapi.RawMessage `json:"internalExperiments,omitempty"` // SdkPipelineOptions: The Cloud Dataflow SDK pipeline options specified - // by the user. These options are passed through the service and are - // used to recreate the SDK pipeline options on the worker in a language - // agnostic and platform independent way. + // by the user. 
These + // options are passed through the service and are used to recreate + // the + // SDK pipeline options on the worker in a language agnostic and + // platform + // independent way. SdkPipelineOptions googleapi.RawMessage `json:"sdkPipelineOptions,omitempty"` // ServiceAccountEmail: Identity to run virtual machines as. Defaults to @@ -1051,25 +1344,32 @@ type Environment struct { ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` // TempStoragePrefix: The prefix of the resources the system should use - // for temporary storage. The system will append the suffix - // "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the - // value of the job_name field. The resulting bucket and object prefix - // is used as the prefix of the resources used to store temporary data - // needed during the job execution. NOTE: This will override the value - // in taskrunner_settings. The supported resource type is: Google Cloud - // Storage: storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // for temporary + // storage. The system will append the suffix "/temp-{JOBNAME} to + // this resource prefix, where {JOBNAME} is the value of the + // job_name field. The resulting bucket and object prefix is used + // as the prefix of the resources used to store temporary data + // needed during the job execution. NOTE: This will override the + // value in taskrunner_settings. + // The supported resource type is: + // + // Google Cloud Storage: + // + // storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempStoragePrefix string `json:"tempStoragePrefix,omitempty"` // UserAgent: A description of the process that generated the request. UserAgent googleapi.RawMessage `json:"userAgent,omitempty"` // Version: A structure describing which components and their versions - // of the service are required in order to run the job. + // of the service + // are required in order to run the job. Version googleapi.RawMessage `json:"version,omitempty"` // WorkerPools: The worker pools. At least one "harness" worker pool - // must be specified in order for the job to have workers. + // must be + // specified in order for the job to have workers. WorkerPools []*WorkerPool `json:"workerPools,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1097,6 +1397,71 @@ func (s *Environment) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ExecutionStageSummary: Description of the composing transforms, +// names/ids, and input/outputs of a +// stage of execution. Some composing transforms and sources may have +// been +// generated by the Dataflow service during execution planning. +type ExecutionStageSummary struct { + // ComponentSource: Collections produced and consumed by component + // transforms of this stage. + ComponentSource []*ComponentSource `json:"componentSource,omitempty"` + + // ComponentTransform: Transforms that comprise this execution stage. + ComponentTransform []*ComponentTransform `json:"componentTransform,omitempty"` + + // Id: Dataflow service generated id for this stage. + Id string `json:"id,omitempty"` + + // InputSource: Input sources for this stage. + InputSource []*StageSource `json:"inputSource,omitempty"` + + // Kind: Type of tranform this stage is executing. + // + // Possible values: + // "UNKNOWN_KIND" - Unrecognized transform type. + // "PAR_DO_KIND" - ParDo transform. + // "GROUP_BY_KEY_KIND" - Group By Key transform. 
+ // "FLATTEN_KIND" - Flatten transform. + // "READ_KIND" - Read transform. + // "WRITE_KIND" - Write transform. + // "CONSTANT_KIND" - Constructs from a constant value, such as with + // Create.of. + // "SINGLETON_KIND" - Creates a Singleton view of a collection. + // "SHUFFLE_KIND" - Opening or closing a shuffle session, often as + // part of a GroupByKey. + Kind string `json:"kind,omitempty"` + + // Name: Dataflow service generated name for this stage. + Name string `json:"name,omitempty"` + + // OutputSource: Output sources for this stage. + OutputSource []*StageSource `json:"outputSource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ComponentSource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ComponentSource") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ExecutionStageSummary) MarshalJSON() ([]byte, error) { + type noMethod ExecutionStageSummary + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // FailedLocation: Indicates which location failed to respond to a // request for data. type FailedLocation struct { @@ -1234,7 +1599,8 @@ func (s *FloatingPointMean) UnmarshalJSON(data []byte) error { // component. type GetDebugConfigRequest struct { // ComponentId: The internal component id for which debug configuration - // is requested. + // is + // requested. ComponentId string `json:"componentId,omitempty"` // WorkerId: The worker id, i.e., VM hostname. @@ -1297,15 +1663,17 @@ func (s *GetDebugConfigResponse) MarshalJSON() ([]byte, error) { } // InstructionInput: An input of an instruction, as a reference to an -// output of a producer instruction. +// output of a +// producer instruction. type InstructionInput struct { // OutputNum: The output index (origin zero) within the producer. OutputNum int64 `json:"outputNum,omitempty"` // ProducerInstructionIndex: The index (origin zero) of the parallel - // instruction that produces the output to be consumed by this input. - // This index is relative to the list of instructions in this input's - // instruction's containing MapTask. + // instruction that produces + // the output to be consumed by this input. This index is relative + // to the list of instructions in this input's instruction's + // containing MapTask. ProducerInstructionIndex int64 `json:"producerInstructionIndex,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputNum") to @@ -1340,20 +1708,23 @@ type InstructionOutput struct { Name string `json:"name,omitempty"` // OnlyCountKeyBytes: For system-generated byte and mean byte metrics, - // certain instructions should only report the key size. + // certain instructions + // should only report the key size. 
OnlyCountKeyBytes bool `json:"onlyCountKeyBytes,omitempty"` // OnlyCountValueBytes: For system-generated byte and mean byte metrics, - // certain instructions should only report the value size. + // certain instructions + // should only report the value size. OnlyCountValueBytes bool `json:"onlyCountValueBytes,omitempty"` // OriginalName: System-defined name for this output in the original - // workflow graph. Outputs that do not contribute to an original - // instruction do not set this. + // workflow graph. + // Outputs that do not contribute to an original instruction do not set + // this. OriginalName string `json:"originalName,omitempty"` - // SystemName: System-defined name of this output. Unique across the - // workflow. + // SystemName: System-defined name of this output. + // Unique across the workflow. SystemName string `json:"systemName,omitempty"` // ForceSendFields is a list of field names (e.g. "Codec") to @@ -1441,36 +1812,92 @@ func (s *IntegerMean) MarshalJSON() ([]byte, error) { // Job: Defines a job to be run by the Cloud Dataflow service. type Job struct { // ClientRequestId: The client's unique identifier of the job, re-used - // across retried attempts. If this field is set, the service will - // ensure its uniqueness. The request to create a job will fail if the - // service has knowledge of a previously submitted job with the same - // client's ID and job name. The caller may use this field to ensure - // idempotence of job creation across retried attempts to create a job. + // across retried attempts. + // If this field is set, the service will ensure its uniqueness. + // The request to create a job will fail if the service has knowledge of + // a + // previously submitted job with the same client's ID and job name. + // The caller may use this field to ensure idempotence of job + // creation across retried attempts to create a job. // By default, the field is empty and, in that case, the service ignores // it. ClientRequestId string `json:"clientRequestId,omitempty"` // CreateTime: The timestamp when the job was initially created. - // Immutable and set by the Cloud Dataflow service. + // Immutable and set by the + // Cloud Dataflow service. CreateTime string `json:"createTime,omitempty"` - // CurrentState: The current state of the job. Jobs are created in the - // `JOB_STATE_STOPPED` state unless otherwise specified. A job in the - // `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. - // After a job has reached a terminal state, no further state updates - // may be made. This field may be mutated by the Cloud Dataflow service; + // CurrentState: The current state of the job. + // + // Jobs are created in the `JOB_STATE_STOPPED` state unless + // otherwise + // specified. + // + // A job in the `JOB_STATE_RUNNING` state may asynchronously enter + // a + // terminal state. After a job has reached a terminal state, no + // further state updates may be made. + // + // This field may be mutated by the Cloud Dataflow service; // callers cannot mutate it. // // Possible values: - // "JOB_STATE_UNKNOWN" - // "JOB_STATE_STOPPED" - // "JOB_STATE_RUNNING" - // "JOB_STATE_DONE" - // "JOB_STATE_FAILED" - // "JOB_STATE_CANCELLED" - // "JOB_STATE_UPDATED" - // "JOB_STATE_DRAINING" - // "JOB_STATE_DRAINED" + // "JOB_STATE_UNKNOWN" - The job's run state isn't specified. + // "JOB_STATE_STOPPED" - `JOB_STATE_STOPPED` indicates that the job + // has not + // yet started to run. 
+ // "JOB_STATE_RUNNING" - `JOB_STATE_RUNNING` indicates that the job is + // currently running. + // "JOB_STATE_DONE" - `JOB_STATE_DONE` indicates that the job has + // successfully completed. + // This is a terminal job state. This state may be set by the Cloud + // Dataflow + // service, as a transition from `JOB_STATE_RUNNING`. It may also be set + // via a + // Cloud Dataflow `UpdateJob` call, if the job has not yet reached a + // terminal + // state. + // "JOB_STATE_FAILED" - `JOB_STATE_FAILED` indicates that the job has + // failed. This is a + // terminal job state. This state may only be set by the Cloud + // Dataflow + // service, and only as a transition from `JOB_STATE_RUNNING`. + // "JOB_STATE_CANCELLED" - `JOB_STATE_CANCELLED` indicates that the + // job has been explicitly + // cancelled. This is a terminal job state. This state may only be + // set via a Cloud Dataflow `UpdateJob` call, and only if the job has + // not + // yet reached another terminal state. + // "JOB_STATE_UPDATED" - `JOB_STATE_UPDATED` indicates that the job + // was successfully updated, + // meaning that this job was stopped and another job was started, + // inheriting + // state from this one. This is a terminal job state. This state may + // only be + // set by the Cloud Dataflow service, and only as a transition + // from + // `JOB_STATE_RUNNING`. + // "JOB_STATE_DRAINING" - `JOB_STATE_DRAINING` indicates that the job + // is in the process of draining. + // A draining job has stopped pulling from its input sources and is + // processing + // any data that remains in-flight. This state may be set via a Cloud + // Dataflow + // `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. + // Jobs + // that are draining may only transition to + // `JOB_STATE_DRAINED`, + // `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. + // "JOB_STATE_DRAINED" - `JOB_STATE_DRAINED` indicates that the job + // has been drained. + // A drained job terminated by stopping pulling from its input sources + // and + // processing any data that remained in-flight when draining was + // requested. + // This state is a terminal state, may only be set by the Cloud + // Dataflow + // service, and only as a transition from `JOB_STATE_DRAINING`. CurrentState string `json:"currentState,omitempty"` // CurrentStateTime: The timestamp associated with the current state. @@ -1479,89 +1906,177 @@ type Job struct { // Environment: The environment for the job. Environment *Environment `json:"environment,omitempty"` - // ExecutionInfo: Information about how the Cloud Dataflow service will - // run the job. + // ExecutionInfo: Deprecated. ExecutionInfo *JobExecutionInfo `json:"executionInfo,omitempty"` - // Id: The unique ID of this job. This field is set by the Cloud - // Dataflow service when the Job is created, and is immutable for the - // life of the job. + // Id: The unique ID of this job. + // + // This field is set by the Cloud Dataflow service when the Job + // is + // created, and is immutable for the life of the job. Id string `json:"id,omitempty"` - // Labels: User-defined labels for this job. The labels map can contain - // no more than 64 entries. Entries of the labels map are UTF8 strings - // that comply with the following restrictions: * Keys must conform to - // regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: - // [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally - // constrained to be <= 128 bytes in size. + // Labels: User-defined labels for this job. 
+ // + // The labels map can contain no more than 64 entries. Entries of the + // labels + // map are UTF8 strings that comply with the following restrictions: + // + // * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} + // * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + // * Both keys and values are additionally constrained to be <= 128 + // bytes in + // size. Labels map[string]string `json:"labels,omitempty"` // Location: The location that contains this job. Location string `json:"location,omitempty"` - // Name: The user-specified Cloud Dataflow job name. Only one Job with a - // given name may exist in a project at any given time. If a caller - // attempts to create a Job with the same name as an already-existing - // Job, the attempt returns the existing Job. The name must match the - // regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?` + // Name: The user-specified Cloud Dataflow job name. + // + // Only one Job with a given name may exist in a project at any + // given time. If a caller attempts to create a Job with the same + // name as an already-existing Job, the attempt returns the + // existing Job. + // + // The name must match the regular + // expression + // `[a-z]([-a-z0-9]{0,38}[a-z0-9])?` Name string `json:"name,omitempty"` + // PipelineDescription: Preliminary field: The format of this data may + // change at any time. + // A description of the user pipeline and stages through which it is + // executed. + // Created by Cloud Dataflow service. Only retrieved + // with + // JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. + PipelineDescription *PipelineDescription `json:"pipelineDescription,omitempty"` + // ProjectId: The ID of the Cloud Platform project that the job belongs // to. ProjectId string `json:"projectId,omitempty"` // ReplaceJobId: If this job is an update of an existing job, this field - // is the job ID of the job it replaced. When sending a - // `CreateJobRequest`, you can update a job by specifying it here. The - // job named here is stopped, and its intermediate state is transferred - // to this job. + // is the job ID + // of the job it replaced. + // + // When sending a `CreateJobRequest`, you can update a job by specifying + // it + // here. The job named here is stopped, and its intermediate state + // is + // transferred to this job. ReplaceJobId string `json:"replaceJobId,omitempty"` // ReplacedByJobId: If another job is an update of this job (and thus, - // this job is in `JOB_STATE_UPDATED`), this field contains the ID of - // that job. + // this job is in + // `JOB_STATE_UPDATED`), this field contains the ID of that job. ReplacedByJobId string `json:"replacedByJobId,omitempty"` - // RequestedState: The job's requested state. `UpdateJob` may be used to - // switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` - // states, by setting requested_state. `UpdateJob` may also be used to - // directly set a job's requested state to `JOB_STATE_CANCELLED` or - // `JOB_STATE_DONE`, irrevocably terminating the job if it has not - // already reached a terminal state. + // RequestedState: The job's requested state. + // + // `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` + // and + // `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` + // may + // also be used to directly set a job's requested state + // to + // `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating + // the + // job if it has not already reached a terminal state. 
// // Possible values: - // "JOB_STATE_UNKNOWN" - // "JOB_STATE_STOPPED" - // "JOB_STATE_RUNNING" - // "JOB_STATE_DONE" - // "JOB_STATE_FAILED" - // "JOB_STATE_CANCELLED" - // "JOB_STATE_UPDATED" - // "JOB_STATE_DRAINING" - // "JOB_STATE_DRAINED" + // "JOB_STATE_UNKNOWN" - The job's run state isn't specified. + // "JOB_STATE_STOPPED" - `JOB_STATE_STOPPED` indicates that the job + // has not + // yet started to run. + // "JOB_STATE_RUNNING" - `JOB_STATE_RUNNING` indicates that the job is + // currently running. + // "JOB_STATE_DONE" - `JOB_STATE_DONE` indicates that the job has + // successfully completed. + // This is a terminal job state. This state may be set by the Cloud + // Dataflow + // service, as a transition from `JOB_STATE_RUNNING`. It may also be set + // via a + // Cloud Dataflow `UpdateJob` call, if the job has not yet reached a + // terminal + // state. + // "JOB_STATE_FAILED" - `JOB_STATE_FAILED` indicates that the job has + // failed. This is a + // terminal job state. This state may only be set by the Cloud + // Dataflow + // service, and only as a transition from `JOB_STATE_RUNNING`. + // "JOB_STATE_CANCELLED" - `JOB_STATE_CANCELLED` indicates that the + // job has been explicitly + // cancelled. This is a terminal job state. This state may only be + // set via a Cloud Dataflow `UpdateJob` call, and only if the job has + // not + // yet reached another terminal state. + // "JOB_STATE_UPDATED" - `JOB_STATE_UPDATED` indicates that the job + // was successfully updated, + // meaning that this job was stopped and another job was started, + // inheriting + // state from this one. This is a terminal job state. This state may + // only be + // set by the Cloud Dataflow service, and only as a transition + // from + // `JOB_STATE_RUNNING`. + // "JOB_STATE_DRAINING" - `JOB_STATE_DRAINING` indicates that the job + // is in the process of draining. + // A draining job has stopped pulling from its input sources and is + // processing + // any data that remains in-flight. This state may be set via a Cloud + // Dataflow + // `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. + // Jobs + // that are draining may only transition to + // `JOB_STATE_DRAINED`, + // `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. + // "JOB_STATE_DRAINED" - `JOB_STATE_DRAINED` indicates that the job + // has been drained. + // A drained job terminated by stopping pulling from its input sources + // and + // processing any data that remained in-flight when draining was + // requested. + // This state is a terminal state, may only be set by the Cloud + // Dataflow + // service, and only as a transition from `JOB_STATE_DRAINING`. RequestedState string `json:"requestedState,omitempty"` // Steps: The top-level steps that constitute the entire job. Steps []*Step `json:"steps,omitempty"` - // TempFiles: A set of files the system should be aware of that are used - // for temporary storage. These temporary files will be removed on job - // completion. No duplicates are allowed. No file patterns are - // supported. The supported files are: Google Cloud Storage: - // storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // TempFiles: A set of files the system should be aware of that are + // used + // for temporary storage. These temporary files will be + // removed on job completion. + // No duplicates are allowed. + // No file patterns are supported. 
+ // + // The supported files are: + // + // Google Cloud Storage: + // + // storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempFiles []string `json:"tempFiles,omitempty"` // TransformNameMapping: The map of transform name prefixes of the job - // to be replaced to the corresponding name prefixes of the new job. + // to be replaced to the + // corresponding name prefixes of the new job. TransformNameMapping map[string]string `json:"transformNameMapping,omitempty"` // Type: The type of Cloud Dataflow job. // // Possible values: - // "JOB_TYPE_UNKNOWN" - // "JOB_TYPE_BATCH" - // "JOB_TYPE_STREAMING" + // "JOB_TYPE_UNKNOWN" - The type of the job is unspecified, or + // unknown. + // "JOB_TYPE_BATCH" - A batch job with a well-defined end point: data + // is read, data is + // processed, data is written, and the job is done. + // "JOB_TYPE_STREAMING" - A continuously streaming job with no end: + // data is read, + // processed, and written continuously. Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1593,7 +2108,8 @@ func (s *Job) MarshalJSON() ([]byte, error) { } // JobExecutionInfo: Additional information about how a Cloud Dataflow -// job will be executed that isn't contained in the submitted job. +// job will be executed that +// isn't contained in the submitted job. type JobExecutionInfo struct { // Stages: A mapping from each stage to the information about that // stage. @@ -1622,12 +2138,13 @@ func (s *JobExecutionInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// JobExecutionStageInfo: Contains information about how a particular +// JobExecutionStageInfo: Contains information about how a +// particular // google.dataflow.v1beta3.Step will be executed. type JobExecutionStageInfo struct { - // StepName: The steps associated with the execution stage. Note that - // stages may have several steps, and that a given step might be run by - // more than one stage. + // StepName: The steps associated with the execution stage. + // Note that stages may have several steps, and that a given step + // might be run by more than one stage. StepName []string `json:"stepName,omitempty"` // ForceSendFields is a list of field names (e.g. "StepName") to @@ -1655,19 +2172,48 @@ func (s *JobExecutionStageInfo) MarshalJSON() ([]byte, error) { // JobMessage: A particular message pertaining to a Dataflow job. type JobMessage struct { - // Id: Identifies the message. This is automatically generated by the + // Id: Identifies the message. This is automatically generated by + // the // service; the caller should treat it as an opaque string. Id string `json:"id,omitempty"` // MessageImportance: Importance level of the message. // // Possible values: - // "JOB_MESSAGE_IMPORTANCE_UNKNOWN" - // "JOB_MESSAGE_DEBUG" - // "JOB_MESSAGE_DETAILED" - // "JOB_MESSAGE_BASIC" - // "JOB_MESSAGE_WARNING" - // "JOB_MESSAGE_ERROR" + // "JOB_MESSAGE_IMPORTANCE_UNKNOWN" - The message importance isn't + // specified, or is unknown. + // "JOB_MESSAGE_DEBUG" - The message is at the 'debug' level: + // typically only useful for + // software engineers working on the code the job is running. + // Typically, Dataflow pipeline runners do not display log messages + // at this level by default. + // "JOB_MESSAGE_DETAILED" - The message is at the 'detailed' level: + // somewhat verbose, but + // potentially useful to users. 
Typically, Dataflow pipeline + // runners do not display log messages at this level by default. + // These messages are displayed by default in the Dataflow + // monitoring UI. + // "JOB_MESSAGE_BASIC" - The message is at the 'basic' level: useful + // for keeping + // track of the execution of a Dataflow pipeline. Typically, + // Dataflow pipeline runners display log messages at this level + // by + // default, and these messages are displayed by default in the + // Dataflow monitoring UI. + // "JOB_MESSAGE_WARNING" - The message is at the 'warning' level: + // indicating a condition + // pertaining to a job which may require human intervention. + // Typically, Dataflow pipeline runners display log messages at + // this + // level by default, and these messages are displayed by default in + // the Dataflow monitoring UI. + // "JOB_MESSAGE_ERROR" - The message is at the 'error' level: + // indicating a condition + // preventing a job from succeeding. Typically, Dataflow + // pipeline + // runners display log messages at this level by default, and + // these + // messages are displayed by default in the Dataflow monitoring UI. MessageImportance string `json:"messageImportance,omitempty"` // MessageText: The text of the message. @@ -1700,11 +2246,16 @@ func (s *JobMessage) MarshalJSON() ([]byte, error) { } // JobMetrics: JobMetrics contains a collection of metrics descibing the -// detailed progress of a Dataflow job. Metrics correspond to -// user-defined and system-defined metrics in the job. This resource -// captures only the most recent values of each metric; time-series data -// can be queried for them (under the same metric names) from Cloud -// Monitoring. +// detailed progress +// of a Dataflow job. Metrics correspond to user-defined and +// system-defined +// metrics in the job. +// +// This resource captures only the most recent values of each +// metric; +// time-series data can be queried for them (under the same metric +// names) +// from Cloud Monitoring. type JobMetrics struct { // MetricTime: Timestamp as of which metric values are current. MetricTime string `json:"metricTime,omitempty"` @@ -1740,12 +2291,18 @@ func (s *JobMetrics) MarshalJSON() ([]byte, error) { } // KeyRangeDataDiskAssignment: Data disk assignment information for a -// specific key-range of a sharded computation. Currently we only -// support UTF-8 character splits to simplify encoding into JSON. +// specific key-range of a sharded +// computation. +// Currently we only support UTF-8 character splits to simplify encoding +// into +// JSON. type KeyRangeDataDiskAssignment struct { // DataDisk: The name of the data disk where data for this range is - // stored. This name is local to the Google Cloud Platform project and - // uniquely identifies the disk within that project, for example + // stored. + // This name is local to the Google Cloud Platform project and + // uniquely + // identifies the disk within that project, for + // example // "myproject-1014-104817-4c2-harness-0-disk-1". DataDisk string `json:"dataDisk,omitempty"` @@ -1779,24 +2336,31 @@ func (s *KeyRangeDataDiskAssignment) MarshalJSON() ([]byte, error) { } // KeyRangeLocation: Location information for a specific key-range of a -// sharded computation. Currently we only support UTF-8 character splits -// to simplify encoding into JSON. +// sharded computation. +// Currently we only support UTF-8 character splits to simplify encoding +// into +// JSON. 
type KeyRangeLocation struct { // DataDisk: The name of the data disk where data for this range is - // stored. This name is local to the Google Cloud Platform project and - // uniquely identifies the disk within that project, for example + // stored. + // This name is local to the Google Cloud Platform project and + // uniquely + // identifies the disk within that project, for + // example // "myproject-1014-104817-4c2-harness-0-disk-1". DataDisk string `json:"dataDisk,omitempty"` // DeliveryEndpoint: The physical location of this range assignment to - // be used for streaming computation cross-worker message delivery. + // be used for + // streaming computation cross-worker message delivery. DeliveryEndpoint string `json:"deliveryEndpoint,omitempty"` // End: The end (exclusive) of the key range. End string `json:"end,omitempty"` // PersistentDirectory: The location of the persistent state for this - // range, as a persistent directory in the worker local filesystem. + // range, as a + // persistent directory in the worker local filesystem. PersistentDirectory string `json:"persistentDirectory,omitempty"` // Start: The start (inclusive) of the key range. @@ -1840,11 +2404,13 @@ type LeaseWorkItemRequest struct { WorkItemTypes []string `json:"workItemTypes,omitempty"` // WorkerCapabilities: Worker capabilities. WorkItems might be limited - // to workers with specific capabilities. + // to workers with specific + // capabilities. WorkerCapabilities []string `json:"workerCapabilities,omitempty"` // WorkerId: Identifies the worker leasing work -- typically the ID of - // the virtual machine running the worker. + // the + // virtual machine running the worker. WorkerId string `json:"workerId,omitempty"` // ForceSendFields is a list of field names (e.g. "CurrentWorkerTime") @@ -1940,8 +2506,8 @@ func (s *ListJobMessagesResponse) MarshalJSON() ([]byte, error) { } // ListJobsResponse: Response to a request to list Cloud Dataflow jobs. -// This may be a partial response, depending on the page size in the -// ListJobsRequest. +// This may be a partial +// response, depending on the page size in the ListJobsRequest. type ListJobsResponse struct { // FailedLocation: Zero or more messages describing locations that // failed to respond. @@ -1983,20 +2549,25 @@ func (s *ListJobsResponse) MarshalJSON() ([]byte, error) { } // MapTask: MapTask consists of an ordered set of instructions, each of -// which describes one particular low-level operation for the worker to -// perform in order to accomplish the MapTask's WorkItem. Each -// instruction must appear in the list before any instructions which +// which +// describes one particular low-level operation for the worker +// to +// perform in order to accomplish the MapTask's WorkItem. +// +// Each instruction must appear in the list before any instructions +// which // depends on its output. type MapTask struct { // Instructions: The instructions in the MapTask. Instructions []*ParallelInstruction `json:"instructions,omitempty"` - // StageName: System-defined name of the stage containing this MapTask. + // StageName: System-defined name of the stage containing this + // MapTask. // Unique across the workflow. StageName string `json:"stageName,omitempty"` - // SystemName: System-defined name of this MapTask. Unique across the - // workflow. + // SystemName: System-defined name of this MapTask. + // Unique across the workflow. SystemName string `json:"systemName,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Instructions") to @@ -2023,10 +2594,11 @@ func (s *MapTask) MarshalJSON() ([]byte, error) { } // MetricShortId: The metric short id is returned to the user alongside -// an offset into ReportWorkItemStatusRequest +// an offset into +// ReportWorkItemStatusRequest type MetricShortId struct { - // MetricIndex: The index of the corresponding metric in the - // ReportWorkItemStatusRequest. Required. + // MetricIndex: The index of the corresponding metric in + // the ReportWorkItemStatusRequest. Required. MetricIndex int64 `json:"metricIndex,omitempty"` // ShortId: The service-generated short identifier for the metric. @@ -2056,22 +2628,28 @@ func (s *MetricShortId) MarshalJSON() ([]byte, error) { } // MetricStructuredName: Identifies a metric, by describing the source -// which generated the metric. +// which generated the +// metric. type MetricStructuredName struct { // Context: Zero or more labeled fields which identify the part of the - // job this metric is associated with, such as the name of a step or - // collection. For example, built-in counters associated with steps will - // have context['step'] = . Counters associated with PCollections in the - // SDK will have context['pcollection'] = - // . + // job this + // metric is associated with, such as the name of a step or + // collection. + // + // For example, built-in counters associated with steps will + // have + // context['step'] = . Counters associated with + // PCollections + // in the SDK will have context['pcollection'] = . Context map[string]string `json:"context,omitempty"` // Name: Worker-defined metric name. Name string `json:"name,omitempty"` // Origin: Origin (namespace) of metric name. May be blank for - // user-define metrics; will be "dataflow" for metrics defined by the - // Dataflow service or SDK. + // user-define metrics; + // will be "dataflow" for metrics defined by the Dataflow service or + // SDK. Origin string `json:"origin,omitempty"` // ForceSendFields is a list of field names (e.g. "Context") to @@ -2100,50 +2678,66 @@ func (s *MetricStructuredName) MarshalJSON() ([]byte, error) { // MetricUpdate: Describes the state of a metric. type MetricUpdate struct { // Cumulative: True if this metric is reported as the total cumulative - // aggregate value accumulated since the worker started working on this - // WorkItem. By default this is false, indicating that this metric is - // reported as a delta that is not associated with any WorkItem. + // aggregate + // value accumulated since the worker started working on this + // WorkItem. + // By default this is false, indicating that this metric is reported + // as a delta that is not associated with any WorkItem. Cumulative bool `json:"cumulative,omitempty"` // Internal: Worker-computed aggregate value for internal use by the - // Dataflow service. + // Dataflow + // service. Internal interface{} `json:"internal,omitempty"` - // Kind: Metric aggregation kind. The possible metric aggregation kinds - // are "Sum", "Max", "Min", "Mean", "Set", "And", and "Or". The - // specified aggregation kind is case-insensitive. If omitted, this is - // not an aggregated value but instead a single metric sample value. + // Kind: Metric aggregation kind. The possible metric aggregation kinds + // are + // "Sum", "Max", "Min", "Mean", "Set", "And", and "Or". + // The specified aggregation kind is case-insensitive. + // + // If omitted, this is not an aggregated value but instead + // a single metric sample value. 
Kind string `json:"kind,omitempty"` // MeanCount: Worker-computed aggregate value for the "Mean" aggregation - // kind. This holds the count of the aggregated values and is used in - // combination with mean_sum above to obtain the actual mean aggregate - // value. The only possible value type is Long. + // kind. + // This holds the count of the aggregated values and is used in + // combination + // with mean_sum above to obtain the actual mean aggregate value. + // The only possible value type is Long. MeanCount interface{} `json:"meanCount,omitempty"` // MeanSum: Worker-computed aggregate value for the "Mean" aggregation - // kind. This holds the sum of the aggregated values and is used in - // combination with mean_count below to obtain the actual mean aggregate - // value. The only possible value types are Long and Double. + // kind. + // This holds the sum of the aggregated values and is used in + // combination + // with mean_count below to obtain the actual mean aggregate value. + // The only possible value types are Long and Double. MeanSum interface{} `json:"meanSum,omitempty"` // Name: Name of the metric. Name *MetricStructuredName `json:"name,omitempty"` // Scalar: Worker-computed aggregate value for aggregation kinds "Sum", - // "Max", "Min", "And", and "Or". The possible value types are Long, - // Double, and Boolean. + // "Max", "Min", + // "And", and "Or". The possible value types are Long, Double, and + // Boolean. Scalar interface{} `json:"scalar,omitempty"` // Set: Worker-computed aggregate value for the "Set" aggregation kind. - // The only possible value type is a list of Values whose type can be - // Long, Double, or String, according to the metric's type. All Values - // in the list must be of the same type. + // The only + // possible value type is a list of Values whose type can be Long, + // Double, + // or String, according to the metric's type. All Values in the list + // must + // be of the same type. Set interface{} `json:"set,omitempty"` // UpdateTime: Timestamp associated with the metric value. Optional when - // workers are reporting work progress; it will be filled in responses - // from the metrics API. + // workers are + // reporting work progress; it will be filled in responses from + // the + // metrics API. UpdateTime string `json:"updateTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Cumulative") to @@ -2171,9 +2765,12 @@ func (s *MetricUpdate) MarshalJSON() ([]byte, error) { // MountedDataDisk: Describes mounted data disk. type MountedDataDisk struct { - // DataDisk: The name of the data disk. This name is local to the Google - // Cloud Platform project and uniquely identifies the disk within that - // project, for example "myproject-1014-104817-4c2-harness-0-disk-1". + // DataDisk: The name of the data disk. + // This name is local to the Google Cloud Platform project and + // uniquely + // identifies the disk within that project, for + // example + // "myproject-1014-104817-4c2-harness-0-disk-1". DataDisk string `json:"dataDisk,omitempty"` // ForceSendFields is a list of field names (e.g. "DataDisk") to @@ -2202,7 +2799,8 @@ func (s *MountedDataDisk) MarshalJSON() ([]byte, error) { // MultiOutputInfo: Information about an output of a multi-output DoFn. type MultiOutputInfo struct { // Tag: The id of the tag the user code will emit to this output by; - // this should correspond to the tag of some SideInputInfo. + // this + // should correspond to the tag of some SideInputInfo. 
Tag string `json:"tag,omitempty"` // ForceSendFields is a list of field names (e.g. "Tag") to @@ -2233,15 +2831,18 @@ type NameAndKind struct { // Kind: Counter aggregation kind. // // Possible values: - // "INVALID" - // "SUM" - // "MAX" - // "MIN" - // "MEAN" - // "OR" - // "AND" - // "SET" - // "DISTRIBUTION" + // "INVALID" - Counter aggregation kind was not set. + // "SUM" - Aggregated value is the sum of all contributed values. + // "MAX" - Aggregated value is the max of all contributed values. + // "MIN" - Aggregated value is the min of all contributed values. + // "MEAN" - Aggregated value is the mean of all contributed values. + // "OR" - Aggregated value represents the logical 'or' of all + // contributed values. + // "AND" - Aggregated value represents the logical 'and' of all + // contributed values. + // "SET" - Aggregated value is a set of unique contributed values. + // "DISTRIBUTION" - Aggregated value captures statistics about a + // distribution. Kind string `json:"kind,omitempty"` // Name: Name of the counter. @@ -2271,16 +2872,28 @@ func (s *NameAndKind) MarshalJSON() ([]byte, error) { } // Package: The packages that must be installed in order for a worker to -// run the steps of the Cloud Dataflow job that will be assigned to its -// worker pool. This is the mechanism by which the Cloud Dataflow SDK -// causes code to be loaded onto the workers. For example, the Cloud -// Dataflow Java SDK might use this to install jars containing the -// user's code and all of the various dependencies (libraries, data -// files, etc.) required in order for that code to run. +// run the +// steps of the Cloud Dataflow job that will be assigned to its +// worker +// pool. +// +// This is the mechanism by which the Cloud Dataflow SDK causes code +// to +// be loaded onto the workers. For example, the Cloud Dataflow Java +// SDK +// might use this to install jars containing the user's code and all of +// the +// various dependencies (libraries, data files, etc.) required in +// order +// for that code to run. type Package struct { // Location: The resource to read the package from. The supported - // resource type is: Google Cloud Storage: - // storage.googleapis.com/{bucket} bucket.storage.googleapis.com/ + // resource type is: + // + // Google Cloud Storage: + // + // storage.googleapis.com/{bucket} + // bucket.storage.googleapis.com/ Location string `json:"location,omitempty"` // Name: The name of the package. @@ -2309,15 +2922,16 @@ func (s *Package) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ParDoInstruction: An instruction that does a ParDo operation. Takes -// one main input and zero or more side inputs, and produces zero or -// more outputs. Runs user code. +// ParDoInstruction: An instruction that does a ParDo operation. +// Takes one main input and zero or more side inputs, and produces +// zero or more outputs. +// Runs user code. type ParDoInstruction struct { // Input: The input. Input *InstructionInput `json:"input,omitempty"` // MultiOutputInfos: Information about each of the outputs, if user_fn - // is a MultiDoFn. + // is a MultiDoFn. MultiOutputInfos []*MultiOutputInfo `json:"multiOutputInfos,omitempty"` // NumOutputs: The number of outputs. @@ -2378,8 +2992,8 @@ type ParallelInstruction struct { // Read: Additional information for Read instructions. Read *ReadInstruction `json:"read,omitempty"` - // SystemName: System-defined name of this operation. Unique across the - // workflow. 
+ // SystemName: System-defined name of this operation. + // Unique across the workflow. SystemName string `json:"systemName,omitempty"` // Write: Additional information for Write instructions. @@ -2408,8 +3022,50 @@ func (s *ParallelInstruction) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ParameterMetadata: Metadata for a specific parameter. +type ParameterMetadata struct { + // HelpText: Required. The help text to display for the parameter. + HelpText string `json:"helpText,omitempty"` + + // IsOptional: Optional. Whether the parameter is optional. Defaults to + // false. + IsOptional bool `json:"isOptional,omitempty"` + + // Label: Required. The label to display for the parameter. + Label string `json:"label,omitempty"` + + // Name: Required. The name of the parameter. + Name string `json:"name,omitempty"` + + // Regexes: Optional. Regexes that the parameter must match. + Regexes []string `json:"regexes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HelpText") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HelpText") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ParameterMetadata) MarshalJSON() ([]byte, error) { + type noMethod ParameterMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // PartialGroupByKeyInstruction: An instruction that does a partial -// group-by-key. One input and one output. +// group-by-key. +// One input and one output. type PartialGroupByKeyInstruction struct { // Input: Describes the input to the partial group-by-key instruction. Input *InstructionInput `json:"input,omitempty"` @@ -2419,13 +3075,13 @@ type PartialGroupByKeyInstruction struct { InputElementCodec googleapi.RawMessage `json:"inputElementCodec,omitempty"` // OriginalCombineValuesInputStoreName: If this instruction includes a - // combining function this is the name of the intermediate store between - // the GBK and the CombineValues. + // combining function this is the name of the + // intermediate store between the GBK and the CombineValues. OriginalCombineValuesInputStoreName string `json:"originalCombineValuesInputStoreName,omitempty"` // OriginalCombineValuesStepName: If this instruction includes a - // combining function, this is the name of the CombineValues instruction - // lifted into this instruction. + // combining function, this is the name of the + // CombineValues instruction lifted into this instruction. OriginalCombineValuesStepName string `json:"originalCombineValuesStepName,omitempty"` // SideInputs: Zero or more side inputs. 
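The ParameterMetadata type added above, like every struct in this generated file, carries the ForceSendFields/NullFields hooks described in its field comments. As a minimal sketch of how a caller might use that convention (not part of the patch; the google.golang.org/api/dataflow/v1b3 import path and the example values are assumptions):

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	// Describe a hypothetical template parameter. IsOptional is false,
	// the zero value, so the omitempty tag would normally drop it from
	// the request body; naming it in ForceSendFields tells the generated
	// MarshalJSON (via gensupport.MarshalJSON) to emit it anyway.
	p := &dataflow.ParameterMetadata{
		Name:            "inputFile",
		Label:           "Input file",
		HelpText:        "Cloud Storage path to read input from.",
		Regexes:         []string{"^gs://.*"},
		IsOptional:      false,
		ForceSendFields: []string{"IsOptional"},
	}

	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // includes "isOptional":false alongside the set fields
}

The same pattern applies to NullFields when a caller needs to send an explicit JSON null in patch-style requests.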
@@ -2457,8 +3113,50 @@ func (s *PartialGroupByKeyInstruction) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PipelineDescription: A descriptive representation of submitted +// pipeline as well as the executed +// form. This data is provided by the Dataflow service for ease of +// visualizing +// the pipeline and interpretting Dataflow provided metrics. +type PipelineDescription struct { + // DisplayData: Pipeline level display data. + DisplayData []*DisplayData `json:"displayData,omitempty"` + + // ExecutionPipelineStage: Description of each stage of execution of the + // pipeline. + ExecutionPipelineStage []*ExecutionStageSummary `json:"executionPipelineStage,omitempty"` + + // OriginalPipelineTransform: Description of each transform in the + // pipeline and collections between them. + OriginalPipelineTransform []*TransformSummary `json:"originalPipelineTransform,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayData") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayData") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PipelineDescription) MarshalJSON() ([]byte, error) { + type noMethod PipelineDescription + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Position: Position defines a position within a collection of data. -// The value can be either the end position, a key (used with ordered +// The value +// can be either the end position, a key (used with +// ordered // collections), a byte offset, or a record index. type Position struct { // ByteOffset: Position is a byte offset. @@ -2467,7 +3165,8 @@ type Position struct { // ConcatPosition: CloudPosition is a concat position. ConcatPosition *ConcatPosition `json:"concatPosition,omitempty"` - // End: Position is past all other positions. Also useful for the end + // End: Position is past all other positions. Also useful for the + // end // position of an unbounded range. End bool `json:"end,omitempty"` @@ -2478,7 +3177,8 @@ type Position struct { RecordIndex int64 `json:"recordIndex,omitempty,string"` // ShufflePosition: CloudPosition is a base64 encoded - // BatchShufflePosition (with FIXED sharding). + // BatchShufflePosition (with FIXED + // sharding). ShufflePosition string `json:"shufflePosition,omitempty"` // ForceSendFields is a list of field names (e.g. "ByteOffset") to @@ -2505,35 +3205,37 @@ func (s *Position) MarshalJSON() ([]byte, error) { } // PubsubLocation: Identifies a pubsub location to use for transferring -// data into or out of a streaming Dataflow job. +// data into or +// out of a streaming Dataflow job. type PubsubLocation struct { // DropLateData: Indicates whether the pipeline allows late-arriving // data. 
DropLateData bool `json:"dropLateData,omitempty"` // IdLabel: If set, contains a pubsub label from which to extract record - // ids. If left empty, record deduplication will be strictly best - // effort. + // ids. + // If left empty, record deduplication will be strictly best effort. IdLabel string `json:"idLabel,omitempty"` - // Subscription: A pubsub subscription, in the form of - // "pubsub.googleapis.com/subscriptions/ - // /" + // Subscription: A pubsub subscription, in the form + // of + // "pubsub.googleapis.com/subscriptions//" Subscription string `json:"subscription,omitempty"` // TimestampLabel: If set, contains a pubsub label from which to extract - // record timestamps. If left empty, record timestamps will be generated - // upon arrival. + // record timestamps. + // If left empty, record timestamps will be generated upon arrival. TimestampLabel string `json:"timestampLabel,omitempty"` - // Topic: A pubsub topic, in the form of - // "pubsub.googleapis.com/topics/ - // /" + // Topic: A pubsub topic, in the form + // of + // "pubsub.googleapis.com/topics//" Topic string `json:"topic,omitempty"` // TrackingSubscription: If set, specifies the pubsub subscription that - // will be used for tracking custom time timestamps for watermark - // estimation. + // will be used for tracking + // custom time timestamps for watermark estimation. TrackingSubscription string `json:"trackingSubscription,omitempty"` // WithAttributes: If true, then the client has requested to get pubsub @@ -2563,8 +3265,8 @@ func (s *PubsubLocation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReadInstruction: An instruction that reads records. Takes no inputs, -// produces one output. +// ReadInstruction: An instruction that reads records. +// Takes no inputs, produces one output. type ReadInstruction struct { // Source: The source to read from. Source *Source `json:"source,omitempty"` @@ -2602,14 +3304,18 @@ type ReportWorkItemStatusRequest struct { Location string `json:"location,omitempty"` // WorkItemStatuses: The order is unimportant, except that the order of - // the WorkItemServiceState messages in the ReportWorkItemStatusResponse + // the + // WorkItemServiceState messages in the + // ReportWorkItemStatusResponse // corresponds to the order of WorkItemStatus messages here. WorkItemStatuses []*WorkItemStatus `json:"workItemStatuses,omitempty"` - // WorkerId: The ID of the worker reporting the WorkItem status. If this - // does not match the ID of the worker which the Dataflow service - // believes currently has the lease on the WorkItem, the report will be - // dropped (with an error response). + // WorkerId: The ID of the worker reporting the WorkItem status. If + // this + // does not match the ID of the worker which the Dataflow + // service + // believes currently has the lease on the WorkItem, the report + // will be dropped (with an error response). WorkerId string `json:"workerId,omitempty"` // ForceSendFields is a list of field names (e.g. "CurrentWorkerTime") @@ -2640,9 +3346,12 @@ func (s *ReportWorkItemStatusRequest) MarshalJSON() ([]byte, error) { // status of WorkItems. type ReportWorkItemStatusResponse struct { // WorkItemServiceStates: A set of messages indicating the service-side - // state for each WorkItem whose status was reported, in the same order - // as the WorkItemStatus messages in the ReportWorkItemStatusRequest - // which resulting in this response. 
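A hedged sketch of filling in the PubsubLocation documented above for a streaming job's input: the topic and subscription strings follow the "pubsub.googleapis.com/..." forms from the field comments (the project and name segments are placeholders), and the google.golang.org/api/dataflow/v1b3 import path is an assumption.

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	loc := &dataflow.PubsubLocation{
		// Resource names in the documented "pubsub.googleapis.com/..." form.
		Topic:        "pubsub.googleapis.com/topics/my-project/my-topic",
		Subscription: "pubsub.googleapis.com/subscriptions/my-project/my-sub",

		// Pull record ids and event timestamps from these labels; left empty,
		// deduplication is best effort and timestamps are assigned on arrival.
		IdLabel:        "record_id",
		TimestampLabel: "event_time",

		DropLateData: true,
	}

	b, _ := json.Marshal(loc)
	fmt.Println(string(b))
}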
+ // state for each + // WorkItem whose status was reported, in the same order as + // the + // WorkItemStatus messages in the ReportWorkItemStatusRequest + // which + // resulting in this response. WorkItemServiceStates []*WorkItemServiceState `json:"workItemServiceStates,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2675,13 +3384,19 @@ func (s *ReportWorkItemStatusResponse) MarshalJSON() ([]byte, error) { } // ReportedParallelism: Represents the level of parallelism in a -// WorkItem's input, reported by the worker. +// WorkItem's input, +// reported by the worker. type ReportedParallelism struct { // IsInfinite: Specifies whether the parallelism is infinite. If true, - // "value" is ignored. Infinite parallelism means the service will - // assume that the work item can always be split into more non-empty - // work items by dynamic splitting. This is a work-around for lack of - // support for infinity by the current JSON-based Java RPC stack. + // "value" is + // ignored. + // Infinite parallelism means the service will assume that the work + // item + // can always be split into more non-empty work items by dynamic + // splitting. + // This is a work-around for lack of support for infinity by the + // current + // JSON-based Java RPC stack. IsInfinite bool `json:"isInfinite,omitempty"` // Value: Specifies the level of parallelism in case it is finite. @@ -2725,17 +3440,15 @@ func (s *ReportedParallelism) UnmarshalJSON(data []byte) error { } // ResourceUtilizationReport: Worker metrics exported from workers. This -// contains resource utilization metrics accumulated from a variety of -// sources. For more information, see go/df-resource-signals. Note that -// this proto closely follows the structure of its DFE siblings in its -// contents. +// contains resource utilization +// metrics accumulated from a variety of sources. For more information, +// see +// go/df-resource-signals. type ResourceUtilizationReport struct { - // Metrics: Each Struct must parallel DFE worker metrics protos (eg., - // cpu_time metric will have nested values “timestamp_ms, total_ms, - // rate”). - Metrics []googleapi.RawMessage `json:"metrics,omitempty"` + // CpuTime: CPU utilization samples. + CpuTime []*CPUTime `json:"cpuTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "Metrics") to + // ForceSendFields is a list of field names (e.g. "CpuTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2743,7 +3456,7 @@ type ResourceUtilizationReport struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Metrics") to include in + // NullFields is a list of field names (e.g. "CpuTime") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -2766,24 +3479,27 @@ type ResourceUtilizationReportResponse struct { // RuntimeEnvironment: The environment values to set at runtime. type RuntimeEnvironment struct { // BypassTempDirValidation: Whether to bypass the safety checks for the - // job's temporary directory. Use with caution. + // job's temporary directory. + // Use with caution. 
BypassTempDirValidation bool `json:"bypassTempDirValidation,omitempty"` // MaxWorkers: The maximum number of Google Compute Engine instances to - // be made available to your pipeline during execution, from 1 to 1000. + // be made + // available to your pipeline during execution, from 1 to 1000. MaxWorkers int64 `json:"maxWorkers,omitempty"` // ServiceAccountEmail: The email address of the service account to run // the job as. ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` - // TempLocation: The Cloud Storage path to use for temporary files. Must - // be a valid Cloud Storage URL, beginning with `gs://`. + // TempLocation: The Cloud Storage path to use for temporary files. + // Must be a valid Cloud Storage URL, beginning with `gs://`. TempLocation string `json:"tempLocation,omitempty"` // Zone: The Compute Engine [availability // zone](https://cloud.google.com/compute/docs/regions-zones/regions-zone - // s) for launching worker instances to run your pipeline. + // s) + // for launching worker instances to run your pipeline. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2846,7 +3562,8 @@ func (s *SendDebugCaptureRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SendDebugCaptureResponse: Response to a send capture request. nothing +// SendDebugCaptureResponse: Response to a send capture request. +// nothing type SendDebugCaptureResponse struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -2929,11 +3646,12 @@ type SeqMapTask struct { OutputInfos []*SeqMapTaskOutputInfo `json:"outputInfos,omitempty"` // StageName: System-defined name of the stage containing the SeqDo - // operation. Unique across the workflow. + // operation. + // Unique across the workflow. StageName string `json:"stageName,omitempty"` - // SystemName: System-defined name of the SeqDo operation. Unique across - // the workflow. + // SystemName: System-defined name of the SeqDo operation. + // Unique across the workflow. SystemName string `json:"systemName,omitempty"` // UserFn: The user function to invoke. @@ -3033,12 +3751,15 @@ type SideInputInfo struct { Kind googleapi.RawMessage `json:"kind,omitempty"` // Sources: The source(s) to read element(s) from to get the value of - // this side input. If more than one source, then the elements are taken - // from the sources, in the specified order if order matters. At least - // one source is required. + // this side input. + // If more than one source, then the elements are taken from + // the + // sources, in the specified order if order matters. + // At least one source is required. Sources []*Source `json:"sources,omitempty"` - // Tag: The id of the tag the user code will access this side input by; + // Tag: The id of the tag the user code will access this side input + // by; // this should correspond to the tag of some MultiOutputInfo. Tag string `json:"tag,omitempty"` @@ -3098,39 +3819,55 @@ func (s *Sink) MarshalJSON() ([]byte, error) { // Source: A source that records can be read and decoded from. type Source struct { - // BaseSpecs: While splitting, sources may specify the produced bundles - // as differences against another source, in order to save backend-side - // memory and allow bigger jobs. For details, see SourceSplitRequest. 
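Since RuntimeEnvironment is what callers populate when launching a job from a template, a minimal sketch follows; the bucket, zone, and worker count are placeholders, and the google.golang.org/api/dataflow/v1b3 import path is assumed.

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	env := &dataflow.RuntimeEnvironment{
		MaxWorkers:   10,                   // between 1 and 1000
		TempLocation: "gs://my-bucket/tmp", // must be a gs:// URL
		Zone:         "us-central1-f",      // Compute Engine availability zone
	}

	b, _ := json.Marshal(env)
	fmt.Println(string(b))
}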
To - // support this use case, the full set of parameters of the source is - // logically obtained by taking the latest explicitly specified value of - // each parameter in the order: base_specs (later items win), spec - // (overrides anything in base_specs). + // BaseSpecs: While splitting, sources may specify the produced + // bundles + // as differences against another source, in order to save + // backend-side + // memory and allow bigger jobs. For details, see SourceSplitRequest. + // To support this use case, the full set of parameters of the source + // is logically obtained by taking the latest explicitly specified + // value + // of each parameter in the order: + // base_specs (later items win), spec (overrides anything in + // base_specs). BaseSpecs []googleapi.RawMessage `json:"baseSpecs,omitempty"` // Codec: The codec to use to decode data read from the source. Codec googleapi.RawMessage `json:"codec,omitempty"` // DoesNotNeedSplitting: Setting this value to true hints to the - // framework that the source doesn't need splitting, and using - // SourceSplitRequest on it would yield - // SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter may set this - // to true when splitting a single file into a set of byte ranges of - // appropriate size, and set this to false when splitting a filepattern - // into individual files. However, for efficiency, a file splitter may - // decide to produce file subranges directly from the filepattern to - // avoid a splitting round-trip. See SourceSplitRequest for an overview - // of the splitting process. This field is meaningful only in the Source - // objects populated by the user (e.g. when filling in a DerivedSource). - // Source objects supplied by the framework to the user don't have this - // field populated. + // framework that the source + // doesn't need splitting, and using SourceSplitRequest on it + // would + // yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. + // + // E.g. a file splitter may set this to true when splitting a single + // file + // into a set of byte ranges of appropriate size, and set this + // to false when splitting a filepattern into individual files. + // However, for efficiency, a file splitter may decide to produce + // file subranges directly from the filepattern to avoid a + // splitting + // round-trip. + // + // See SourceSplitRequest for an overview of the splitting + // process. + // + // This field is meaningful only in the Source objects populated + // by the user (e.g. when filling in a DerivedSource). + // Source objects supplied by the framework to the user don't have + // this field populated. DoesNotNeedSplitting bool `json:"doesNotNeedSplitting,omitempty"` // Metadata: Optionally, metadata for this source can be supplied right - // away, avoiding a SourceGetMetadataOperation roundtrip (see - // SourceOperationRequest). This field is meaningful only in the Source - // objects populated by the user (e.g. when filling in a DerivedSource). - // Source objects supplied by the framework to the user don't have this - // field populated. + // away, + // avoiding a SourceGetMetadataOperation roundtrip + // (see SourceOperationRequest). + // + // This field is meaningful only in the Source objects populated + // by the user (e.g. when filling in a DerivedSource). + // Source objects supplied by the framework to the user don't have + // this field populated. Metadata *SourceMetadata `json:"metadata,omitempty"` // Spec: The source to read from, plus its parameters. 
@@ -3256,20 +3993,25 @@ func (s *SourceGetMetadataResponse) MarshalJSON() ([]byte, error) { } // SourceMetadata: Metadata about a Source useful for automatically -// optimizing and tuning the pipeline, etc. +// optimizing +// and tuning the pipeline, etc. type SourceMetadata struct { // EstimatedSizeBytes: An estimate of the total size (in bytes) of the - // data that would be read from this source. This estimate is in terms - // of external storage size, before any decompression or other - // processing done by the reader. + // data that would be + // read from this source. This estimate is in terms of external + // storage + // size, before any decompression or other processing done by the + // reader. EstimatedSizeBytes int64 `json:"estimatedSizeBytes,omitempty,string"` // Infinite: Specifies that the size of this source is known to be - // infinite (this is a streaming source). + // infinite + // (this is a streaming source). Infinite bool `json:"infinite,omitempty"` // ProducesSortedKeys: Whether this source is known to produce key/value - // pairs with the (encoded) keys in lexicographically sorted order. + // pairs with + // the (encoded) keys in lexicographically sorted order. ProducesSortedKeys bool `json:"producesSortedKeys,omitempty"` // ForceSendFields is a list of field names (e.g. "EstimatedSizeBytes") @@ -3297,8 +4039,8 @@ func (s *SourceMetadata) MarshalJSON() ([]byte, error) { } // SourceOperationRequest: A work item that represents the different -// operations that can be performed on a user-defined Source -// specification. +// operations that can be +// performed on a user-defined Source specification. type SourceOperationRequest struct { // GetMetadata: Information about a request to get metadata about a // source. @@ -3331,8 +4073,9 @@ func (s *SourceOperationRequest) MarshalJSON() ([]byte, error) { } // SourceOperationResponse: The result of a SourceOperationRequest, -// specified in ReportWorkItemStatusRequest.source_operation when the -// work item is completed. +// specified in +// ReportWorkItemStatusRequest.source_operation when the work item +// is completed. type SourceOperationResponse struct { // GetMetadata: A response to a request to get metadata about a source. GetMetadata *SourceGetMetadataResponse `json:"getMetadata,omitempty"` @@ -3364,11 +4107,12 @@ func (s *SourceOperationResponse) MarshalJSON() ([]byte, error) { } // SourceSplitOptions: Hints for splitting a Source into bundles (parts -// for parallel processing) using SourceSplitRequest. +// for parallel +// processing) using SourceSplitRequest. type SourceSplitOptions struct { // DesiredBundleSizeBytes: The source should be split into a set of - // bundles where the estimated size of each is approximately this many - // bytes. + // bundles where the estimated size + // of each is approximately this many bytes. DesiredBundleSizeBytes int64 `json:"desiredBundleSizeBytes,omitempty,string"` // DesiredShardSizeBytes: DEPRECATED in favor of @@ -3401,17 +4145,28 @@ func (s *SourceSplitOptions) MarshalJSON() ([]byte, error) { } // SourceSplitRequest: Represents the operation to split a high-level -// Source specification into bundles (parts for parallel processing). At -// a high level, splitting of a source into bundles happens as follows: -// SourceSplitRequest is applied to the source. If it returns +// Source specification +// into bundles (parts for parallel processing). 
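As noted in the Source type earlier, metadata can be supplied up front to avoid a SourceGetMetadataOperation roundtrip; the sketch below shows one plausible shape of that using the SourceMetadata fields documented above, with invented sizes and an assumed google.golang.org/api/dataflow/v1b3 import path.

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	src := &dataflow.Source{
		// Supplying metadata here avoids a SourceGetMetadataOperation
		// roundtrip (see SourceOperationRequest).
		Metadata: &dataflow.SourceMetadata{
			EstimatedSizeBytes: 10 << 20, // ~10 MiB of external storage, pre-decompression
			Infinite:           false,    // a bounded (batch) source
			ProducesSortedKeys: false,
		},
		// A single small file may also hint that no further splitting is needed.
		DoesNotNeedSplitting: true,
	}

	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}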
+// +// At a high level, splitting of a source into bundles happens as +// follows: +// SourceSplitRequest is applied to the source. If it +// returns // SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and -// the source is used "as is". Otherwise, splitting is applied -// recursively to each produced DerivedSource. As an optimization, for -// any Source, if its does_not_need_splitting is true, the framework -// assumes that splitting this source would return +// the source +// is used "as is". Otherwise, splitting is applied recursively to +// each +// produced DerivedSource. +// +// As an optimization, for any Source, if its does_not_need_splitting +// is +// true, the framework assumes that splitting this source would +// return // SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a -// SourceSplitRequest. This applies both to the initial source being -// split and to bundles produced from it. +// SourceSplitRequest. +// This applies both to the initial source being split and to +// bundles +// produced from it. type SourceSplitRequest struct { // Options: Hints for tuning the splitting process. Options *SourceSplitOptions `json:"options,omitempty"` @@ -3445,21 +4200,29 @@ func (s *SourceSplitRequest) MarshalJSON() ([]byte, error) { // SourceSplitResponse: The response to a SourceSplitRequest. type SourceSplitResponse struct { // Bundles: If outcome is SPLITTING_HAPPENED, then this is a list of - // bundles into which the source was split. Otherwise this field is - // ignored. This list can be empty, which means the source represents an - // empty input. + // bundles + // into which the source was split. Otherwise this field is + // ignored. + // This list can be empty, which means the source represents an empty + // input. Bundles []*DerivedSource `json:"bundles,omitempty"` // Outcome: Indicates whether splitting happened and produced a list of - // bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current source - // should be processed "as is" without splitting. "bundles" is ignored - // in this case. If this is SPLITTING_HAPPENED, then "bundles" contains - // a list of bundles into which the source was split. + // bundles. + // If this is USE_CURRENT_SOURCE_AS_IS, the current source should + // be processed "as is" without splitting. "bundles" is ignored in this + // case. + // If this is SPLITTING_HAPPENED, then "bundles" contains a list + // of + // bundles into which the source was split. // // Possible values: - // "SOURCE_SPLIT_OUTCOME_UNKNOWN" - // "SOURCE_SPLIT_OUTCOME_USE_CURRENT" - // "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED" + // "SOURCE_SPLIT_OUTCOME_UNKNOWN" - The source split outcome is + // unknown, or unspecified. + // "SOURCE_SPLIT_OUTCOME_USE_CURRENT" - The current source should be + // processed "as is" without splitting. + // "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED" - Splitting produced a + // list of bundles. Outcome string `json:"outcome,omitempty"` // Shards: DEPRECATED in favor of bundles. @@ -3493,10 +4256,14 @@ type SourceSplitShard struct { // DerivationMode: DEPRECATED // // Possible values: - // "SOURCE_DERIVATION_MODE_UNKNOWN" - // "SOURCE_DERIVATION_MODE_INDEPENDENT" - // "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" - // "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" + // "SOURCE_DERIVATION_MODE_UNKNOWN" - The source derivation is + // unknown, or unspecified. + // "SOURCE_DERIVATION_MODE_INDEPENDENT" - Produce a completely + // independent Source with no base. 
+ // "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" - Produce a Source based + // on the Source being split. + // "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" - Produce a Source + // based on the base of the Source being split. DerivationMode string `json:"derivationMode,omitempty"` // Source: DEPRECATED @@ -3527,7 +4294,8 @@ func (s *SourceSplitShard) MarshalJSON() ([]byte, error) { } // SplitInt64: A representation of an int64, n, that is immune to -// precision loss when encoded in JSON. +// precision loss when +// encoded in JSON. type SplitInt64 struct { // HighBits: The high order bits, including the sign: n >> 32. HighBits int64 `json:"highBits,omitempty"` @@ -3558,6 +4326,46 @@ func (s *SplitInt64) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// StageSource: Description of an input or output of an execution stage. +type StageSource struct { + // Name: Dataflow service generated name for this source. + Name string `json:"name,omitempty"` + + // OriginalTransformOrCollection: User name for the original user + // transform or collection with which this + // source is most closely associated. + OriginalTransformOrCollection string `json:"originalTransformOrCollection,omitempty"` + + // SizeBytes: Size of the source, if measurable. + SizeBytes int64 `json:"sizeBytes,omitempty,string"` + + // UserName: Human-readable name for this source; may be user or system + // generated. + UserName string `json:"userName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StageSource) MarshalJSON() ([]byte, error) { + type noMethod StageSource + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // StateFamilyConfig: State family configuration. type StateFamilyConfig struct { // IsRead: If true, this family corresponds to a read operation. @@ -3590,53 +4398,99 @@ func (s *StateFamilyConfig) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for different programming environments, including REST APIs -// and RPC APIs. It is used by [gRPC](https://github.com/grpc). The -// error model is designed to be: - Simple to use and understand for -// most users - Flexible enough to meet unexpected needs # Overview The -// `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of +// suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). 
The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of // google.rpc.Code, but it may accept additional error codes if needed. -// The error message should be a developer-facing English message that -// helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the -// error details or localize it in the client. The optional error -// details may contain arbitrary information about the error. There is a -// predefined set of error detail types in the package `google.rpc` -// which can be used for common error conditions. # Language mapping The -// `Status` message is the logical representation of the error model, -// but it is not necessarily the actual wire format. When the `Status` -// message is exposed in different client libraries and different wire -// protocols, it can be mapped differently. For example, it will likely -// be mapped to some exceptions in Java, but more likely mapped to some -// error codes in C. # Other uses The error model and the `Status` -// message can be used in a variety of environments, either with or -// without APIs, to provide a consistent developer experience across -// different environments. Example uses of this error model include: - -// Partial errors. If a service needs to return partial errors to the -// client, it may embed the `Status` in the normal response to indicate -// the partial errors. - Workflow errors. A typical workflow has -// multiple steps. Each step may have a `Status` message for error -// reporting purpose. - Batch operations. If a client uses batch request -// and batch response, the `Status` message should be used directly -// inside batch response, one for each error sub-response. - -// Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. - Logging. If some -// API errors are stored in logs, the message `Status` could be used -// directly after any stripping needed for security/privacy reasons. +// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error +// details or +// localize it in the client. The optional error details may contain +// arbitrary +// information about the error. There is a predefined set of error +// detail types +// in the package `google.rpc` which can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it +// is not necessarily the actual wire format. When the `Status` message +// is +// exposed in different client libraries and different wire protocols, +// it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety +// of +// environments, either with or without APIs, to provide a +// consistent developer experience across different +// environments. 
+// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the +// client, +// it may embed the `Status` in the normal response to indicate the +// partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may +// have a `Status` message for error reporting purpose. +// +// - Batch operations. If a client uses batch request and batch +// response, the +// `Status` message should be used directly inside batch response, +// one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation +// results in its response, the status of those operations should +// be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could +// be used directly after any stripping needed for security/privacy +// reasons. type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There will - // be a common set of message types for APIs to use. + // Details: A list of messages that carry the error details. There will + // be a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any user-facing error message should be localized and sent - // in the google.rpc.Status.details field, or localized by the client. + // English. Any + // user-facing error message should be localized and sent in + // the + // google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -3662,29 +4516,48 @@ func (s *Status) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Step: Defines a particular step within a Cloud Dataflow job. A job -// consists of multiple steps, each of which performs some specific -// operation as part of the overall job. Data is typically passed from -// one step to another as part of the job. Here's an example of a -// sequence of steps which together implement a Map-Reduce job: * Read a -// collection of data from some source, parsing the collection's -// elements. * Validate the elements. * Apply a user-defined function to -// map each element to some value and extract an element-specific key -// value. * Group elements with the same key into a single element with -// that key, transforming a multiply-keyed collection into a -// uniquely-keyed collection. * Write the elements out to some data -// sink. Note that the Cloud Dataflow service may be used to run many -// different types of jobs, not just Map-Reduce. +// Step: Defines a particular step within a Cloud Dataflow job. +// +// A job consists of multiple steps, each of which performs +// some +// specific operation as part of the overall job. Data is +// typically +// passed from one step to another as part of the job. +// +// Here's an example of a sequence of steps which together implement +// a +// Map-Reduce job: +// +// * Read a collection of data from some source, parsing the +// collection's elements. +// +// * Validate the elements. +// +// * Apply a user-defined function to map each element to some value +// and extract an element-specific key value. 
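To make the Status error model above concrete, here is a small, assumption-laden helper (the function name is invented and the import path is assumed) that renders a Status as a log line.

package main

import (
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

// describeStatus formats the logical error model: a google.rpc.Code value, a
// developer-facing English message, and any number of detail messages.
func describeStatus(s *dataflow.Status) string {
	if s == nil {
		return "ok"
	}
	return fmt.Sprintf("code=%d msg=%q details=%d", s.Code, s.Message, len(s.Details))
}

func main() {
	fmt.Println(describeStatus(&dataflow.Status{Code: 9, Message: "precondition failed"}))
}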
+// +// * Group elements with the same key into a single element with +// that key, transforming a multiply-keyed collection into a +// uniquely-keyed collection. +// +// * Write the elements out to some data sink. +// +// Note that the Cloud Dataflow service may be used to run many +// different +// types of jobs, not just Map-Reduce. type Step struct { // Kind: The kind of step in the Cloud Dataflow job. Kind string `json:"kind,omitempty"` - // Name: The name that identifies the step. This must be unique for each + // Name: The name that identifies the step. This must be unique for + // each // step with respect to all other steps in the Cloud Dataflow job. Name string `json:"name,omitempty"` - // Properties: Named properties associated with the step. Each kind of + // Properties: Named properties associated with the step. Each kind + // of // predefined step has its own required set of properties. + // Must be provided on Create. Only retrieved with JOB_VIEW_ALL. Properties googleapi.RawMessage `json:"properties,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -3711,7 +4584,8 @@ func (s *Step) MarshalJSON() ([]byte, error) { } // StreamLocation: Describes a stream of data, either as input to be -// processed or as output of a streaming Dataflow job. +// processed or as +// output of a streaming Dataflow job. type StreamLocation struct { // CustomSourceLocation: The stream is a custom source. CustomSourceLocation *CustomSourceLocation `json:"customSourceLocation,omitempty"` @@ -3723,7 +4597,8 @@ type StreamLocation struct { SideInputLocation *StreamingSideInputLocation `json:"sideInputLocation,omitempty"` // StreamingStageLocation: The stream is part of another computation - // within the current streaming Dataflow job. + // within the current + // streaming Dataflow job. StreamingStageLocation *StreamingStageLocation `json:"streamingStageLocation,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3790,7 +4665,8 @@ func (s *StreamingComputationConfig) MarshalJSON() ([]byte, error) { } // StreamingComputationRanges: Describes full or partial data disk -// assignment information of the computation ranges. +// assignment information of the computation +// ranges. type StreamingComputationRanges struct { // ComputationId: The ID of the computation. ComputationId string `json:"computationId,omitempty"` @@ -3823,7 +4699,8 @@ func (s *StreamingComputationRanges) MarshalJSON() ([]byte, error) { } // StreamingComputationTask: A task which describes what action should -// be performed for the specified streaming computation ranges. +// be performed for the specified +// streaming computation ranges. type StreamingComputationTask struct { // ComputationRanges: Contains ranges of a streaming computation this // task should apply to. @@ -3835,9 +4712,12 @@ type StreamingComputationTask struct { // TaskType: A type of streaming computation task. // // Possible values: - // "STREAMING_COMPUTATION_TASK_UNKNOWN" - // "STREAMING_COMPUTATION_TASK_STOP" - // "STREAMING_COMPUTATION_TASK_START" + // "STREAMING_COMPUTATION_TASK_UNKNOWN" - The streaming computation + // task is unknown, or unspecified. + // "STREAMING_COMPUTATION_TASK_STOP" - Stop processing specified + // streaming computation range(s). + // "STREAMING_COMPUTATION_TASK_START" - Start processing specified + // streaming computation range(s). TaskType string `json:"taskType,omitempty"` // ForceSendFields is a list of field names (e.g. 
"ComputationRanges") @@ -3907,7 +4787,8 @@ type StreamingSetupTask struct { Drain bool `json:"drain,omitempty"` // ReceiveWorkPort: The TCP port on which the worker should listen for - // messages from other streaming computation workers. + // messages from + // other streaming computation workers. ReceiveWorkPort int64 `json:"receiveWorkPort,omitempty"` // StreamingComputationTopology: The global topology of the streaming @@ -3915,7 +4796,8 @@ type StreamingSetupTask struct { StreamingComputationTopology *TopologyConfig `json:"streamingComputationTopology,omitempty"` // WorkerHarnessPort: The TCP port used by the worker to communicate - // with the Dataflow worker harness. + // with the Dataflow + // worker harness. WorkerHarnessPort int64 `json:"workerHarnessPort,omitempty"` // ForceSendFields is a list of field names (e.g. "Drain") to @@ -3976,10 +4858,12 @@ func (s *StreamingSideInputLocation) MarshalJSON() ([]byte, error) { } // StreamingStageLocation: Identifies the location of a streaming -// computation stage, for stage-to-stage communication. +// computation stage, for +// stage-to-stage communication. type StreamingStageLocation struct { // StreamId: Identifies the particular stream within the streaming - // Dataflow job. + // Dataflow + // job. StreamId string `json:"streamId,omitempty"` // ForceSendFields is a list of field names (e.g. "StreamId") to @@ -4043,12 +4927,18 @@ type TaskRunnerSettings struct { BaseTaskDir string `json:"baseTaskDir,omitempty"` // BaseUrl: The base URL for the taskrunner to use when accessing Google - // Cloud APIs. When workers access Google Cloud APIs, they logically do - // so via relative URLs. If this field is specified, it supplies the - // base URL to use for resolving these relative URLs. The normative - // algorithm used is defined by RFC 1808, "Relative Uniform Resource - // Locators". If not specified, the default value is - // "http://www.googleapis.com/" + // Cloud APIs. + // + // When workers access Google Cloud APIs, they logically do so + // via + // relative URLs. If this field is specified, it supplies the base + // URL to use for resolving these relative URLs. The + // normative + // algorithm used is defined by RFC 1808, "Relative Uniform + // Resource + // Locators". + // + // If not specified, the default value is "http://www.googleapis.com/" BaseUrl string `json:"baseUrl,omitempty"` // CommandlinesFileName: The file to store preprocessing commands in. @@ -4071,17 +4961,24 @@ type TaskRunnerSettings struct { LogDir string `json:"logDir,omitempty"` // LogToSerialconsole: Whether to send taskrunner log info to Google - // Compute Engine VM serial console. + // Compute Engine VM serial + // console. LogToSerialconsole bool `json:"logToSerialconsole,omitempty"` - // LogUploadLocation: Indicates where to put logs. If this is not - // specified, the logs will not be uploaded. The supported resource type - // is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // LogUploadLocation: Indicates where to put logs. If this is not + // specified, the logs + // will not be uploaded. + // + // The supported resource type is: + // + // Google Cloud Storage: + // storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} LogUploadLocation string `json:"logUploadLocation,omitempty"` // OauthScopes: The OAuth2 scopes to be requested by the taskrunner in - // order to access the Cloud Dataflow API. + // order to + // access the Cloud Dataflow API. 
OauthScopes []string `json:"oauthScopes,omitempty"` // ParallelWorkerSettings: The settings to pass to the parallel worker @@ -4092,17 +4989,24 @@ type TaskRunnerSettings struct { StreamingWorkerMainClass string `json:"streamingWorkerMainClass,omitempty"` // TaskGroup: The UNIX group ID on the worker VM to use for tasks - // launched by taskrunner; e.g. "wheel". + // launched by + // taskrunner; e.g. "wheel". TaskGroup string `json:"taskGroup,omitempty"` // TaskUser: The UNIX user ID on the worker VM to use for tasks launched - // by taskrunner; e.g. "root". + // by + // taskrunner; e.g. "root". TaskUser string `json:"taskUser,omitempty"` // TempStoragePrefix: The prefix of the resources the taskrunner should - // use for temporary storage. The supported resource type is: Google - // Cloud Storage: storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // use for + // temporary storage. + // + // The supported resource type is: + // + // Google Cloud Storage: + // storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempStoragePrefix string `json:"tempStoragePrefix,omitempty"` // VmId: The ID string of the VM. @@ -4135,8 +5039,91 @@ func (s *TaskRunnerSettings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TemplateMetadata: Metadata describing a template. +type TemplateMetadata struct { + // BypassTempDirValidation: If true, will bypass the validation that the + // temp directory is + // writable. This should only be used with templates for pipelines + // that are guaranteed not to need to write to the temp directory, + // which is subject to change based on the optimizer. + BypassTempDirValidation bool `json:"bypassTempDirValidation,omitempty"` + + // Description: Optional. A description of the template. + Description string `json:"description,omitempty"` + + // Name: Required. The name of the template. + Name string `json:"name,omitempty"` + + // Parameters: The parameters for the template. + Parameters []*ParameterMetadata `json:"parameters,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "BypassTempDirValidation") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BypassTempDirValidation") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TemplateMetadata) MarshalJSON() ([]byte, error) { + type noMethod TemplateMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TemplateValidationResult: The result of validating a +// CretaeJobFromTemplateRequest. 
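A hedged sketch of the TemplateMetadata / ParameterMetadata pair introduced above, describing a single required template parameter; the template name, parameter names, and regex are placeholders, and the google.golang.org/api/dataflow/v1b3 import path is assumed.

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	meta := &dataflow.TemplateMetadata{
		Name:        "word-count",
		Description: "Counts words in a text file.",
		Parameters: []*dataflow.ParameterMetadata{
			{
				Name:     "inputFile",
				Label:    "Input Cloud Storage file",
				HelpText: "A gs:// path to the text file to process.",
				Regexes:  []string{`^gs://.+$`},
				// IsOptional defaults to false, so this parameter is required.
			},
		},
	}

	b, _ := json.MarshalIndent(meta, "", "  ")
	fmt.Println(string(b))
}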
+type TemplateValidationResult struct { + // Status: The status of the creation request. Any problems with the + // request + // will be indicated in the error_details. + Status *Status `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Status") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Status") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TemplateValidationResult) MarshalJSON() ([]byte, error) { + type noMethod TemplateValidationResult + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TopologyConfig: Global topology of the streaming Dataflow job, -// including all computations and their sharded locations. +// including all +// computations and their sharded locations. type TopologyConfig struct { // Computations: The computations associated with a streaming Dataflow // job. @@ -4179,8 +5166,68 @@ func (s *TopologyConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TransformSummary: Description of the type, names/ids, and +// input/outputs for a transform. +type TransformSummary struct { + // DisplayData: Transform-specific display data. + DisplayData []*DisplayData `json:"displayData,omitempty"` + + // Id: SDK generated id of this transform instance. + Id string `json:"id,omitempty"` + + // InputCollectionName: User names for all collection inputs to this + // transform. + InputCollectionName []string `json:"inputCollectionName,omitempty"` + + // Kind: Type of transform. + // + // Possible values: + // "UNKNOWN_KIND" - Unrecognized transform type. + // "PAR_DO_KIND" - ParDo transform. + // "GROUP_BY_KEY_KIND" - Group By Key transform. + // "FLATTEN_KIND" - Flatten transform. + // "READ_KIND" - Read transform. + // "WRITE_KIND" - Write transform. + // "CONSTANT_KIND" - Constructs from a constant value, such as with + // Create.of. + // "SINGLETON_KIND" - Creates a Singleton view of a collection. + // "SHUFFLE_KIND" - Opening or closing a shuffle session, often as + // part of a GroupByKey. + Kind string `json:"kind,omitempty"` + + // Name: User provided name for this transform instance. + Name string `json:"name,omitempty"` + + // OutputCollectionName: User names for all collection outputs to this + // transform. + OutputCollectionName []string `json:"outputCollectionName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayData") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayData") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TransformSummary) MarshalJSON() ([]byte, error) { + type noMethod TransformSummary + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // WorkItem: WorkItem represents basic information about a WorkItem to -// be executed in the cloud. +// be executed +// in the cloud. type WorkItem struct { // Configuration: Work item-specific configuration as an opaque blob. Configuration string `json:"configuration,omitempty"` @@ -4202,7 +5249,8 @@ type WorkItem struct { MapTask *MapTask `json:"mapTask,omitempty"` // Packages: Any required packages that need to be fetched in order to - // execute this WorkItem. + // execute + // this WorkItem. Packages []*Package `json:"packages,omitempty"` // ProjectId: Identifies the cloud project this WorkItem belongs to. @@ -4257,33 +5305,41 @@ func (s *WorkItem) MarshalJSON() ([]byte, error) { } // WorkItemServiceState: The Dataflow service's idea of the current -// state of a WorkItem being processed by a worker. +// state of a WorkItem +// being processed by a worker. type WorkItemServiceState struct { // HarnessData: Other data returned by the service, specific to the - // particular worker harness. + // particular + // worker harness. HarnessData googleapi.RawMessage `json:"harnessData,omitempty"` // LeaseExpireTime: Time at which the current lease will expire. LeaseExpireTime string `json:"leaseExpireTime,omitempty"` // MetricShortId: The short ids that workers should use in subsequent - // metric updates. Workers should strive to use short ids whenever - // possible, but it is ok to request the short_id again if a worker lost - // track of it (e.g. if the worker is recovering from a crash). NOTE: it - // is possible that the response may have short ids for a subset of the - // metrics. + // metric updates. + // Workers should strive to use short ids whenever possible, but it is + // ok + // to request the short_id again if a worker lost track of it + // (e.g. if the worker is recovering from a crash). + // NOTE: it is possible that the response may have short ids for a + // subset + // of the metrics. MetricShortId []*MetricShortId `json:"metricShortId,omitempty"` // NextReportIndex: The index value to use for the next report sent by - // the worker. Note: If the report call fails for whatever reason, the - // worker should reuse this index for subsequent report attempts. + // the worker. + // Note: If the report call fails for whatever reason, the worker + // should + // reuse this index for subsequent report attempts. NextReportIndex int64 `json:"nextReportIndex,omitempty,string"` // ReportStatusInterval: New recommended reporting interval. 
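For orientation, a small sketch that walks a PipelineDescription (defined earlier in this file) and prints each original transform's kind and name, assuming the same generated google.golang.org/api/dataflow/v1b3 package; the summarize helper and the sample transforms are illustrative only.

package main

import (
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

// summarize prints one line per user transform in the submitted pipeline.
func summarize(pd *dataflow.PipelineDescription) {
	for _, t := range pd.OriginalPipelineTransform {
		// Kind is one of the enum strings documented above, e.g.
		// "PAR_DO_KIND" or "GROUP_BY_KEY_KIND".
		fmt.Printf("%-20s %s (id=%s)\n", t.Kind, t.Name, t.Id)
	}
}

func main() {
	summarize(&dataflow.PipelineDescription{
		OriginalPipelineTransform: []*dataflow.TransformSummary{
			{Kind: "READ_KIND", Name: "ReadLines", Id: "s1"},
			{Kind: "PAR_DO_KIND", Name: "CountWords", Id: "s2"},
		},
	})
}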
ReportStatusInterval string `json:"reportStatusInterval,omitempty"` // SplitRequest: The progress point in the WorkItem where the Dataflow - // service suggests that the worker truncate the task. + // service + // suggests that the worker truncate the task. SplitRequest *ApproximateSplitRequest `json:"splitRequest,omitempty"` // SuggestedStopPoint: DEPRECATED in favor of split_request. @@ -4328,8 +5384,9 @@ type WorkItemStatus struct { // DynamicSourceSplit: See documentation of stop_position. DynamicSourceSplit *DynamicSourceSplit `json:"dynamicSourceSplit,omitempty"` - // Errors: Specifies errors which occurred during processing. If errors - // are provided, and completed = true, then the WorkItem is considered + // Errors: Specifies errors which occurred during processing. If errors + // are + // provided, and completed = true, then the WorkItem is considered // to have failed. Errors []*Status `json:"errors,omitempty"` @@ -4339,17 +5396,24 @@ type WorkItemStatus struct { // Progress: DEPRECATED in favor of reported_progress. Progress *ApproximateProgress `json:"progress,omitempty"` - // ReportIndex: The report index. When a WorkItem is leased, the lease - // will contain an initial report index. When a WorkItem's status is - // reported to the system, the report should be sent with that report - // index, and the response will contain the index the worker should use - // for the next report. Reports received with unexpected index values - // will be rejected by the service. In order to preserve idempotency, - // the worker should not alter the contents of a report, even if the - // worker must submit the same report multiple times before getting back - // a response. The worker should not submit a subsequent report until - // the response for the previous report had been received from the - // service. + // ReportIndex: The report index. When a WorkItem is leased, the lease + // will + // contain an initial report index. When a WorkItem's status + // is + // reported to the system, the report should be sent with + // that report index, and the response will contain the index the + // worker should use for the next report. Reports received + // with + // unexpected index values will be rejected by the service. + // + // In order to preserve idempotency, the worker should not alter + // the + // contents of a report, even if the worker must submit the same + // report multiple times before getting back a response. The + // worker + // should not submit a subsequent report until the response for + // the + // previous report had been received from the service. ReportIndex int64 `json:"reportIndex,omitempty,string"` // ReportedProgress: The worker's progress through this WorkItem. @@ -4363,35 +5427,51 @@ type WorkItemStatus struct { SourceFork *SourceFork `json:"sourceFork,omitempty"` // SourceOperationResponse: If the work item represented a - // SourceOperationRequest, and the work is completed, contains the - // result of the operation. + // SourceOperationRequest, and the work + // is completed, contains the result of the operation. SourceOperationResponse *SourceOperationResponse `json:"sourceOperationResponse,omitempty"` // StopPosition: A worker may split an active map task in two parts, - // "primary" and "residual", continuing to process the primary part and - // returning the residual part into the pool of available work. This - // event is called a "dynamic split" and is critical to the dynamic work - // rebalancing feature. 
The two obtained sub-tasks are called "parts" of - // the split. The parts, if concatenated, must represent the same input - // as would be read by the current task if the split did not happen. The - // exact way in which the original task is decomposed into the two parts - // is specified either as a position demarcating them (stop_position), - // or explicitly as two DerivedSources, if this task consumes a - // user-defined source type (dynamic_source_split). The "current" task - // is adjusted as a result of the split: after a task with range [A, B) - // sends a stop_position update at C, its range is considered to be [A, - // C), e.g.: * Progress should be interpreted relative to the new range, - // e.g. "75% completed" means "75% of [A, C) completed" * The worker - // should interpret proposed_stop_position relative to the new range, - // e.g. "split at 68%" should be interpreted as "split at 68% of [A, - // C)". * If the worker chooses to split again using stop_position, only - // stop_positions in [A, C) will be accepted. * Etc. - // dynamic_source_split has similar semantics: e.g., if a task with - // source S splits using dynamic_source_split into {P, R} (where P and R - // must be together equivalent to S), then subsequent progress and - // proposed_stop_position should be interpreted relative to P, and in a - // potential subsequent dynamic_source_split into {P', R'}, P' and R' - // must be together equivalent to P, etc. + // "primary" and + // "residual", continuing to process the primary part and returning + // the + // residual part into the pool of available work. + // This event is called a "dynamic split" and is critical to the + // dynamic + // work rebalancing feature. The two obtained sub-tasks are + // called + // "parts" of the split. + // The parts, if concatenated, must represent the same input as would + // be read by the current task if the split did not happen. + // The exact way in which the original task is decomposed into the + // two + // parts is specified either as a position demarcating + // them + // (stop_position), or explicitly as two DerivedSources, if this + // task consumes a user-defined source type (dynamic_source_split). + // + // The "current" task is adjusted as a result of the split: after a + // task + // with range [A, B) sends a stop_position update at C, its range + // is + // considered to be [A, C), e.g.: + // * Progress should be interpreted relative to the new range, e.g. + // "75% completed" means "75% of [A, C) completed" + // * The worker should interpret proposed_stop_position relative to the + // new range, e.g. "split at 68%" should be interpreted as + // "split at 68% of [A, C)". + // * If the worker chooses to split again using stop_position, only + // stop_positions in [A, C) will be accepted. + // * Etc. + // dynamic_source_split has similar semantics: e.g., if a task + // with + // source S splits using dynamic_source_split into {P, R} + // (where P and R must be together equivalent to S), then + // subsequent + // progress and proposed_stop_position should be interpreted relative + // to P, and in a potential subsequent dynamic_source_split into {P', + // R'}, + // P' and R' must be together equivalent to P, etc. StopPosition *Position `json:"stopPosition,omitempty"` // WorkItemId: Identifies the WorkItem. @@ -4421,17 +5501,27 @@ func (s *WorkItemStatus) MarshalJSON() ([]byte, error) { } // WorkerHealthReport: WorkerHealthReport contains information about the -// health of a worker. 
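Tying a few of these pieces together, the following hedged sketch builds a WorkItemStatus that reports a stop_position, as in the dynamic-split description above; the work item id, report index, and positions are invented values, and the import path is assumed.

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	status := &dataflow.WorkItemStatus{
		WorkItemId:  "1234567890",
		ReportIndex: 3, // send the index from the previous response; reuse it unchanged on retries

		// After this update the task's range is truncated at record 500:
		// progress and any later splits are interpreted relative to the new,
		// smaller range.
		StopPosition: &dataflow.Position{RecordIndex: 500},
	}

	b, _ := json.Marshal(status)
	fmt.Println(string(b))
}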
The VM should be identified by the labels -// attached to the WorkerMessage that this health ping belongs to. +// health of a worker. +// +// The VM should be identified by the labels attached to the +// WorkerMessage that +// this health ping belongs to. type WorkerHealthReport struct { - // Pods: The pods running on the worker. See: - // http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod This field is used by the worker to send the status of the indvidual containers running on each - // worker. + // Pods: The pods running on the worker. + // See: + // http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html# + // _v1_pod + // + // This field is used by the worker to send the status of the + // indvidual + // containers running on each worker. Pods []googleapi.RawMessage `json:"pods,omitempty"` // ReportInterval: The interval at which the worker is sending health - // reports. The default value of 0 should be interpreted as the field is - // not being explicitly set by the worker. + // reports. + // The default value of 0 should be interpreted as the field is not + // being + // explicitly set by the worker. ReportInterval string `json:"reportInterval,omitempty"` // VmIsHealthy: Whether the VM is healthy. @@ -4464,11 +5554,16 @@ func (s *WorkerHealthReport) MarshalJSON() ([]byte, error) { } // WorkerHealthReportResponse: WorkerHealthReportResponse contains -// information returned to the worker in response to a health ping. +// information returned to the worker +// in response to a health ping. type WorkerHealthReportResponse struct { // ReportInterval: A positive value indicates the worker should change - // its reporting interval to the specified value. The default value of - // zero means no change in report rate is requested by the server. + // its reporting interval + // to the specified value. + // + // The default value of zero means no change in report rate is requested + // by + // the server. ReportInterval string `json:"reportInterval,omitempty"` // ForceSendFields is a list of field names (e.g. "ReportInterval") to @@ -4498,13 +5593,18 @@ func (s *WorkerHealthReportResponse) MarshalJSON() ([]byte, error) { // WorkerMessage: WorkerMessage provides information to the backend // about a worker. type WorkerMessage struct { - // Labels: Labels are used to group WorkerMessages. For example, a - // worker_message about a particular container might have the labels: { - // "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015…" - // "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags - // typically correspond to Label enum values. However, for ease of - // development other strings can be used as tags. LABEL_UNSPECIFIED - // should not be used here. + // Labels: Labels are used to group WorkerMessages. + // For example, a worker_message about a particular container + // might have the labels: + // { "JOB_ID": "2015-04-22", + // "WORKER_ID": "wordcount-vm-2015…" + // "CONTAINER_TYPE": "worker", + // "CONTAINER_ID": "ac1234def"} + // Label tags typically correspond to Label enum values. However, for + // ease + // of development other strings can be used as tags. LABEL_UNSPECIFIED + // should + // not be used here. Labels map[string]string `json:"labels,omitempty"` // Time: The timestamp of the worker_message. @@ -4543,34 +5643,65 @@ func (s *WorkerMessage) MarshalJSON() ([]byte, error) { } // WorkerMessageCode: A message code is used to report status and error -// messages to the service. 
The message codes are intended to be machine -// readable. The service will take care of translating these into user -// understandable messages if necessary. Example use cases: 1. Worker -// processes reporting successful startup. 2. Worker processes reporting -// specific errors (e.g. package staging failure). +// messages to the service. +// The message codes are intended to be machine readable. The service +// will +// take care of translating these into user understandable messages +// if +// necessary. +// +// Example use cases: +// 1. Worker processes reporting successful startup. +// 2. Worker processes reporting specific errors (e.g. package +// staging +// failure). type WorkerMessageCode struct { // Code: The code is a string intended for consumption by a machine that - // identifies the type of message being sent. Examples: 1. - // "HARNESS_STARTED" might be used to indicate the worker harness has - // started. 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error - // downloading a GCS file as part of the boot process of one of the - // worker containers. This is a string and not an enum to make it easy - // to add new codes without waiting for an API change. + // identifies + // the type of message being sent. + // Examples: + // 1. "HARNESS_STARTED" might be used to indicate the worker harness + // has + // started. + // 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error + // downloading + // a GCS file as part of the boot process of one of the worker + // containers. + // + // This is a string and not an enum to make it easy to add new codes + // without + // waiting for an API change. Code string `json:"code,omitempty"` - // Parameters: Parameters contains specific information about the code. - // This is a struct to allow parameters of different types. Examples: 1. - // For a "HARNESS_STARTED" message parameters might provide the name of - // the worker and additional data like timing information. 2. For a - // "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS - // objects being downloaded and fields containing errors. In general - // complex data structures should be avoided. If a worker needs to send - // a specific and complicated data structure then please consider - // defining a new proto and adding it to the data oneof in - // WorkerMessageResponse. Conventions: Parameters should only be used - // for information that isn't typically passed as a label. hostname and - // other worker identifiers should almost always be passed as labels - // since they will be included on most messages. + // Parameters: Parameters contains specific information about the + // code. + // + // This is a struct to allow parameters of different types. + // + // Examples: + // 1. For a "HARNESS_STARTED" message parameters might provide the + // name + // of the worker and additional data like timing information. + // 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields + // listing + // the GCS objects being downloaded and fields containing + // errors. + // + // In general complex data structures should be avoided. If a + // worker + // needs to send a specific and complicated data structure then + // please + // consider defining a new proto and adding it to the data oneof + // in + // WorkerMessageResponse. + // + // Conventions: + // Parameters should only be used for information that isn't typically + // passed + // as a label. 
+ // hostname and other worker identifiers should almost always be + // passed + // as labels since they will be included on most messages. Parameters googleapi.RawMessage `json:"parameters,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -4597,7 +5728,8 @@ func (s *WorkerMessageCode) MarshalJSON() ([]byte, error) { } // WorkerMessageResponse: A worker_message response allows the server to -// pass information to the sender. +// pass information to the +// sender. type WorkerMessageResponse struct { // WorkerHealthReportResponse: The service's response to a worker's // health report. @@ -4633,9 +5765,13 @@ func (s *WorkerMessageResponse) MarshalJSON() ([]byte, error) { } // WorkerPool: Describes one particular pool of Cloud Dataflow workers -// to be instantiated by the Cloud Dataflow service in order to perform -// the computations required by a job. Note that a workflow job may use -// multiple pools, in order to match the various computational +// to be +// instantiated by the Cloud Dataflow service in order to perform +// the +// computations required by a job. Note that a workflow job may +// use +// multiple pools, in order to match the various +// computational // requirements of the various stages of the job. type WorkerPool struct { // AutoscalingSettings: Settings for autoscaling of this WorkerPool. @@ -4644,64 +5780,80 @@ type WorkerPool struct { // DataDisks: Data disks that are used by a VM in this workflow. DataDisks []*Disk `json:"dataDisks,omitempty"` - // DefaultPackageSet: The default package set to install. This allows - // the service to select a default set of packages which are useful to - // worker harnesses written in a particular language. + // DefaultPackageSet: The default package set to install. This allows + // the service to + // select a default set of packages which are useful to worker + // harnesses written in a particular language. // // Possible values: - // "DEFAULT_PACKAGE_SET_UNKNOWN" - // "DEFAULT_PACKAGE_SET_NONE" - // "DEFAULT_PACKAGE_SET_JAVA" - // "DEFAULT_PACKAGE_SET_PYTHON" + // "DEFAULT_PACKAGE_SET_UNKNOWN" - The default set of packages to + // stage is unknown, or unspecified. + // "DEFAULT_PACKAGE_SET_NONE" - Indicates that no packages should be + // staged at the worker unless + // explicitly specified by the job. + // "DEFAULT_PACKAGE_SET_JAVA" - Stage packages typically useful to + // workers written in Java. + // "DEFAULT_PACKAGE_SET_PYTHON" - Stage pacakges typically useful to + // workers written in Python. DefaultPackageSet string `json:"defaultPackageSet,omitempty"` - // DiskSizeGb: Size of root disk for VMs, in GB. If zero or unspecified, - // the service will attempt to choose a reasonable default. + // DiskSizeGb: Size of root disk for VMs, in GB. If zero or + // unspecified, the service will + // attempt to choose a reasonable default. DiskSizeGb int64 `json:"diskSizeGb,omitempty"` // DiskSourceImage: Fully qualified source image for disks. DiskSourceImage string `json:"diskSourceImage,omitempty"` - // DiskType: Type of root disk for VMs. If empty or unspecified, the - // service will attempt to choose a reasonable default. + // DiskType: Type of root disk for VMs. If empty or unspecified, the + // service will + // attempt to choose a reasonable default. DiskType string `json:"diskType,omitempty"` // IpConfiguration: Configuration for VM IPs. 
// // Possible values: - // "WORKER_IP_UNSPECIFIED" - // "WORKER_IP_PUBLIC" - // "WORKER_IP_PRIVATE" + // "WORKER_IP_UNSPECIFIED" - The configuration is unknown, or + // unspecified. + // "WORKER_IP_PUBLIC" - Workers should have public IP addresses. + // "WORKER_IP_PRIVATE" - Workers should have private IP addresses. IpConfiguration string `json:"ipConfiguration,omitempty"` // Kind: The kind of the worker pool; currently only `harness` and - // `shuffle` are supported. + // `shuffle` + // are supported. Kind string `json:"kind,omitempty"` - // MachineType: Machine type (e.g. "n1-standard-1"). If empty or - // unspecified, the service will attempt to choose a reasonable default. + // MachineType: Machine type (e.g. "n1-standard-1"). If empty or + // unspecified, the + // service will attempt to choose a reasonable default. MachineType string `json:"machineType,omitempty"` // Metadata: Metadata to set on the Google Compute Engine VMs. Metadata map[string]string `json:"metadata,omitempty"` - // Network: Network to which VMs will be assigned. If empty or - // unspecified, the service will use the network "default". + // Network: Network to which VMs will be assigned. If empty or + // unspecified, + // the service will use the network "default". Network string `json:"network,omitempty"` // NumThreadsPerWorker: The number of threads per worker harness. If - // empty or unspecified, the service will choose a number of threads - // (according to the number of cores on the selected machine type for - // batch, or 1 by convention for streaming). + // empty or unspecified, the + // service will choose a number of threads (according to the number of + // cores + // on the selected machine type for batch, or 1 by convention for + // streaming). NumThreadsPerWorker int64 `json:"numThreadsPerWorker,omitempty"` // NumWorkers: Number of Google Compute Engine workers in this pool - // needed to execute the job. If zero or unspecified, the service will + // needed to + // execute the job. If zero or unspecified, the service will // attempt to choose a reasonable default. NumWorkers int64 `json:"numWorkers,omitempty"` // OnHostMaintenance: The action to take on host maintenance, as defined - // by the Google Compute Engine API. + // by the Google + // Compute Engine API. OnHostMaintenance string `json:"onHostMaintenance,omitempty"` // Packages: Packages to be installed on workers. @@ -4711,41 +5863,61 @@ type WorkerPool struct { PoolArgs googleapi.RawMessage `json:"poolArgs,omitempty"` // Subnetwork: Subnetwork to which VMs will be assigned, if desired. - // Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK". + // Expected to be of + // the form "regions/REGION/subnetworks/SUBNETWORK". Subnetwork string `json:"subnetwork,omitempty"` // TaskrunnerSettings: Settings passed through to Google Compute Engine - // workers when using the standard Dataflow task runner. Users should - // ignore this field. + // workers when + // using the standard Dataflow task runner. Users should ignore + // this field. TaskrunnerSettings *TaskRunnerSettings `json:"taskrunnerSettings,omitempty"` // TeardownPolicy: Sets the policy for determining when to turndown - // worker pool. Allowed values are: `TEARDOWN_ALWAYS`, - // `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means - // workers are always torn down regardless of whether the job succeeds. - // `TEARDOWN_ON_SUCCESS` means workers are torn down if the job - // succeeds. `TEARDOWN_NEVER` means the workers are never torn down. 
If - // the workers are not torn down by the service, they will continue to - // run and use Google Compute Engine VM resources in the user's project - // until they are explicitly terminated by the user. Because of this, - // Google recommends using the `TEARDOWN_ALWAYS` policy except for - // small, manually supervised test jobs. If unknown or unspecified, the - // service will attempt to choose a reasonable default. + // worker pool. + // Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, + // and + // `TEARDOWN_NEVER`. + // `TEARDOWN_ALWAYS` means workers are always torn down regardless of + // whether + // the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn + // down + // if the job succeeds. `TEARDOWN_NEVER` means the workers are never + // torn + // down. + // + // If the workers are not torn down by the service, they will + // continue to run and use Google Compute Engine VM resources in + // the + // user's project until they are explicitly terminated by the + // user. + // Because of this, Google recommends using the `TEARDOWN_ALWAYS` + // policy except for small, manually supervised test jobs. + // + // If unknown or unspecified, the service will attempt to choose a + // reasonable + // default. // // Possible values: - // "TEARDOWN_POLICY_UNKNOWN" - // "TEARDOWN_ALWAYS" - // "TEARDOWN_ON_SUCCESS" - // "TEARDOWN_NEVER" + // "TEARDOWN_POLICY_UNKNOWN" - The teardown policy isn't specified, or + // is unknown. + // "TEARDOWN_ALWAYS" - Always teardown the resource. + // "TEARDOWN_ON_SUCCESS" - Teardown the resource on success. This is + // useful for debugging + // failures. + // "TEARDOWN_NEVER" - Never teardown the resource. This is useful for + // debugging and + // development. TeardownPolicy string `json:"teardownPolicy,omitempty"` // WorkerHarnessContainerImage: Required. Docker container image that - // executes the Cloud Dataflow worker harness, residing in Google - // Container Registry. + // executes the Cloud Dataflow worker + // harness, residing in Google Container Registry. WorkerHarnessContainerImage string `json:"workerHarnessContainerImage,omitempty"` - // Zone: Zone to run the worker pools in. If empty or unspecified, the - // service will attempt to choose a reasonable default. + // Zone: Zone to run the worker pools in. If empty or unspecified, the + // service + // will attempt to choose a reasonable default. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "AutoscalingSettings") @@ -4774,12 +5946,18 @@ func (s *WorkerPool) MarshalJSON() ([]byte, error) { // WorkerSettings: Provides data to pass through to the worker harness. type WorkerSettings struct { - // BaseUrl: The base URL for accessing Google Cloud APIs. When workers - // access Google Cloud APIs, they logically do so via relative URLs. If - // this field is specified, it supplies the base URL to use for - // resolving these relative URLs. The normative algorithm used is - // defined by RFC 1808, "Relative Uniform Resource Locators". If not - // specified, the default value is "http://www.googleapis.com/" + // BaseUrl: The base URL for accessing Google Cloud APIs. + // + // When workers access Google Cloud APIs, they logically do so + // via + // relative URLs. If this field is specified, it supplies the base + // URL to use for resolving these relative URLs. The + // normative + // algorithm used is defined by RFC 1808, "Relative Uniform + // Resource + // Locators". 
+ // + // If not specified, the default value is "http://www.googleapis.com/" BaseUrl string `json:"baseUrl,omitempty"` // ReportingEnabled: Whether to send work progress updates to the @@ -4787,17 +5965,25 @@ type WorkerSettings struct { ReportingEnabled bool `json:"reportingEnabled,omitempty"` // ServicePath: The Cloud Dataflow service path relative to the root - // URL, for example, "dataflow/v1b3/projects". + // URL, for example, + // "dataflow/v1b3/projects". ServicePath string `json:"servicePath,omitempty"` // ShuffleServicePath: The Shuffle service path relative to the root - // URL, for example, "shuffle/v1beta1". + // URL, for example, + // "shuffle/v1beta1". ShuffleServicePath string `json:"shuffleServicePath,omitempty"` // TempStoragePrefix: The prefix of the resources the system should use - // for temporary storage. The supported resource type is: Google Cloud - // Storage: storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // for temporary + // storage. + // + // The supported resource type is: + // + // Google Cloud Storage: + // + // storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempStoragePrefix string `json:"tempStoragePrefix,omitempty"` // WorkerId: The ID of the worker running this pipeline. @@ -4826,8 +6012,8 @@ func (s *WorkerSettings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// WriteInstruction: An instruction that writes records. Takes one -// input, produces no outputs. +// WriteInstruction: An instruction that writes records. +// Takes one input, produces no outputs. type WriteInstruction struct { // Input: The input. Input *InstructionInput `json:"input,omitempty"` @@ -4908,6 +6094,7 @@ func (c *ProjectsWorkerMessagesCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sendworkermessagesrequest) if err != nil { @@ -4964,6 +6151,7 @@ func (c *ProjectsWorkerMessagesCall) Do(opts ...googleapi.CallOption) (*SendWork return ret, nil // { // "description": "Send a worker_message to the service.", + // "flatPath": "v1b3/projects/{projectId}/WorkerMessages", // "httpMethod": "POST", // "id": "dataflow.projects.workerMessages", // "parameterOrder": [ @@ -5032,6 +6220,7 @@ func (c *ProjectsJobsCreateCall) ReplaceJobId(replaceJobId string) *ProjectsJobs // "JOB_VIEW_UNKNOWN" // "JOB_VIEW_SUMMARY" // "JOB_VIEW_ALL" +// "JOB_VIEW_DESCRIPTION" func (c *ProjectsJobsCreateCall) View(view string) *ProjectsJobsCreateCall { c.urlParams_.Set("view", view) return c @@ -5068,6 +6257,7 @@ func (c *ProjectsJobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) if err != nil { @@ -5124,6 +6314,7 @@ func (c *ProjectsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job, error) return ret, nil // { // "description": "Creates a Cloud Dataflow job.", + // "flatPath": "v1b3/projects/{projectId}/jobs", // "httpMethod": "POST", // "id": "dataflow.projects.jobs.create", // "parameterOrder": [ @@ -5151,7 +6342,8 @@ func (c *ProjectsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job, error) // "enum": [ // "JOB_VIEW_UNKNOWN", // 
"JOB_VIEW_SUMMARY", - // "JOB_VIEW_ALL" + // "JOB_VIEW_ALL", + // "JOB_VIEW_DESCRIPTION" // ], // "location": "query", // "type": "string" @@ -5206,6 +6398,7 @@ func (c *ProjectsJobsGetCall) Location(location string) *ProjectsJobsGetCall { // "JOB_VIEW_UNKNOWN" // "JOB_VIEW_SUMMARY" // "JOB_VIEW_ALL" +// "JOB_VIEW_DESCRIPTION" func (c *ProjectsJobsGetCall) View(view string) *ProjectsJobsGetCall { c.urlParams_.Set("view", view) return c @@ -5252,6 +6445,7 @@ func (c *ProjectsJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5307,6 +6501,7 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { return ret, nil // { // "description": "Gets the state of the specified Cloud Dataflow job.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.get", // "parameterOrder": [ @@ -5336,7 +6531,8 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { // "enum": [ // "JOB_VIEW_UNKNOWN", // "JOB_VIEW_SUMMARY", - // "JOB_VIEW_ALL" + // "JOB_VIEW_ALL", + // "JOB_VIEW_DESCRIPTION" // ], // "location": "query", // "type": "string" @@ -5382,8 +6578,8 @@ func (c *ProjectsJobsGetMetricsCall) Location(location string) *ProjectsJobsGetM } // StartTime sets the optional parameter "startTime": Return only metric -// data that has changed since this time. Default is to return all -// information about all metrics for the job. +// data that has changed since this time. +// Default is to return all information about all metrics for the job. func (c *ProjectsJobsGetMetricsCall) StartTime(startTime string) *ProjectsJobsGetMetricsCall { c.urlParams_.Set("startTime", startTime) return c @@ -5430,6 +6626,7 @@ func (c *ProjectsJobsGetMetricsCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5485,6 +6682,7 @@ func (c *ProjectsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) (*JobMetri return ret, nil // { // "description": "Request the job status.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/metrics", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.getMetrics", // "parameterOrder": [ @@ -5510,7 +6708,8 @@ func (c *ProjectsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) (*JobMetri // "type": "string" // }, // "startTime": { - // "description": "Return only metric data that has changed since this time. Default is to return all information about all metrics for the job.", + // "description": "Return only metric data that has changed since this time.\nDefault is to return all information about all metrics for the job.", + // "format": "google-datetime", // "location": "query", // "type": "string" // } @@ -5566,17 +6765,18 @@ func (c *ProjectsJobsListCall) Location(location string) *ProjectsJobsListCall { } // PageSize sets the optional parameter "pageSize": If there are many -// jobs, limit response to at most this many. The actual number of jobs -// returned will be the lesser of max_responses and an unspecified -// server-defined limit. +// jobs, limit response to at most this many. 
+// The actual number of jobs returned will be the lesser of +// max_responses +// and an unspecified server-defined limit. func (c *ProjectsJobsListCall) PageSize(pageSize int64) *ProjectsJobsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Set this to the -// 'next_page_token' field of a previous response to request additional -// results in a long list. +// 'next_page_token' field of a previous response +// to request additional results in a long list. func (c *ProjectsJobsListCall) PageToken(pageToken string) *ProjectsJobsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -5589,6 +6789,7 @@ func (c *ProjectsJobsListCall) PageToken(pageToken string) *ProjectsJobsListCall // "JOB_VIEW_UNKNOWN" // "JOB_VIEW_SUMMARY" // "JOB_VIEW_ALL" +// "JOB_VIEW_DESCRIPTION" func (c *ProjectsJobsListCall) View(view string) *ProjectsJobsListCall { c.urlParams_.Set("view", view) return c @@ -5635,6 +6836,7 @@ func (c *ProjectsJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5689,6 +6891,7 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon return ret, nil // { // "description": "List the jobs of a project.", + // "flatPath": "v1b3/projects/{projectId}/jobs", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.list", // "parameterOrder": [ @@ -5712,13 +6915,13 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "type": "string" // }, // "pageSize": { - // "description": "If there are many jobs, limit response to at most this many. 
The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", + // "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", + // "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", // "location": "query", // "type": "string" // }, @@ -5733,7 +6936,8 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "enum": [ // "JOB_VIEW_UNKNOWN", // "JOB_VIEW_SUMMARY", - // "JOB_VIEW_ALL" + // "JOB_VIEW_ALL", + // "JOB_VIEW_DESCRIPTION" // ], // "location": "query", // "type": "string" @@ -5831,6 +7035,7 @@ func (c *ProjectsJobsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) if err != nil { @@ -5888,6 +7093,7 @@ func (c *ProjectsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job, error) return ret, nil // { // "description": "Updates the state of an existing Cloud Dataflow job.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}", // "httpMethod": "PUT", // "id": "dataflow.projects.jobs.update", // "parameterOrder": [ @@ -5981,6 +7187,7 @@ func (c *ProjectsJobsDebugGetConfigCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getdebugconfigrequest) if err != nil { @@ -6038,6 +7245,7 @@ func (c *ProjectsJobsDebugGetConfigCall) Do(opts ...googleapi.CallOption) (*GetD return ret, nil // { // "description": "Get encoded debug configuration for component. Not cacheable.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/debug/getConfig", // "httpMethod": "POST", // "id": "dataflow.projects.jobs.debug.getConfig", // "parameterOrder": [ @@ -6125,6 +7333,7 @@ func (c *ProjectsJobsDebugSendCaptureCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.senddebugcapturerequest) if err != nil { @@ -6182,6 +7391,7 @@ func (c *ProjectsJobsDebugSendCaptureCall) Do(opts ...googleapi.CallOption) (*Se return ret, nil // { // "description": "Send encoded debug capture data for component.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/debug/sendCapture", // "httpMethod": "POST", // "id": "dataflow.projects.jobs.debug.sendCapture", // "parameterOrder": [ @@ -6238,8 +7448,8 @@ func (r *ProjectsJobsMessagesService) List(projectId string, jobId string) *Proj } // EndTime sets the optional parameter "endTime": Return only messages -// with timestamps < end_time. The default is now (i.e. return up to the -// latest messages available). +// with timestamps < end_time. The default is now +// (i.e. return up to the latest messages available). 
func (c *ProjectsJobsMessagesListCall) EndTime(endTime string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("endTime", endTime) return c @@ -6268,25 +7478,27 @@ func (c *ProjectsJobsMessagesListCall) MinimumImportance(minimumImportance strin } // PageSize sets the optional parameter "pageSize": If specified, -// determines the maximum number of messages to return. If unspecified, -// the service may choose an appropriate default, or may return an -// arbitrarily large number of results. +// determines the maximum number of messages to +// return. If unspecified, the service may choose an +// appropriate +// default, or may return an arbitrarily large number of results. func (c *ProjectsJobsMessagesListCall) PageSize(pageSize int64) *ProjectsJobsMessagesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If supplied, this -// should be the value of next_page_token returned by an earlier call. -// This will cause the next page of results to be returned. +// should be the value of next_page_token returned +// by an earlier call. This will cause the next page of results to +// be returned. func (c *ProjectsJobsMessagesListCall) PageToken(pageToken string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // StartTime sets the optional parameter "startTime": If specified, -// return only messages with timestamps >= start_time. The default is -// the job creation time (i.e. beginning of messages). +// return only messages with timestamps >= start_time. +// The default is the job creation time (i.e. beginning of messages). func (c *ProjectsJobsMessagesListCall) StartTime(startTime string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("startTime", startTime) return c @@ -6333,6 +7545,7 @@ func (c *ProjectsJobsMessagesListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6388,6 +7601,7 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo return ret, nil // { // "description": "Request the job status.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/messages", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.messages.list", // "parameterOrder": [ @@ -6396,7 +7610,8 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo // ], // "parameters": { // "endTime": { - // "description": "Return only messages with timestamps \u003c end_time. The default is now (i.e. return up to the latest messages available).", + // "description": "Return only messages with timestamps \u003c end_time. The default is now\n(i.e. return up to the latest messages available).", + // "format": "google-datetime", // "location": "query", // "type": "string" // }, @@ -6425,13 +7640,13 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo // "type": "string" // }, // "pageSize": { - // "description": "If specified, determines the maximum number of messages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", + // "description": "If specified, determines the maximum number of messages to\nreturn. 
If unspecified, the service may choose an appropriate\ndefault, or may return an arbitrarily large number of results.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", + // "description": "If supplied, this should be the value of next_page_token returned\nby an earlier call. This will cause the next page of results to\nbe returned.", // "location": "query", // "type": "string" // }, @@ -6442,7 +7657,8 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo // "type": "string" // }, // "startTime": { - // "description": "If specified, return only messages with timestamps \u003e= start_time. The default is the job creation time (i.e. beginning of messages).", + // "description": "If specified, return only messages with timestamps \u003e= start_time.\nThe default is the job creation time (i.e. beginning of messages).", + // "format": "google-datetime", // "location": "query", // "type": "string" // } @@ -6532,6 +7748,7 @@ func (c *ProjectsJobsWorkItemsLeaseCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.leaseworkitemrequest) if err != nil { @@ -6589,6 +7806,7 @@ func (c *ProjectsJobsWorkItemsLeaseCall) Do(opts ...googleapi.CallOption) (*Leas return ret, nil // { // "description": "Leases a dataflow WorkItem to run.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/workItems:lease", // "httpMethod": "POST", // "id": "dataflow.projects.jobs.workItems.lease", // "parameterOrder": [ @@ -6677,6 +7895,7 @@ func (c *ProjectsJobsWorkItemsReportStatusCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reportworkitemstatusrequest) if err != nil { @@ -6734,6 +7953,7 @@ func (c *ProjectsJobsWorkItemsReportStatusCall) Do(opts ...googleapi.CallOption) return ret, nil // { // "description": "Reports the status of dataflow WorkItems leased by a worker.", + // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/workItems:reportStatus", // "httpMethod": "POST", // "id": "dataflow.projects.jobs.workItems.reportStatus", // "parameterOrder": [ @@ -6804,6 +8024,7 @@ func (c *ProjectsLocationsJobsCreateCall) ReplaceJobId(replaceJobId string) *Pro // "JOB_VIEW_UNKNOWN" // "JOB_VIEW_SUMMARY" // "JOB_VIEW_ALL" +// "JOB_VIEW_DESCRIPTION" func (c *ProjectsLocationsJobsCreateCall) View(view string) *ProjectsLocationsJobsCreateCall { c.urlParams_.Set("view", view) return c @@ -6840,6 +8061,7 @@ func (c *ProjectsLocationsJobsCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) if err != nil { @@ -6897,6 +8119,7 @@ func (c *ProjectsLocationsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job return ret, nil // { // "description": "Creates a Cloud Dataflow job.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", // "httpMethod": "POST", // "id": 
"dataflow.projects.locations.jobs.create", // "parameterOrder": [ @@ -6926,7 +8149,8 @@ func (c *ProjectsLocationsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job // "enum": [ // "JOB_VIEW_UNKNOWN", // "JOB_VIEW_SUMMARY", - // "JOB_VIEW_ALL" + // "JOB_VIEW_ALL", + // "JOB_VIEW_DESCRIPTION" // ], // "location": "query", // "type": "string" @@ -6976,6 +8200,7 @@ func (r *ProjectsLocationsJobsService) Get(projectId string, location string, jo // "JOB_VIEW_UNKNOWN" // "JOB_VIEW_SUMMARY" // "JOB_VIEW_ALL" +// "JOB_VIEW_DESCRIPTION" func (c *ProjectsLocationsJobsGetCall) View(view string) *ProjectsLocationsJobsGetCall { c.urlParams_.Set("view", view) return c @@ -7022,6 +8247,7 @@ func (c *ProjectsLocationsJobsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7078,6 +8304,7 @@ func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, e return ret, nil // { // "description": "Gets the state of the specified Cloud Dataflow job.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.get", // "parameterOrder": [ @@ -7109,7 +8336,8 @@ func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, e // "enum": [ // "JOB_VIEW_UNKNOWN", // "JOB_VIEW_SUMMARY", - // "JOB_VIEW_ALL" + // "JOB_VIEW_ALL", + // "JOB_VIEW_DESCRIPTION" // ], // "location": "query", // "type": "string" @@ -7150,8 +8378,8 @@ func (r *ProjectsLocationsJobsService) GetMetrics(projectId string, location str } // StartTime sets the optional parameter "startTime": Return only metric -// data that has changed since this time. Default is to return all -// information about all metrics for the job. +// data that has changed since this time. +// Default is to return all information about all metrics for the job. func (c *ProjectsLocationsJobsGetMetricsCall) StartTime(startTime string) *ProjectsLocationsJobsGetMetricsCall { c.urlParams_.Set("startTime", startTime) return c @@ -7198,6 +8426,7 @@ func (c *ProjectsLocationsJobsGetMetricsCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7254,6 +8483,7 @@ func (c *ProjectsLocationsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) ( return ret, nil // { // "description": "Request the job status.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/metrics", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.getMetrics", // "parameterOrder": [ @@ -7281,7 +8511,8 @@ func (c *ProjectsLocationsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "startTime": { - // "description": "Return only metric data that has changed since this time. 
Default is to return all information about all metrics for the job.", + // "description": "Return only metric data that has changed since this time.\nDefault is to return all information about all metrics for the job.", + // "format": "google-datetime", // "location": "query", // "type": "string" // } @@ -7332,17 +8563,18 @@ func (c *ProjectsLocationsJobsListCall) Filter(filter string) *ProjectsLocations } // PageSize sets the optional parameter "pageSize": If there are many -// jobs, limit response to at most this many. The actual number of jobs -// returned will be the lesser of max_responses and an unspecified -// server-defined limit. +// jobs, limit response to at most this many. +// The actual number of jobs returned will be the lesser of +// max_responses +// and an unspecified server-defined limit. func (c *ProjectsLocationsJobsListCall) PageSize(pageSize int64) *ProjectsLocationsJobsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Set this to the -// 'next_page_token' field of a previous response to request additional -// results in a long list. +// 'next_page_token' field of a previous response +// to request additional results in a long list. func (c *ProjectsLocationsJobsListCall) PageToken(pageToken string) *ProjectsLocationsJobsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -7355,6 +8587,7 @@ func (c *ProjectsLocationsJobsListCall) PageToken(pageToken string) *ProjectsLoc // "JOB_VIEW_UNKNOWN" // "JOB_VIEW_SUMMARY" // "JOB_VIEW_ALL" +// "JOB_VIEW_DESCRIPTION" func (c *ProjectsLocationsJobsListCall) View(view string) *ProjectsLocationsJobsListCall { c.urlParams_.Set("view", view) return c @@ -7401,6 +8634,7 @@ func (c *ProjectsLocationsJobsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7456,6 +8690,7 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ return ret, nil // { // "description": "List the jobs of a project.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.list", // "parameterOrder": [ @@ -7481,13 +8716,13 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ // "type": "string" // }, // "pageSize": { - // "description": "If there are many jobs, limit response to at most this many. 
The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", + // "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", + // "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", // "location": "query", // "type": "string" // }, @@ -7502,7 +8737,8 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ // "enum": [ // "JOB_VIEW_UNKNOWN", // "JOB_VIEW_SUMMARY", - // "JOB_VIEW_ALL" + // "JOB_VIEW_ALL", + // "JOB_VIEW_DESCRIPTION" // ], // "location": "query", // "type": "string" @@ -7595,6 +8831,7 @@ func (c *ProjectsLocationsJobsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) if err != nil { @@ -7653,6 +8890,7 @@ func (c *ProjectsLocationsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job return ret, nil // { // "description": "Updates the state of an existing Cloud Dataflow job.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", // "httpMethod": "PUT", // "id": "dataflow.projects.locations.jobs.update", // "parameterOrder": [ @@ -7718,8 +8956,8 @@ func (r *ProjectsLocationsJobsMessagesService) List(projectId string, location s } // EndTime sets the optional parameter "endTime": Return only messages -// with timestamps < end_time. The default is now (i.e. return up to the -// latest messages available). +// with timestamps < end_time. The default is now +// (i.e. return up to the latest messages available). func (c *ProjectsLocationsJobsMessagesListCall) EndTime(endTime string) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("endTime", endTime) return c @@ -7741,25 +8979,27 @@ func (c *ProjectsLocationsJobsMessagesListCall) MinimumImportance(minimumImporta } // PageSize sets the optional parameter "pageSize": If specified, -// determines the maximum number of messages to return. If unspecified, -// the service may choose an appropriate default, or may return an -// arbitrarily large number of results. +// determines the maximum number of messages to +// return. If unspecified, the service may choose an +// appropriate +// default, or may return an arbitrarily large number of results. func (c *ProjectsLocationsJobsMessagesListCall) PageSize(pageSize int64) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If supplied, this -// should be the value of next_page_token returned by an earlier call. -// This will cause the next page of results to be returned. +// should be the value of next_page_token returned +// by an earlier call. This will cause the next page of results to +// be returned. 
func (c *ProjectsLocationsJobsMessagesListCall) PageToken(pageToken string) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // StartTime sets the optional parameter "startTime": If specified, -// return only messages with timestamps >= start_time. The default is -// the job creation time (i.e. beginning of messages). +// return only messages with timestamps >= start_time. +// The default is the job creation time (i.e. beginning of messages). func (c *ProjectsLocationsJobsMessagesListCall) StartTime(startTime string) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("startTime", startTime) return c @@ -7806,6 +9046,7 @@ func (c *ProjectsLocationsJobsMessagesListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7862,6 +9103,7 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) return ret, nil // { // "description": "Request the job status.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/messages", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.messages.list", // "parameterOrder": [ @@ -7871,7 +9113,8 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "endTime": { - // "description": "Return only messages with timestamps \u003c end_time. The default is now (i.e. return up to the latest messages available).", + // "description": "Return only messages with timestamps \u003c end_time. The default is now\n(i.e. return up to the latest messages available).", + // "format": "google-datetime", // "location": "query", // "type": "string" // }, @@ -7901,13 +9144,13 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "pageSize": { - // "description": "If specified, determines the maximum number of messages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", + // "description": "If specified, determines the maximum number of messages to\nreturn. If unspecified, the service may choose an appropriate\ndefault, or may return an arbitrarily large number of results.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", + // "description": "If supplied, this should be the value of next_page_token returned\nby an earlier call. This will cause the next page of results to\nbe returned.", // "location": "query", // "type": "string" // }, @@ -7918,7 +9161,8 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "startTime": { - // "description": "If specified, return only messages with timestamps \u003e= start_time. The default is the job creation time (i.e. beginning of messages).", + // "description": "If specified, return only messages with timestamps \u003e= start_time.\nThe default is the job creation time (i.e. 
beginning of messages).", + // "format": "google-datetime", // "location": "query", // "type": "string" // } @@ -8010,6 +9254,7 @@ func (c *ProjectsLocationsJobsWorkItemsLeaseCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.leaseworkitemrequest) if err != nil { @@ -8068,6 +9313,7 @@ func (c *ProjectsLocationsJobsWorkItemsLeaseCall) Do(opts ...googleapi.CallOptio return ret, nil // { // "description": "Leases a dataflow WorkItem to run.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/workItems:lease", // "httpMethod": "POST", // "id": "dataflow.projects.locations.jobs.workItems.lease", // "parameterOrder": [ @@ -8165,6 +9411,7 @@ func (c *ProjectsLocationsJobsWorkItemsReportStatusCall) doRequest(alt string) ( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reportworkitemstatusrequest) if err != nil { @@ -8223,6 +9470,7 @@ func (c *ProjectsLocationsJobsWorkItemsReportStatusCall) Do(opts ...googleapi.Ca return ret, nil // { // "description": "Reports the status of dataflow WorkItems leased by a worker.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/workItems:reportStatus", // "httpMethod": "POST", // "id": "dataflow.projects.locations.jobs.workItems.reportStatus", // "parameterOrder": [ @@ -8315,6 +9563,7 @@ func (c *ProjectsTemplatesCreateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createjobfromtemplaterequest) if err != nil { @@ -8371,6 +9620,7 @@ func (c *ProjectsTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*Job, er return ret, nil // { // "description": "Creates a Cloud Dataflow job from a template.", + // "flatPath": "v1b3/projects/{projectId}/templates", // "httpMethod": "POST", // "id": "dataflow.projects.templates.create", // "parameterOrder": [ @@ -8398,3 +9648,293 @@ func (c *ProjectsTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*Job, er // } } + +// method id "dataflow.projects.templates.get": + +type ProjectsTemplatesGetCall struct { + s *Service + projectId string + gcsPath string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get the template metadata associated with a template. +func (r *ProjectsTemplatesService) Get(projectId string, gcsPath string) *ProjectsTemplatesGetCall { + c := &ProjectsTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.gcsPath = gcsPath + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTemplatesGetCall) Fields(s ...googleapi.Field) *ProjectsTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsTemplatesGetCall) IfNoneMatch(entityTag string) *ProjectsTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTemplatesGetCall) Context(ctx context.Context) *ProjectsTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/templates/{gcsPath}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "gcsPath": c.gcsPath, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.templates.get" call. +// Exactly one of *TemplateMetadata or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TemplateMetadata.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTemplatesGetCall) Do(opts ...googleapi.CallOption) (*TemplateMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TemplateMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get the template metadata associated with a template.", + // "flatPath": "v1b3/projects/{projectId}/templates/{gcsPath}", + // "httpMethod": "GET", + // "id": "dataflow.projects.templates.get", + // "parameterOrder": [ + // "projectId", + // "gcsPath" + // ], + // "parameters": { + // "gcsPath": { + // "description": "Required. A Cloud Storage path to the template from which to\ncreate the job.\nMust be a valid Cloud Storage URL, beginning with `gs://`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required. 
The ID of the Cloud Platform project that the job belongs to.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/projects/{projectId}/templates/{gcsPath}", + // "response": { + // "$ref": "TemplateMetadata" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.templates.validate": + +type ProjectsTemplatesValidateCall struct { + s *Service + projectId string + createjobfromtemplaterequest *CreateJobFromTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Validate: Validates the parameters set in create job request. The +// response +// includes details about any problematic parameters, and also +// descriptions of +// how parameters have been filled in. +func (r *ProjectsTemplatesService) Validate(projectId string, createjobfromtemplaterequest *CreateJobFromTemplateRequest) *ProjectsTemplatesValidateCall { + c := &ProjectsTemplatesValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.createjobfromtemplaterequest = createjobfromtemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTemplatesValidateCall) Fields(s ...googleapi.Field) *ProjectsTemplatesValidateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTemplatesValidateCall) Context(ctx context.Context) *ProjectsTemplatesValidateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsTemplatesValidateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTemplatesValidateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createjobfromtemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/templates/validate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.templates.validate" call. +// Exactly one of *TemplateValidationResult or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TemplateValidationResult.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsTemplatesValidateCall) Do(opts ...googleapi.CallOption) (*TemplateValidationResult, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TemplateValidationResult{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Validates the parameters set in create job request. The response\nincludes details about any problematic parameters, and also descriptions of\nhow parameters have been filled in.", + // "flatPath": "v1b3/projects/{projectId}/templates/validate", + // "httpMethod": "POST", + // "id": "dataflow.projects.templates.validate", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Required. The ID of the Cloud Platform project that the job belongs to.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/projects/{projectId}/templates/validate", + // "request": { + // "$ref": "CreateJobFromTemplateRequest" + // }, + // "response": { + // "$ref": "TemplateValidationResult" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json b/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json index 0c6e38c52..a87ba467c 100644 --- a/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json +++ b/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json @@ -1,1797 +1,1963 @@ { - "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/UWIx0i13luZLKjW6gmDu2evUqDA\"", - "discoveryVersion": "v1", - "id": "dataproc:v1", - "name": "dataproc", - "version": "v1", - "revision": "20161102", - "title": "Google Cloud Dataproc API", - "description": "An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/dataproc/", - "protocol": "rest", - "baseUrl": "https://dataproc.googleapis.com/", - "basePath": "", - "rootUrl": "https://dataproc.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - 
}, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" - }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query" - }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query" - }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "schemas": { - "Cluster": { - "id": "Cluster", - "type": "object", - "description": "Describes the identifying information, config, and status of a cluster of Google Compute Engine instances.", - "properties": { - "projectId": { - "type": "string", - "description": "[Required] The Google Cloud Platform project ID that the cluster belongs to." - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused." - }, - "config": { - "$ref": "ClusterConfig", - "description": "[Required] The cluster config. Note that Cloud Dataproc may set default values, and values may change when clusters are updated." - }, - "labels": { - "type": "object", - "description": "[Optional] The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", - "additionalProperties": { - "type": "string" - } - }, - "status": { - "$ref": "ClusterStatus", - "description": "[Output-only] Cluster status." - }, - "statusHistory": { - "type": "array", - "description": "[Output-only] The previous cluster status.", - "items": { - "$ref": "ClusterStatus" - } - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster." - }, - "metrics": { - "$ref": "ClusterMetrics", - "description": "Contains cluster daemon metrics such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. 
It may be changed before final release." - } - } - }, - "ClusterConfig": { - "id": "ClusterConfig", - "type": "object", - "description": "The cluster config.", - "properties": { - "configBucket": { - "type": "string", - "description": "[Optional] A Google Cloud Storage staging bucket used for sharing generated SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, and then it will create and manage this project-level, per-location bucket for you." - }, - "gceClusterConfig": { - "$ref": "GceClusterConfig", - "description": "[Required] The shared Google Compute Engine config settings for all instances in a cluster." - }, - "masterConfig": { - "$ref": "InstanceGroupConfig", - "description": "[Optional] The Google Compute Engine config settings for the master instance in a cluster." - }, - "workerConfig": { - "$ref": "InstanceGroupConfig", - "description": "[Optional] The Google Compute Engine config settings for worker instances in a cluster." - }, - "secondaryWorkerConfig": { - "$ref": "InstanceGroupConfig", - "description": "[Optional] The Google Compute Engine config settings for additional worker instances in a cluster." - }, - "softwareConfig": { - "$ref": "SoftwareConfig", - "description": "[Optional] The config settings for software inside the cluster." - }, - "initializationActions": { - "type": "array", - "description": "[Optional] Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", - "items": { - "$ref": "NodeInitializationAction" - } - } - } - }, - "GceClusterConfig": { - "id": "GceClusterConfig", - "type": "object", - "description": "Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", - "properties": { - "zoneUri": { - "type": "string", - "description": "[Required] The zone where the Google Compute Engine cluster will be located. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`." - }, - "networkUri": { - "type": "string", - "description": "[Optional] The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](/compute/docs/subnetworks) for more information). Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`." - }, - "subnetworkUri": { - "type": "string", - "description": "[Optional] The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`." - }, - "internalIpOnly": { - "type": "boolean", - "description": "[Optional] If true, all instances in the cluster will only have internal IP addresses. 
By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses." - }, - "serviceAccountScopes": { - "type": "array", - "description": "[Optional] The URIs of service account scopes to be included in Google Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", - "items": { - "type": "string" - } - }, - "tags": { - "type": "array", - "description": "The Google Compute Engine tags to add to all instances (see [Tagging instances](/compute/docs/label-or-tag-resources#tags)).", - "items": { - "type": "string" - } - }, - "metadata": { - "type": "object", - "description": "The Google Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", - "additionalProperties": { - "type": "string" - } - } - } - }, - "InstanceGroupConfig": { - "id": "InstanceGroupConfig", - "type": "object", - "description": "[Optional] The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.", - "properties": { - "numInstances": { - "type": "integer", - "description": "[Required] The number of VM instances in the instance group. For master instance groups, must be set to 1.", - "format": "int32" - }, - "instanceNames": { - "type": "array", - "description": "[Optional] The list of instance names. Cloud Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group if not set by user (recommended practice is to let Cloud Dataproc derive the name).", - "items": { - "type": "string" - } - }, - "imageUri": { - "type": "string", - "description": "[Output-only] The Google Compute Engine image resource used for cluster instances. Inferred from `SoftwareConfig.image_version`." - }, - "machineTypeUri": { - "type": "string", - "description": "[Required] The Google Compute Engine machine type used for cluster instances. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`." - }, - "diskConfig": { - "$ref": "DiskConfig", - "description": "[Optional] Disk option config settings." - }, - "isPreemptible": { - "type": "boolean", - "description": "[Optional] Specifies that this instance group contains preemptible instances." - }, - "managedGroupConfig": { - "$ref": "ManagedGroupConfig", - "description": "[Output-only] The config for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups." 
- } - } - }, - "DiskConfig": { - "id": "DiskConfig", - "type": "object", - "description": "Specifies the config of disk options for a group of VM instances.", - "properties": { - "bootDiskSizeGb": { - "type": "integer", - "description": "[Optional] Size in GB of the boot disk (default is 500GB).", - "format": "int32" - }, - "numLocalSsds": { - "type": "integer", - "description": "[Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - "format": "int32" - } - } - }, - "ManagedGroupConfig": { - "id": "ManagedGroupConfig", - "type": "object", - "description": "Specifies the resources used to actively manage an instance group.", - "properties": { - "instanceTemplateName": { - "type": "string", - "description": "[Output-only] The name of the Instance Template used for the Managed Instance Group." - }, - "instanceGroupManagerName": { - "type": "string", - "description": "[Output-only] The name of the Instance Group Manager for this group." + "discoveryVersion": "v1", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "regions": { + "resources": { + "operations": { + "methods": { + "delete": { + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", + "httpMethod": "DELETE", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "location": "path", + "description": "The name of the operation resource to be deleted.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", + "id": "dataproc.projects.regions.operations.delete", + "path": "v1/{+name}" + }, + "list": { + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.", + "httpMethod": "GET", + "response": { + "$ref": "ListOperationsResponse" + }, + "parameterOrder": [ + "name" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the operation collection.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/regions/[^/]+/operations$", + "location": "path" + }, + "pageToken": { + "description": "The standard list page token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + }, + "filter": { + "description": "The standard list filter.", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", + "id": "dataproc.projects.regions.operations.list", + "path": "v1/{+name}" + }, + "get": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "description": "The name of the operation resource.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", + "path": "v1/{+name}", + "id": "dataproc.projects.regions.operations.get", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service." + }, + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "httpMethod": "POST", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:cancel", + "id": "dataproc.projects.regions.operations.cancel", + "path": "v1/{+name}:cancel" + } + } + }, + "jobs": { + "methods": { + "cancel": { + "response": { + "$ref": "Job" + }, + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "jobId": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The job ID." + }, + "region": { + "location": "path", + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", + "id": "dataproc.projects.regions.jobs.cancel", + "description": "Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get.", + "request": { + "$ref": "CancelJobRequest" + } + }, + "get": { + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "id": "dataproc.projects.regions.jobs.get", + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "description": "Gets the resource representation for a job in a project.", + "httpMethod": "GET", + "response": { + "$ref": "Job" + }, + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + }, + "region": { + "location": "path", + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a job in a project.", + "request": { + "$ref": "Job" + }, + "response": { + "$ref": "Job" + }, + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + }, + "region": { + "location": "path", + 
"description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. \u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "id": "dataproc.projects.regions.jobs.patch" + }, + "submit": { + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submit", + "path": "v1/projects/{projectId}/regions/{region}/jobs:submit", + "id": "dataproc.projects.regions.jobs.submit", + "description": "Submits a job to a cluster.", + "request": { + "$ref": "SubmitJobRequest" + }, + "response": { + "$ref": "Job" + }, + "parameterOrder": [ + "projectId", + "region" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "region": { + "location": "path", + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string" + } + } + }, + "delete": { + "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "httpMethod": "DELETE", + "parameters": { + "projectId": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to." + }, + "jobId": { + "description": "Required The job ID.", + "required": true, + "type": "string", + "location": "path" + }, + "region": { + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "id": "dataproc.projects.regions.jobs.delete" + }, + "list": { + "flatPath": "v1/projects/{projectId}/regions/{region}/jobs", + "id": "dataproc.projects.regions.jobs.list", + "path": "v1/projects/{projectId}/regions/{region}/jobs", + "description": "Lists regions/{region}/jobs in a project.", + "httpMethod": "GET", + "response": { + "$ref": "ListJobsResponse" + }, + "parameterOrder": [ + "projectId", + "region" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "filter": { + "location": "query", + "description": "Optional A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. 
status.state can be either ACTIVE or INACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *", + "type": "string" + }, + "jobStateMatcher": { + "location": "query", + "enum": [ + "ALL", + "ACTIVE", + "NON_ACTIVE" + ], + "description": "Optional Specifies enumerated categories of jobs to list (default = match ALL jobs).", + "type": "string" + }, + "pageToken": { + "description": "Optional The page token, returned by a previous call, to request the next page of results.", + "type": "string", + "location": "query" + }, + "pageSize": { + "type": "integer", + "location": "query", + "description": "Optional The number of results to return in each response.", + "format": "int32" + }, + "region": { + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string", + "location": "path" + }, + "clusterName": { + "type": "string", + "location": "query", + "description": "Optional If set, the returned jobs list includes only jobs that were submitted to the named cluster." + }, + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + } + } + } + } + }, + "clusters": { + "methods": { + "get": { + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "id": "dataproc.projects.regions.clusters.get", + "description": "Gets the resource representation for a cluster in a project.", + "response": { + "$ref": "Cluster" + }, + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "region": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The Cloud Dataproc region in which to handle the request." + }, + "clusterName": { + "description": "Required The cluster name.", + "required": true, + "type": "string", + "location": "path" + } + } + }, + "patch": { + "description": "Updates a cluster in a project.", + "request": { + "$ref": "Cluster" + }, + "httpMethod": "PATCH", + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "region": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The Cloud Dataproc region in which to handle the request." + }, + "updateMask": { + "description": "Required Specifies the path, relative to \u003ccode\u003eCluster\u003c/code\u003e, of the field to update. 
For example, to change the number of workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003econfig.worker_config.num_instances\u003c/code\u003e, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003econfig.worker_config.num_instances\u003c/code\u003e and \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e are the only fields that can be updated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + }, + "clusterName": { + "location": "path", + "description": "Required The cluster name.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "id": "dataproc.projects.regions.clusters.patch", + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}" + }, + "diagnose": { + "httpMethod": "POST", + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "clusterName": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The cluster name." + }, + "projectId": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to." + }, + "region": { + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", + "id": "dataproc.projects.regions.clusters.diagnose", + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", + "request": { + "$ref": "DiagnoseClusterRequest" + }, + "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation." 
+ }, + "delete": { + "id": "dataproc.projects.regions.clusters.delete", + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "description": "Deletes a cluster in a project.", + "httpMethod": "DELETE", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterName": { + "description": "Required The cluster name.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "region": { + "location": "path", + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}" + }, + "list": { + "id": "dataproc.projects.regions.clusters.list", + "path": "v1/projects/{projectId}/regions/{region}/clusters", + "description": "Lists all regions/{region}/clusters in a project.", + "httpMethod": "GET", + "parameterOrder": [ + "projectId", + "region" + ], + "response": { + "$ref": "ListClustersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "region": { + "location": "path", + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string" + }, + "filter": { + "location": "query", + "description": "Optional A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING and ERROR states. clusterName is the name of the cluster provided at creation time. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *", + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "Optional The standard List page token.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Optional The standard List page size.", + "format": "int32", + "type": "integer" + }, + "projectId": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to." 
+ } + }, + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters" + }, + "create": { + "request": { + "$ref": "Cluster" + }, + "description": "Creates a cluster in a project.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "region" + ], + "httpMethod": "POST", + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string" + }, + "region": { + "description": "Required The Cloud Dataproc region in which to handle the request.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters", + "path": "v1/projects/{projectId}/regions/{region}/clusters", + "id": "dataproc.projects.regions.clusters.create" + } + } + } + } + } + } } - } }, - "SoftwareConfig": { - "id": "SoftwareConfig", - "type": "object", - "description": "Specifies the selection and config of software inside the cluster.", - "properties": { - "imageVersion": { - "type": "string", - "description": "[Optional] The version of software inside the cluster. It must match the regular expression `[0-9]+\\.[0-9]+`. If unspecified, it defaults to the latest version (see [Cloud Dataproc Versioning](/dataproc/versioning))." - }, - "properties": { - "type": "object", - "description": "[Optional] The properties to set on daemon config files. Property keys are specified in `prefix:property` format, such as `core:fs.defaultFS`. The following are supported prefixes and their mappings: * core: `core-site.xml` * hdfs: `hdfs-site.xml` * mapred: `mapred-site.xml` * yarn: `yarn-site.xml` * hive: `hive-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf`", - "additionalProperties": { + "parameters": { + "upload_protocol": { + "type": "string", + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\")." + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", "type": "string" - } - } - } - }, - "NodeInitializationAction": { - "id": "NodeInitializationAction", - "type": "object", - "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", - "properties": { - "executableFile": { - "type": "string", - "description": "[Required] Google Cloud Storage URI of executable file." - }, - "executionTimeout": { - "type": "string", - "description": "[Optional] Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period." - } - } - }, - "ClusterStatus": { - "id": "ClusterStatus", - "type": "object", - "description": "The status of a cluster and its instances.", - "properties": { - "state": { - "type": "string", - "description": "[Output-only] The cluster's state.", - "enum": [ - "UNKNOWN", - "CREATING", - "RUNNING", - "ERROR", - "DELETING", - "UPDATING" - ] - }, - "detail": { - "type": "string", - "description": "[Output-only] Optional details of cluster's state." 
}, - "stateStartTime": { - "type": "string", - "description": "[Output-only] Time when this state was entered." - } - } - }, - "ClusterMetrics": { - "id": "ClusterMetrics", - "type": "object", - "description": "Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.", - "properties": { - "hdfsMetrics": { - "type": "object", - "description": "The HDFS metrics.", - "additionalProperties": { + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", "type": "string", - "format": "int64" - } + "location": "query" }, - "yarnMetrics": { - "type": "object", - "description": "The YARN metrics.", - "additionalProperties": { + "callback": { + "description": "JSONP", "type": "string", - "format": "int64" - } - } - } - }, - "Operation": { - "id": "Operation", - "type": "object", - "description": "This resource represents a long-running operation that is the result of a network API call.", - "properties": { - "name": { - "type": "string", - "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should have the format of `operations/some/unique/name`." - }, - "metadata": { - "type": "object", - "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - }, - "done": { - "type": "boolean", - "description": "If the value is `false`, it means the operation is still in progress. If true, the operation is completed, and either `error` or `response` is available." - }, - "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure or cancellation." - }, - "response": { - "type": "object", - "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - } - } - }, - "Status": { - "id": "Status", - "type": "object", - "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). The error model is designed to be: - Simple to use and understand for most users - Flexible enough to meet unexpected needs # Overview The `Status` message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers *understand* and *resolve* the error. 
If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package `google.rpc` which can be used for common error conditions. # Language mapping The `Status` message is the logical representation of the error model, but it is not necessarily the actual wire format. When the `Status` message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C. # Other uses The error model and the `Status` message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments. Example uses of this error model include: - Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors. - Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose. - Batch operations. If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response. - Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message. - Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.", - "properties": { - "code": { - "type": "integer", - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32" - }, - "message": { - "type": "string", - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client." - }, - "details": { - "type": "array", - "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", - "items": { - "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - } - } - } - }, - "ListClustersResponse": { - "id": "ListClustersResponse", - "type": "object", - "description": "The list of all clusters in a project.", - "properties": { - "clusters": { - "type": "array", - "description": "[Output-only] The clusters in the project.", - "items": { - "$ref": "Cluster" - } - }, - "nextPageToken": { - "type": "string", - "description": "[Output-only] This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the `page_token` in a subsequent ListClustersRequest." - } - } - }, - "DiagnoseClusterRequest": { - "id": "DiagnoseClusterRequest", - "type": "object", - "description": "A request to collect cluster diagnostic information." - }, - "SubmitJobRequest": { - "id": "SubmitJobRequest", - "type": "object", - "description": "A request to submit a job.", - "properties": { - "job": { - "$ref": "Job", - "description": "[Required] The job resource." 
- } - } - }, - "Job": { - "id": "Job", - "type": "object", - "description": "A Cloud Dataproc job resource.", - "properties": { - "reference": { - "$ref": "JobReference", - "description": "[Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." - }, - "placement": { - "$ref": "JobPlacement", - "description": "[Required] Job information, including how, when, and where to run the job." - }, - "hadoopJob": { - "$ref": "HadoopJob", - "description": "Job is a Hadoop job." - }, - "sparkJob": { - "$ref": "SparkJob", - "description": "Job is a Spark job." - }, - "pysparkJob": { - "$ref": "PySparkJob", - "description": "Job is a Pyspark job." - }, - "hiveJob": { - "$ref": "HiveJob", - "description": "Job is a Hive job." - }, - "pigJob": { - "$ref": "PigJob", - "description": "Job is a Pig job." - }, - "sparkSqlJob": { - "$ref": "SparkSqlJob", - "description": "Job is a SparkSql job." - }, - "status": { - "$ref": "JobStatus", - "description": "[Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields." + "location": "query" }, - "statusHistory": { - "type": "array", - "description": "[Output-only] The previous job status.", - "items": { - "$ref": "JobStatus" - } - }, - "yarnApplications": { - "type": "array", - "description": "[Output-only] The collection of YARN applications spun up by this job. **Beta** Feature: This report is available for testing purposes only. It may be changed before final release.", - "items": { - "$ref": "YarnApplication" - } - }, - "driverOutputResourceUri": { - "type": "string", - "description": "[Output-only] A URI pointing to the location of the stdout of the job's driver program." - }, - "driverControlFilesUri": { - "type": "string", - "description": "[Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`." - }, - "labels": { - "type": "object", - "description": "[Optional] The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "JobReference": { - "id": "JobReference", - "type": "object", - "description": "Encapsulates the full scoping used to reference a job.", - "properties": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to." - }, - "jobId": { - "type": "string", - "description": "[Optional] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters." 
- } - } - }, - "JobPlacement": { - "id": "JobPlacement", - "type": "object", - "description": "Cloud Dataproc job config.", - "properties": { - "clusterName": { - "type": "string", - "description": "[Required] The name of the cluster where the job will be submitted." - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] A cluster UUID generated by the Cloud Dataproc service when the job is submitted." - } - } - }, - "HadoopJob": { - "id": "HadoopJob", - "type": "object", - "description": "A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).", - "properties": { - "mainJarFileUri": { - "type": "string", - "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'" - }, - "mainClass": { - "type": "string", - "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } - }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", - "items": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "[Optional] The runtime log config for job execution." - } - } - }, - "LoggingConfig": { - "id": "LoggingConfig", - "type": "object", - "description": "The runtime logging config of the job.", - "properties": { - "driverLogLevels": { - "type": "object", - "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - "additionalProperties": { + "$.xgafv": { + "description": "V1 error format.", "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", "enum": [ - "LEVEL_UNSPECIFIED", - "ALL", - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL", - "OFF" + "1", + "2" ] - } - } - } - }, - "SparkJob": { - "id": "SparkJob", - "type": "object", - "description": "A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN.", - "properties": { - "mainJarFileUri": { - "type": "string", - "description": "The HCFS URI of the jar file that contains the main class." - }, - "mainClass": { - "type": "string", - "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } - }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "[Optional] The runtime log config for job execution." - } - } - }, - "PySparkJob": { - "id": "PySparkJob", - "type": "object", - "description": "A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.", - "properties": { - "mainPythonFileUri": { - "type": "string", - "description": "[Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } - }, - "pythonFileUris": { - "type": "array", - "description": "[Optional] HCFS file URIs of Python files to pass to the PySpark framework. 
Supported file types: .py, .egg, and .zip.", - "items": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } - }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "[Optional] The runtime log config for job execution." - } - } - }, - "HiveJob": { - "id": "HiveJob", - "type": "object", - "description": "A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains Hive queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "continueOnFailure": { - "type": "boolean", - "description": "[Optional] Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries." - }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", - "additionalProperties": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", - "items": { - "type": "string" - } - } - } - }, - "QueryList": { - "id": "QueryList", - "type": "object", - "description": "A list of queries to run on a cluster.", - "properties": { - "queries": { - "type": "array", - "description": "[Required] The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - "items": { - "type": "string" - } - } - } - }, - "PigJob": { - "id": "PigJob", - "type": "object", - "description": "A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains the Pig queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "continueOnFailure": { - "type": "boolean", - "description": "[Optional] Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries." - }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", - "additionalProperties": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", - "items": { + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", "type": "string" - } }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "[Optional] The runtime log config for job execution." - } - } - }, - "SparkSqlJob": { - "id": "SparkSqlJob", - "type": "object", - "description": "A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains SQL queries." + "key": { + "type": "string", + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token." }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", - "additionalProperties": { - "type": "string" - } + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.", - "additionalProperties": { + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", "type": "string" - } }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH.", - "items": { + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", "type": "string" - } - }, - "loggingConfig": { - "$ref": "LoggingConfig", - "description": "[Optional] The runtime log config for job execution." - } - } - }, - "JobStatus": { - "id": "JobStatus", - "type": "object", - "description": "Cloud Dataproc job status.", - "properties": { - "state": { - "type": "string", - "description": "[Output-only] A state message specifying the overall job state.", - "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "SETUP_DONE", - "RUNNING", - "CANCEL_PENDING", - "CANCEL_STARTED", - "CANCELLED", - "DONE", - "ERROR" - ] - }, - "details": { - "type": "string", - "description": "[Output-only] Optional job state details, such as an error description if the state is ERROR." - }, - "stateStartTime": { - "type": "string", - "description": "[Output-only] The time when this state was entered." - } - } - }, - "YarnApplication": { - "id": "YarnApplication", - "type": "object", - "description": "A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.", - "properties": { - "name": { - "type": "string", - "description": "[Required] The application name." - }, - "state": { - "type": "string", - "description": "[Required] The application state.", - "enum": [ - "STATE_UNSPECIFIED", - "NEW", - "NEW_SAVING", - "SUBMITTED", - "ACCEPTED", - "RUNNING", - "FINISHED", - "FAILED", - "KILLED" - ] - }, - "progress": { - "type": "number", - "description": "[Required] The numerical progress of the application, from 1 to 100.", - "format": "float" - }, - "trackingUrl": { - "type": "string", - "description": "[Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access." - } - } - }, - "ListJobsResponse": { - "id": "ListJobsResponse", - "type": "object", - "description": "A list of jobs in a project.", - "properties": { - "jobs": { - "type": "array", - "description": "[Output-only] Jobs list.", - "items": { - "$ref": "Job" - } - }, - "nextPageToken": { - "type": "string", - "description": "[Optional] This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the `page_token` in a subsequent ListJobsRequest." - } - } - }, - "CancelJobRequest": { - "id": "CancelJobRequest", - "type": "object", - "description": "A request to cancel a job." - }, - "Empty": { - "id": "Empty", - "type": "object", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`." - }, - "ListOperationsResponse": { - "id": "ListOperationsResponse", - "type": "object", - "description": "The response message for Operations.ListOperations.", - "properties": { - "operations": { - "type": "array", - "description": "A list of operations that matches the specified filter in the request.", - "items": { - "$ref": "Operation" - } - }, - "nextPageToken": { - "type": "string", - "description": "The standard List next-page token." - } - } - }, - "DiagnoseClusterResults": { - "id": "DiagnoseClusterResults", - "type": "object", - "description": "The location of diagnostic output.", - "properties": { - "outputUri": { - "type": "string", - "description": "[Output-only] The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics." } - } }, - "ClusterOperationMetadata": { - "id": "ClusterOperationMetadata", - "type": "object", - "description": "Metadata describing the operation.", - "properties": { - "clusterName": { - "type": "string", - "description": "[Output-only] Name of the cluster for the operation." - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] Cluster UUID for the operation." - }, - "status": { - "$ref": "ClusterOperationStatus", - "description": "[Output-only] Current operation status." + "schemas": { + "PySparkJob": { + "description": "A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.", + "type": "object", + "properties": { + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional The runtime log config for job execution." + }, + "properties": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code." + }, + "args": { + "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "type": "array", + "items": { + "type": "string" + } + }, + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "pythonFileUris": { + "description": "Optional HCFS file URIs of Python files to pass to the PySpark framework. 
Supported file types: .py, .egg, and .zip.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainPythonFileUri": { + "description": "Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + "type": "string" + }, + "archiveUris": { + "description": "Optional HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "PySparkJob" }, - "statusHistory": { - "type": "array", - "description": "[Output-only] The previous operation status.", - "items": { - "$ref": "ClusterOperationStatus" - } + "GceClusterConfig": { + "description": "Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", + "type": "object", + "properties": { + "internalIpOnly": { + "description": "Optional If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + "type": "boolean" + }, + "metadata": { + "description": "The Google Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "serviceAccountScopes": { + "description": "Optional The URIs of service account scopes to be included in Google Compute Engine instances. The following base set of scopes is always included:\nhttps://www.googleapis.com/auth/cloud.useraccounts.readonly\nhttps://www.googleapis.com/auth/devstorage.read_write\nhttps://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided:\nhttps://www.googleapis.com/auth/bigquery\nhttps://www.googleapis.com/auth/bigtable.admin.table\nhttps://www.googleapis.com/auth/bigtable.data\nhttps://www.googleapis.com/auth/devstorage.full_control", + "type": "array", + "items": { + "type": "string" + } + }, + "tags": { + "description": "The Google Compute Engine tags to add to all instances (see Tagging instances).", + "type": "array", + "items": { + "type": "string" + } + }, + "serviceAccount": { + "description": "Optional The service account of the instances. Defaults to the default Google Compute Engine service account. Custom service accounts need permissions equivalent to the folloing IAM roles:\nroles/logging.logWriter\nroles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: [account_id]@[project_id].iam.gserviceaccount.com", + "type": "string" + }, + "subnetworkUri": { + "description": "Optional The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.", + "type": "string" + }, + "networkUri": { + "description": "Optional The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. 
Cannot be a \"Custom Subnet Network\" (see Using Subnetworks for more information). Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default.", + "type": "string" + }, + "zoneUri": { + "description": "Required The zone where the Google Compute Engine cluster will be located. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone].", + "type": "string" + } + }, + "id": "GceClusterConfig" }, - "operationType": { - "type": "string", - "description": "[Output-only] The operation type." + "ClusterMetrics": { + "type": "object", + "properties": { + "yarnMetrics": { + "additionalProperties": { + "format": "int64", + "type": "string" + }, + "description": "The YARN metrics.", + "type": "object" + }, + "hdfsMetrics": { + "description": "The HDFS metrics.", + "type": "object", + "additionalProperties": { + "format": "int64", + "type": "string" + } + } + }, + "id": "ClusterMetrics", + "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release." + }, + "LoggingConfig": { + "properties": { + "driverLogLevels": { + "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + "type": "object", + "additionalProperties": { + "enum": [ + "LEVEL_UNSPECIFIED", + "ALL", + "TRACE", + "DEBUG", + "INFO", + "WARN", + "ERROR", + "FATAL", + "OFF" + ], + "type": "string" + } + } + }, + "id": "LoggingConfig", + "description": "The runtime logging config of the job.", + "type": "object" }, - "description": { - "type": "string", - "description": "[Output-only] Short description of operation." + "DiagnoseClusterOutputLocation": { + "description": "The location where output from diagnostic command can be found.", + "type": "object", + "properties": { + "outputUri": { + "description": "Output-only The Google Cloud Storage URI of the diagnostic output. This will be a plain text file with summary of collected diagnostics.", + "type": "string" + } + }, + "id": "DiagnoseClusterOutputLocation" }, - "labels": { - "type": "object", - "description": "[Output-only] labels associated with the operation", - "additionalProperties": { - "type": "string" - } - } - } - }, - "ClusterOperationStatus": { - "id": "ClusterOperationStatus", - "type": "object", - "description": "The status of the operation.", - "properties": { - "state": { - "type": "string", - "description": "[Output-only] A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "Operation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "type": "object", + "properties": { + "done": { + "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", + "type": "boolean" + }, + "response": { + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. 
For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should have the format of operations/some/unique/name.", + "type": "string" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any." + } + }, + "id": "Operation" }, - "innerState": { - "type": "string", - "description": "[Output-only] A message containing the detailed operation state." + "OperationStatus": { + "properties": { + "innerState": { + "description": "A message containing the detailed operation state.", + "type": "string" + }, + "stateStartTime": { + "description": "The time this state was entered.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "A message containing the operation state.", + "type": "string", + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is running.", + "The operation is done; either cancelled or completed." + ], + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ] + }, + "details": { + "description": "A message containing any operation metadata details.", + "type": "string" + } + }, + "id": "OperationStatus", + "description": "The status of the operation.", + "type": "object" }, - "details": { - "type": "string", - "description": "[Output-only]A message containing any operation metadata details." + "JobReference": { + "description": "Encapsulates the full scoping used to reference a job.", + "type": "object", + "properties": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "type": "string" + }, + "jobId": { + "description": "Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.", + "type": "string" + } + }, + "id": "JobReference" }, - "stateStartTime": { - "type": "string", - "description": "[Output-only] The time this state was entered." - } - } - }, - "DiagnoseClusterOutputLocation": { - "id": "DiagnoseClusterOutputLocation", - "type": "object", - "description": "The location where output from diagnostic command can be found.", - "properties": { - "outputUri": { - "type": "string", - "description": "[Output-only] The Google Cloud Storage URI of the diagnostic output. This will be a plain text file with summary of collected diagnostics." 
- } - } - }, - "OperationMetadata": { - "id": "OperationMetadata", - "type": "object", - "description": "Metadata describing the operation.", - "properties": { - "state": { - "type": "string", - "description": "A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "SubmitJobRequest": { + "type": "object", + "properties": { + "job": { + "$ref": "Job", + "description": "Required The job resource." + } + }, + "id": "SubmitJobRequest", + "description": "A request to submit a job." }, - "innerState": { - "type": "string", - "description": "A message containing the detailed operation state." + "Status": { + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc which can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting purpose.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "details": { + "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. 
Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "id": "Status" }, - "details": { - "type": "string", - "description": "A message containing any operation metadata details." + "InstanceGroupConfig": { + "description": "Optional The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.", + "type": "object", + "properties": { + "diskConfig": { + "description": "Optional Disk option config settings.", + "$ref": "DiskConfig" + }, + "imageUri": { + "description": "Output-only The Google Compute Engine image resource used for cluster instances. Inferred from SoftwareConfig.image_version.", + "type": "string" + }, + "machineTypeUri": { + "description": "Required The Google Compute Engine machine type used for cluster instances. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2.", + "type": "string" + }, + "managedGroupConfig": { + "description": "Output-only The config for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + "$ref": "ManagedGroupConfig" + }, + "isPreemptible": { + "type": "boolean", + "description": "Optional Specifies that this instance group contains preemptible instances." + }, + "instanceNames": { + "description": "Optional The list of instance names. Cloud Dataproc derives the names from cluster_name, num_instances, and the instance group if not set by user (recommended practice is to let Cloud Dataproc derive the name).", + "type": "array", + "items": { + "type": "string" + } + }, + "numInstances": { + "type": "integer", + "description": "Required The number of VM instances in the instance group. For master instance groups, must be set to 1.", + "format": "int32" + } + }, + "id": "InstanceGroupConfig" }, - "insertTime": { - "type": "string", - "description": "The time that the operation was requested." + "JobScheduling": { + "description": "Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release.", + "type": "object", + "properties": { + "maxFailuresPerHour": { + "description": "Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.", + "format": "int32", + "type": "integer" + } + }, + "id": "JobScheduling" }, - "startTime": { - "type": "string", - "description": "The time that the operation was started by the server." + "ListJobsResponse": { + "description": "A list of jobs in a project.", + "type": "object", + "properties": { + "jobs": { + "description": "Output-only Jobs list.", + "type": "array", + "items": { + "$ref": "Job" + } + }, + "nextPageToken": { + "description": "Optional This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListJobsRequest\u003c/code\u003e.", + "type": "string" + } + }, + "id": "ListJobsResponse" + }, + "NodeInitializationAction": { + "properties": { + "executionTimeout": { + "description": "Optional Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + "format": "google-duration", + "type": "string" + }, + "executableFile": { + "description": "Required Google Cloud Storage URI of executable file.", + "type": "string" + } + }, + "id": "NodeInitializationAction", + "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", + "type": "object" }, - "endTime": { - "type": "string", - "description": "The time that the operation completed." + "CancelJobRequest": { + "description": "A request to cancel a job.", + "type": "object", + "properties": {}, + "id": "CancelJobRequest" + }, + "SparkSqlJob": { + "properties": { + "queryFileUri": { + "description": "The HCFS URI of the script that contains SQL queries.", + "type": "string" + }, + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "scriptVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", + "type": "object" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.", + "type": "array", + "items": { + "type": "string" + } + }, + "loggingConfig": { + "description": "Optional The runtime log config for job execution.", + "$ref": "LoggingConfig" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.", + "type": "object" + } + }, + "id": "SparkSqlJob", + "description": "A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries.", + "type": "object" }, - "clusterName": { - "type": "string", - "description": "Name of the cluster for the operation." + "Cluster": { + "description": "Describes the identifying information, config, and status of a cluster of Google Compute Engine instances.", + "type": "object", + "properties": { + "projectId": { + "description": "Required The Google Cloud Platform project ID that the cluster belongs to.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", + "type": "object" + }, + "metrics": { + "$ref": "ClusterMetrics", + "description": "Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release." 
+ }, + "status": { + "description": "Output-only Cluster status.", + "$ref": "ClusterStatus" + }, + "statusHistory": { + "description": "Output-only The previous cluster status.", + "type": "array", + "items": { + "$ref": "ClusterStatus" + } + }, + "config": { + "$ref": "ClusterConfig", + "description": "Required The cluster config. Note that Cloud Dataproc may set default values, and values may change when clusters are updated." + }, + "clusterName": { + "description": "Required The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused.", + "type": "string" + }, + "clusterUuid": { + "description": "Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster.", + "type": "string" + } + }, + "id": "Cluster" }, - "clusterUuid": { - "type": "string", - "description": "Cluster UUId for the operation." + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "type": "array", + "items": { + "$ref": "Operation" + }, + "description": "A list of operations that matches the specified filter in the request." + } + }, + "id": "ListOperationsResponse" }, - "status": { - "$ref": "OperationStatus", - "description": "[Output-only] Current operation status." + "OperationMetadata": { + "description": "Metadata describing the operation.", + "type": "object", + "properties": { + "startTime": { + "description": "The time that the operation was started by the server.", + "format": "google-datetime", + "type": "string" + }, + "warnings": { + "description": "Output-only Errors encountered during operation execution.", + "type": "array", + "items": { + "type": "string" + } + }, + "insertTime": { + "description": "The time that the operation was requested.", + "format": "google-datetime", + "type": "string" + }, + "statusHistory": { + "description": "Output-only Previous operation status.", + "type": "array", + "items": { + "$ref": "OperationStatus" + } + }, + "operationType": { + "type": "string", + "description": "Output-only The operation type." + }, + "description": { + "type": "string", + "description": "Output-only Short description of operation." + }, + "status": { + "$ref": "OperationStatus", + "description": "Output-only Current operation status." + }, + "state": { + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ], + "description": "A message containing the operation state.", + "type": "string", + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is currently running.", + "The operation is done, either cancelled or completed." + ] + }, + "details": { + "description": "A message containing any operation metadata details.", + "type": "string" + }, + "clusterName": { + "description": "Name of the cluster for the operation.", + "type": "string" + }, + "clusterUuid": { + "description": "Cluster UUId for the operation.", + "type": "string" + }, + "innerState": { + "type": "string", + "description": "A message containing the detailed operation state." 
+ }, + "endTime": { + "description": "The time that the operation completed.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "OperationMetadata" }, - "statusHistory": { - "type": "array", - "description": "[Output-only] Previous operation status.", - "items": { - "$ref": "OperationStatus" - } + "SoftwareConfig": { + "type": "object", + "properties": { + "properties": { + "description": "Optional The properties to set on daemon config files.Property keys are specified in prefix:property format, such as core:fs.defaultFS. The following are supported prefixes and their mappings:\ncore: core-site.xml\nhdfs: hdfs-site.xml\nmapred: mapred-site.xml\nyarn: yarn-site.xml\nhive: hive-site.xml\npig: pig.properties\nspark: spark-defaults.conf", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "imageVersion": { + "description": "Optional The version of software inside the cluster. It must match the regular expression [0-9]+\\.[0-9]+. If unspecified, it defaults to the latest version (see Cloud Dataproc Versioning).", + "type": "string" + } + }, + "id": "SoftwareConfig", + "description": "Specifies the selection and config of software inside the cluster." }, - "operationType": { - "type": "string", - "description": "[Output-only] The operation type." + "JobPlacement": { + "description": "Cloud Dataproc job config.", + "type": "object", + "properties": { + "clusterName": { + "description": "Required The name of the cluster where the job will be submitted.", + "type": "string" + }, + "clusterUuid": { + "description": "Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.", + "type": "string" + } + }, + "id": "JobPlacement" }, - "description": { - "type": "string", - "description": "[Output-only] Short description of operation." - } - } - }, - "OperationStatus": { - "id": "OperationStatus", - "type": "object", - "description": "The status of the operation.", - "properties": { - "state": { - "type": "string", - "description": "A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "PigJob": { + "description": "A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.", + "type": "object", + "properties": { + "continueOnFailure": { + "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" + }, + "queryList": { + "description": "A list of queries.", + "$ref": "QueryList" + }, + "queryFileUri": { + "description": "The HCFS URI of the script that contains the Pig queries.", + "type": "string" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + "type": "array", + "items": { + "type": "string" + } + }, + "scriptVariables": { + "description": "Optional Mapping of query variable names to values (equivalent to the Pig command: name=[value]).", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "loggingConfig": { + "description": "Optional The runtime log config for job execution.", + "$ref": "LoggingConfig" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Pig. 
Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + "type": "object" + } + }, + "id": "PigJob" }, - "innerState": { - "type": "string", - "description": "A message containing the detailed operation state." + "ClusterStatus": { + "description": "The status of a cluster and its instances.", + "type": "object", + "properties": { + "detail": { + "description": "Output-only Optional details of cluster's state.", + "type": "string" + }, + "state": { + "enum": [ + "UNKNOWN", + "CREATING", + "RUNNING", + "ERROR", + "DELETING", + "UPDATING" + ], + "description": "Output-only The cluster's state.", + "type": "string", + "enumDescriptions": [ + "The cluster state is unknown.", + "The cluster is being created and set up. It is not ready for use.", + "The cluster is currently running and healthy. It is ready for use.", + "The cluster encountered an error. It is not ready for use.", + "The cluster is being deleted. It cannot be used.", + "The cluster is being updated. It continues to accept and process jobs." + ] + }, + "stateStartTime": { + "description": "Output-only Time when this state was entered.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "ClusterStatus" }, - "details": { - "type": "string", - "description": "A message containing any operation metadata details." + "ListClustersResponse": { + "description": "The list of all clusters in a project.", + "type": "object", + "properties": { + "clusters": { + "description": "Output-only The clusters in the project.", + "type": "array", + "items": { + "$ref": "Cluster" + } + }, + "nextPageToken": { + "description": "Output-only This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListClustersRequest\u003c/code\u003e.", + "type": "string" + } + }, + "id": "ListClustersResponse" }, - "stateStartTime": { - "type": "string", - "description": "The time this state was entered." 
- } - } - } - }, - "resources": { - "projects": { - "resources": { - "regions": { - "resources": { - "clusters": { - "methods": { - "create": { - "id": "dataproc.projects.regions.clusters.create", - "path": "v1/projects/{projectId}/regions/{region}/clusters", - "httpMethod": "POST", - "description": "Creates a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" + "SparkJob": { + "description": "A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN.", + "type": "object", + "properties": { + "mainJarFileUri": { + "description": "The HCFS URI of the jar file that contains the main class.", + "type": "string" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region" - ], - "request": { - "$ref": "Cluster" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "patch": { - "id": "dataproc.projects.regions.clusters.patch", - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "PATCH", - "description": "Updates a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + }, + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional The runtime log config for job execution." + }, + "properties": { + "additionalProperties": { + "type": "string" }, - "updateMask": { - "type": "string", - "description": "[Required] Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the `PATCH` request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the `PATCH` request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } Note: Currently, config.worker_config.num_instances and config.secondary_worker_config.num_instances are the only fields that can be updated.", - "location": "query" + "description": "Optional A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + "type": "object" + }, + "args": { + "description": "Optional The arguments to pass to the driver. 
Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "request": { - "$ref": "Cluster" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "delete": { - "id": "dataproc.projects.regions.clusters.delete", - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "DELETE", - "description": "Deletes a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + }, + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "id": "dataproc.projects.regions.clusters.get", - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "GET", - "description": "Gets the resource representation for a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + }, + "mainClass": { + "type": "string", + "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris." + }, + "archiveUris": { + "description": "Optional HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "response": { - "$ref": "Cluster" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "list": { - "id": "dataproc.projects.regions.clusters.list", - "path": "v1/projects/{projectId}/regions/{region}/clusters", - "httpMethod": "GET", - "description": "Lists all regions/{region}/clusters in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "filter": { - "type": "string", - "description": "[Optional] A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax: field:value [field:value] ... or field = value [AND [field = value]] ... where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match all values. `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` contains the `DELETING` and `ERROR` states. `clusterName` is the name of the cluster provided at creation time. Only the logical `AND` operator is supported; space-separated items are treated as having an implicit `AND` operator. Example valid filters are: status.state:ACTIVE clusterName:mycluster labels.env:staging \\ labels.starred:* and status.state = ACTIVE AND clusterName = mycluster \\ AND labels.env = staging AND labels.starred = *", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "[Optional] The standard List page size.", - "format": "int32", - "location": "query" + } + }, + "id": "SparkJob" + }, + "Job": { + "description": "A Cloud Dataproc job resource.", + "type": "object", + "properties": { + "reference": { + "description": "Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a \u003ccode\u003ejob_id\u003c/code\u003e.", + "$ref": "JobReference" + }, + "hadoopJob": { + "$ref": "HadoopJob", + "description": "Job is a Hadoop job." + }, + "status": { + "$ref": "JobStatus", + "description": "Output-only The job status. Additional application-specific status information may be contained in the \u003ccode\u003etype_job\u003c/code\u003e and \u003ccode\u003eyarn_applications\u003c/code\u003e fields." + }, + "placement": { + "$ref": "JobPlacement", + "description": "Required Job information, including how, when, and where to run the job." + }, + "driverControlFilesUri": { + "description": "Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.", + "type": "string" + }, + "scheduling": { + "description": "Optional Job scheduling configuration.", + "$ref": "JobScheduling" + }, + "pigJob": { + "$ref": "PigJob", + "description": "Job is a Pig job." 
+ }, + "hiveJob": { + "$ref": "HiveJob", + "description": "Job is a Hive job." + }, + "labels": { + "additionalProperties": { + "type": "string" }, - "pageToken": { - "type": "string", - "description": "[Optional] The standard List page token.", - "location": "query" + "description": "Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.", + "type": "object" + }, + "driverOutputResourceUri": { + "description": "Output-only A URI pointing to the location of the stdout of the job's driver program.", + "type": "string" + }, + "sparkJob": { + "$ref": "SparkJob", + "description": "Job is a Spark job." + }, + "statusHistory": { + "description": "Output-only The previous job status.", + "type": "array", + "items": { + "$ref": "JobStatus" } - }, - "parameterOrder": [ - "projectId", - "region" - ], - "response": { - "$ref": "ListClustersResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "diagnose": { - "id": "dataproc.projects.regions.clusters.diagnose", - "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", - "httpMethod": "POST", - "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains `DiagnoseClusterOutputLocation`.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + }, + "sparkSqlJob": { + "$ref": "SparkSqlJob", + "description": "Job is a SparkSql job." + }, + "yarnApplications": { + "description": "Output-only The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. 
It may be changed before final release.", + "type": "array", + "items": { + "$ref": "YarnApplication" } - }, - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "request": { - "$ref": "DiagnoseClusterRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + }, + "pysparkJob": { + "description": "Job is a Pyspark job.", + "$ref": "PySparkJob" } - } }, - "jobs": { - "methods": { - "submit": { - "id": "dataproc.projects.regions.jobs.submit", - "path": "v1/projects/{projectId}/regions/{region}/jobs:submit", - "httpMethod": "POST", - "description": "Submits a job to a cluster.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" + "id": "Job" + }, + "JobStatus": { + "properties": { + "stateStartTime": { + "description": "Output-only The time when this state was entered.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "SETUP_DONE", + "RUNNING", + "CANCEL_PENDING", + "CANCEL_STARTED", + "CANCELLED", + "DONE", + "ERROR", + "ATTEMPT_FAILURE" + ], + "description": "Output-only A state message specifying the overall job state.", + "type": "string", + "enumDescriptions": [ + "The job state is unknown.", + "The job is pending; it has been submitted, but is not yet running.", + "Job has been received by the service and completed initial setup; it will soon be submitted to the cluster.", + "The job is running on the cluster.", + "A CancelJob request has been received, but is pending.", + "Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.", + "The job cancellation was successful.", + "The job has completed successfully.", + "The job has completed, but encountered an error.", + "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." + ] + }, + "details": { + "description": "Output-only Optional job state details, such as an error description if the state is \u003ccode\u003eERROR\u003c/code\u003e.", + "type": "string" + } + }, + "id": "JobStatus", + "description": "Cloud Dataproc job status.", + "type": "object" + }, + "ManagedGroupConfig": { + "description": "Specifies the resources used to actively manage an instance group.", + "type": "object", + "properties": { + "instanceGroupManagerName": { + "description": "Output-only The name of the Instance Group Manager for this group.", + "type": "string" + }, + "instanceTemplateName": { + "description": "Output-only The name of the Instance Template used for the Managed Instance Group.", + "type": "string" + } + }, + "id": "ManagedGroupConfig" + }, + "ClusterOperationStatus": { + "description": "The status of the operation.", + "type": "object", + "properties": { + "state": { + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ], + "description": "Output-only A message containing the operation state.", + "type": "string", + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is running.", + "The operation is done; either cancelled or completed." 
+ ] + }, + "details": { + "description": "Output-onlyA message containing any operation metadata details.", + "type": "string" + }, + "innerState": { + "description": "Output-only A message containing the detailed operation state.", + "type": "string" + }, + "stateStartTime": { + "description": "Output-only The time this state was entered.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "ClusterOperationStatus" + }, + "YarnApplication": { + "properties": { + "state": { + "description": "Required The application state.", + "type": "string", + "enumDescriptions": [ + "Status is unspecified.", + "Status is NEW.", + "Status is NEW_SAVING.", + "Status is SUBMITTED.", + "Status is ACCEPTED.", + "Status is RUNNING.", + "Status is FINISHED.", + "Status is FAILED.", + "Status is KILLED." + ], + "enum": [ + "STATE_UNSPECIFIED", + "NEW", + "NEW_SAVING", + "SUBMITTED", + "ACCEPTED", + "RUNNING", + "FINISHED", + "FAILED", + "KILLED" + ] + }, + "name": { + "description": "Required The application name.", + "type": "string" + }, + "trackingUrl": { + "type": "string", + "description": "Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access." + }, + "progress": { + "description": "Required The numerical progress of the application, from 1 to 100.", + "format": "float", + "type": "number" + } + }, + "id": "YarnApplication", + "description": "A YARN application created by a job. Application information is a subset of \u003ccode\u003eorg.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto\u003c/code\u003e.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", + "type": "object" + }, + "QueryList": { + "description": "A list of queries to run on a cluster.", + "type": "object", + "properties": { + "queries": { + "description": "Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:\n\"hiveJob\": {\n \"queryList\": {\n \"queries\": [\n \"query1\",\n \"query2\",\n \"query3;query4\",\n ]\n }\n}\n", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region" - ], - "request": { - "$ref": "SubmitJobRequest" - }, - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "id": "dataproc.projects.regions.jobs.get", - "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", - "httpMethod": "GET", - "description": "Gets the resource representation for a job in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + } + }, + "id": "QueryList" + }, + "HadoopJob": { + "description": "A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).", + "type": "object", + "properties": { + "mainJarFileUri": { + "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + "type": "string" + }, + "jarFileUris": { + "description": "Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "jobId" - ], - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "list": { - "id": "dataproc.projects.regions.jobs.list", - "path": "v1/projects/{projectId}/regions/{region}/jobs", - "httpMethod": "GET", - "description": "Lists regions/{region}/jobs in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "pageSize": { - "type": "integer", - "description": "[Optional] The number of results to return in each response.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "[Optional] The page token, returned by a previous call, to request the next page of results.", - "location": "query" - }, - "clusterName": { - "type": "string", - "description": "[Optional] If set, the returned jobs list includes only jobs that were submitted to the named cluster.", - "location": "query" + }, + "loggingConfig": { + "$ref": "LoggingConfig", + "description": "Optional The runtime log config for job execution." 
+ }, + "properties": { + "additionalProperties": { + "type": "string" }, - "jobStateMatcher": { - "type": "string", - "description": "[Optional] Specifies enumerated categories of jobs to list (default = match ALL jobs).", - "enum": [ - "ALL", - "ACTIVE", - "NON_ACTIVE" - ], - "location": "query" + "description": "Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + "type": "object" + }, + "args": { + "type": "array", + "items": { + "type": "string" }, - "filter": { - "type": "string", - "description": "[Optional] A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax: field:value] ... or [field = value] AND [field [= value]] ... where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match all values. `status.state` can be either `ACTIVE` or `INACTIVE`. Only the logical `AND` operator is supported; space-separated items are treated as having an implicit `AND` operator. Example valid filters are: status.state:ACTIVE labels.env:staging labels.starred:* and status.state = ACTIVE AND labels.env = staging AND labels.starred = *", - "location": "query" + "description": "Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission." + }, + "fileUris": { + "description": "Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region" - ], - "response": { - "$ref": "ListJobsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "cancel": { - "id": "dataproc.projects.regions.jobs.cancel", - "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", - "httpMethod": "POST", - "description": "Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get).", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" + }, + "mainClass": { + "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.", + "type": "string" + }, + "archiveUris": { + "type": "array", + "items": { + "type": "string" }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + "description": "Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip." 
+ } + }, + "id": "HadoopJob" + }, + "DiagnoseClusterRequest": { + "type": "object", + "properties": {}, + "id": "DiagnoseClusterRequest", + "description": "A request to collect cluster diagnostic information." + }, + "DiskConfig": { + "description": "Specifies the config of disk options for a group of VM instances.", + "type": "object", + "properties": { + "bootDiskSizeGb": { + "description": "Optional Size in GB of the boot disk (default is 500GB).", + "format": "int32", + "type": "integer" + }, + "numLocalSsds": { + "description": "Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + "format": "int32", + "type": "integer" + } + }, + "id": "DiskConfig" + }, + "ClusterOperationMetadata": { + "description": "Metadata describing the operation.", + "type": "object", + "properties": { + "warnings": { + "description": "Output-only Errors encountered during operation execution.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "jobId" - ], - "request": { - "$ref": "CancelJobRequest" - }, - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "delete": { - "id": "dataproc.projects.regions.jobs.delete", - "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", - "httpMethod": "DELETE", - "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Cloud Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + }, + "labels": { + "description": "Output-only Labels associated with the operation", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "status": { + "$ref": "ClusterOperationStatus", + "description": "Output-only Current operation status." + }, + "statusHistory": { + "description": "Output-only The previous operation status.", + "type": "array", + "items": { + "$ref": "ClusterOperationStatus" } - }, - "parameterOrder": [ - "projectId", - "region", - "jobId" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + }, + "clusterUuid": { + "description": "Output-only Cluster UUID for the operation.", + "type": "string" + }, + "clusterName": { + "description": "Output-only Name of the cluster for the operation.", + "type": "string" + }, + "operationType": { + "description": "Output-only The operation type.", + "type": "string" + }, + "description": { + "description": "Output-only Short description of operation.", + "type": "string" } - } }, - "operations": { - "methods": { - "list": { - "id": "dataproc.projects.regions.operations.list", - "path": "v1/{+name}", - "httpMethod": "GET", - "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding below allows API services to override the binding to use different resource name schemes, such as `users/*/operations`.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation collection.", - "required": true, - "pattern": "^projects/[^/]+/regions/[^/]+/operations$", - "location": "path" - }, - "filter": { - "type": "string", - "description": "The standard list filter.", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The standard list page size.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The standard list page token.", - "location": "query" - } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListOperationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "id": "dataproc.projects.regions.operations.get", - "path": "v1/{+name}", - "httpMethod": "GET", - "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource.", - "required": true, - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", - "location": "path" + "id": "ClusterOperationMetadata" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "HiveJob": { + "description": "A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.", + "type": "object", + "properties": { + "continueOnFailure": { + "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" + }, + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "queryFileUri": { + "type": "string", + "description": "The HCFS URI of the script that contains Hive queries." + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "delete": { - "id": "dataproc.projects.regions.operations.delete", - "path": "v1/{+name}", - "httpMethod": "DELETE", - "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource to be deleted.", - "required": true, - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", - "location": "path" + }, + "scriptVariables": { + "description": "Optional Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).", + "type": "object", + "additionalProperties": { + "type": "string" } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "cancel": { - "id": "dataproc.projects.regions.operations.cancel", - "path": "v1/{+name}:cancel", - "httpMethod": "POST", - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource to be cancelled.", - "required": true, - "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", - "location": "path" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + "type": "object" + } + }, + "id": "HiveJob" + }, + "DiagnoseClusterResults": { + "description": "The location of diagnostic output.", + "type": "object", + "properties": { + "outputUri": { + "description": "Output-only The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.", + "type": "string" + } + }, + "id": "DiagnoseClusterResults" + }, + "ClusterConfig": { + "description": "The cluster config.", + "type": "object", + "properties": { + "softwareConfig": { + "$ref": "SoftwareConfig", + "description": "Optional The config settings for software inside the cluster." + }, + "masterConfig": { + "$ref": "InstanceGroupConfig", + "description": "Optional The Google Compute Engine config settings for the master instance in a cluster." + }, + "secondaryWorkerConfig": { + "$ref": "InstanceGroupConfig", + "description": "Optional The Google Compute Engine config settings for additional worker instances in a cluster." + }, + "initializationActions": { + "description": "Optional Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's \u003ccode\u003erole\u003c/code\u003e metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):\nROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\nif [[ \"${ROLE}\" == 'Master' ]]; then\n ... 
master specific actions ...\nelse\n ... worker specific actions ...\nfi\n", + "type": "array", + "items": { + "$ref": "NodeInitializationAction" } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + }, + "configBucket": { + "description": "Optional A Google Cloud Storage staging bucket used for sharing generated SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, and then it will create and manage this project-level, per-location bucket for you.", + "type": "string" + }, + "workerConfig": { + "$ref": "InstanceGroupConfig", + "description": "Optional The Google Compute Engine config settings for worker instances in a cluster." + }, + "gceClusterConfig": { + "$ref": "GceClusterConfig", + "description": "Required The shared Google Compute Engine config settings for all instances in a cluster." + } + }, + "id": "ClusterConfig" + } + }, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "protocol": "rest", + "version": "v1", + "baseUrl": "https://dataproc.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" } - } } - } } - } - } - } + }, + "servicePath": "", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "kind": "discovery#restDescription", + "rootUrl": "https://dataproc.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "dataproc", + "batchPath": "batch", + "revision": "20170214", + "documentationLink": "https://cloud.google.com/dataproc/", + "id": "dataproc:v1", + "title": "Google Cloud Dataproc API" } diff --git a/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go b/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go index eb04dc5e7..bfa5dfdf8 100644 --- a/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go +++ b/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Regions = NewProjectsRegionsService(s) @@ -139,40 +144,40 @@ type CancelJobRequest struct { // Cluster: Describes the identifying information, config, and status of // a cluster of Google Compute Engine instances. type Cluster struct { - // ClusterName: [Required] The cluster name. Cluster names within a + // ClusterName: Required The cluster name. Cluster names within a // project must be unique. Names of deleted clusters can be reused. 
ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] A cluster UUID (Unique Universal + // ClusterUuid: Output-only A cluster UUID (Unique Universal // Identifier). Cloud Dataproc generates this value when it creates the // cluster. ClusterUuid string `json:"clusterUuid,omitempty"` - // Config: [Required] The cluster config. Note that Cloud Dataproc may - // set default values, and values may change when clusters are updated. + // Config: Required The cluster config. Note that Cloud Dataproc may set + // default values, and values may change when clusters are updated. Config *ClusterConfig `json:"config,omitempty"` - // Labels: [Optional] The labels to associate with this cluster. Label - // **keys** must contain 1 to 63 characters, and must conform to [RFC - // 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be - // empty, but, if present, must contain 1 to 63 characters, and must - // conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more - // than 32 labels can be associated with a cluster. + // Labels: Optional The labels to associate with this cluster. Label + // keys must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, + // but, if present, must contain 1 to 63 characters, and must conform to + // RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 + // labels can be associated with a cluster. Labels map[string]string `json:"labels,omitempty"` - // Metrics: Contains cluster daemon metrics such as HDFS and YARN stats. - // **Beta Feature**: This report is available for testing purposes only. - // It may be changed before final release. + // Metrics: Contains cluster daemon metrics such as HDFS and YARN + // stats.Beta Feature: This report is available for testing purposes + // only. It may be changed before final release. Metrics *ClusterMetrics `json:"metrics,omitempty"` - // ProjectId: [Required] The Google Cloud Platform project ID that the + // ProjectId: Required The Google Cloud Platform project ID that the // cluster belongs to. ProjectId string `json:"projectId,omitempty"` - // Status: [Output-only] Cluster status. + // Status: Output-only Cluster status. Status *ClusterStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] The previous cluster status. + // StatusHistory: Output-only The previous cluster status. StatusHistory []*ClusterStatus `json:"statusHistory,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -204,8 +209,8 @@ func (s *Cluster) MarshalJSON() ([]byte, error) { // ClusterConfig: The cluster config. type ClusterConfig struct { - // ConfigBucket: [Optional] A Google Cloud Storage staging bucket used - // for sharing generated SSH keys and config. If you do not specify a + // ConfigBucket: Optional A Google Cloud Storage staging bucket used for + // sharing generated SSH keys and config. If you do not specify a // staging bucket, Cloud Dataproc will determine an appropriate Cloud // Storage location (US, ASIA, or EU) for your cluster's staging bucket // according to the Google Compute Engine zone where your cluster is @@ -213,34 +218,40 @@ type ClusterConfig struct { // per-location bucket for you. ConfigBucket string `json:"configBucket,omitempty"` - // GceClusterConfig: [Required] The shared Google Compute Engine config + // GceClusterConfig: Required The shared Google Compute Engine config // settings for all instances in a cluster. 
GceClusterConfig *GceClusterConfig `json:"gceClusterConfig,omitempty"` - // InitializationActions: [Optional] Commands to execute on each node + // InitializationActions: Optional Commands to execute on each node // after config is completed. By default, executables are run on master - // and all worker nodes. You can test a node's role metadata to run an - // executable on a master or worker node, as shown below using `curl` - // (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google + // and all worker nodes. You can test a node's role + // metadata to run an executable on a master or worker node, as shown + // below using curl (you can also use wget): + // ROLE=$(curl -H Metadata-Flavor:Google // http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) - // if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... - // else ... worker specific actions ... fi + // + // if [[ "${ROLE}" == 'Master' ]]; then + // ... master specific actions ... + // else + // ... worker specific actions ... + // fi + // InitializationActions []*NodeInitializationAction `json:"initializationActions,omitempty"` - // MasterConfig: [Optional] The Google Compute Engine config settings - // for the master instance in a cluster. + // MasterConfig: Optional The Google Compute Engine config settings for + // the master instance in a cluster. MasterConfig *InstanceGroupConfig `json:"masterConfig,omitempty"` - // SecondaryWorkerConfig: [Optional] The Google Compute Engine config + // SecondaryWorkerConfig: Optional The Google Compute Engine config // settings for additional worker instances in a cluster. SecondaryWorkerConfig *InstanceGroupConfig `json:"secondaryWorkerConfig,omitempty"` - // SoftwareConfig: [Optional] The config settings for software inside - // the cluster. + // SoftwareConfig: Optional The config settings for software inside the + // cluster. SoftwareConfig *SoftwareConfig `json:"softwareConfig,omitempty"` - // WorkerConfig: [Optional] The Google Compute Engine config settings - // for worker instances in a cluster. + // WorkerConfig: Optional The Google Compute Engine config settings for + // worker instances in a cluster. WorkerConfig *InstanceGroupConfig `json:"workerConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "ConfigBucket") to @@ -267,7 +278,7 @@ func (s *ClusterConfig) MarshalJSON() ([]byte, error) { } // ClusterMetrics: Contains cluster daemon metrics, such as HDFS and -// YARN stats. **Beta Feature**: This report is available for testing +// YARN stats.Beta Feature: This report is available for testing // purposes only. It may be changed before final release. type ClusterMetrics struct { // HdfsMetrics: The HDFS metrics. @@ -301,27 +312,30 @@ func (s *ClusterMetrics) MarshalJSON() ([]byte, error) { // ClusterOperationMetadata: Metadata describing the operation. type ClusterOperationMetadata struct { - // ClusterName: [Output-only] Name of the cluster for the operation. + // ClusterName: Output-only Name of the cluster for the operation. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] Cluster UUID for the operation. + // ClusterUuid: Output-only Cluster UUID for the operation. ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: [Output-only] Short description of operation. + // Description: Output-only Short description of operation. 
Description string `json:"description,omitempty"` - // Labels: [Output-only] labels associated with the operation + // Labels: Output-only Labels associated with the operation Labels map[string]string `json:"labels,omitempty"` - // OperationType: [Output-only] The operation type. + // OperationType: Output-only The operation type. OperationType string `json:"operationType,omitempty"` - // Status: [Output-only] Current operation status. + // Status: Output-only Current operation status. Status *ClusterOperationStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] The previous operation status. + // StatusHistory: Output-only The previous operation status. StatusHistory []*ClusterOperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output-only Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -347,24 +361,24 @@ func (s *ClusterOperationMetadata) MarshalJSON() ([]byte, error) { // ClusterOperationStatus: The status of the operation. type ClusterOperationStatus struct { - // Details: [Output-only]A message containing any operation metadata + // Details: Output-onlyA message containing any operation metadata // details. Details string `json:"details,omitempty"` - // InnerState: [Output-only] A message containing the detailed operation + // InnerState: Output-only A message containing the detailed operation // state. InnerState string `json:"innerState,omitempty"` - // State: [Output-only] A message containing the operation state. + // State: Output-only A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. State string `json:"state,omitempty"` - // StateStartTime: [Output-only] The time this state was entered. + // StateStartTime: Output-only The time this state was entered. StateStartTime string `json:"stateStartTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Details") to @@ -392,21 +406,25 @@ func (s *ClusterOperationStatus) MarshalJSON() ([]byte, error) { // ClusterStatus: The status of a cluster and its instances. type ClusterStatus struct { - // Detail: [Output-only] Optional details of cluster's state. + // Detail: Output-only Optional details of cluster's state. Detail string `json:"detail,omitempty"` - // State: [Output-only] The cluster's state. + // State: Output-only The cluster's state. // // Possible values: - // "UNKNOWN" - // "CREATING" - // "RUNNING" - // "ERROR" - // "DELETING" - // "UPDATING" + // "UNKNOWN" - The cluster state is unknown. + // "CREATING" - The cluster is being created and set up. It is not + // ready for use. + // "RUNNING" - The cluster is currently running and healthy. It is + // ready for use. + // "ERROR" - The cluster encountered an error. It is not ready for + // use. + // "DELETING" - The cluster is being deleted. It cannot be used. + // "UPDATING" - The cluster is being updated. It continues to accept + // and process jobs. State string `json:"state,omitempty"` - // StateStartTime: [Output-only] Time when this state was entered. + // StateStartTime: Output-only Time when this state was entered. 
StateStartTime string `json:"stateStartTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Detail") to @@ -435,9 +453,9 @@ func (s *ClusterStatus) MarshalJSON() ([]byte, error) { // DiagnoseClusterOutputLocation: The location where output from // diagnostic command can be found. type DiagnoseClusterOutputLocation struct { - // OutputUri: [Output-only] The Google Cloud Storage URI of the - // diagnostic output. This will be a plain text file with summary of - // collected diagnostics. + // OutputUri: Output-only The Google Cloud Storage URI of the diagnostic + // output. This will be a plain text file with summary of collected + // diagnostics. OutputUri string `json:"outputUri,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputUri") to @@ -470,9 +488,9 @@ type DiagnoseClusterRequest struct { // DiagnoseClusterResults: The location of diagnostic output. type DiagnoseClusterResults struct { - // OutputUri: [Output-only] The Google Cloud Storage URI of the - // diagnostic output. The output report is a plain text file with a - // summary of collected diagnostics. + // OutputUri: Output-only The Google Cloud Storage URI of the diagnostic + // output. The output report is a plain text file with a summary of + // collected diagnostics. OutputUri string `json:"outputUri,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputUri") to @@ -501,16 +519,16 @@ func (s *DiagnoseClusterResults) MarshalJSON() ([]byte, error) { // DiskConfig: Specifies the config of disk options for a group of VM // instances. type DiskConfig struct { - // BootDiskSizeGb: [Optional] Size in GB of the boot disk (default is + // BootDiskSizeGb: Optional Size in GB of the boot disk (default is // 500GB). BootDiskSizeGb int64 `json:"bootDiskSizeGb,omitempty"` - // NumLocalSsds: [Optional] Number of attached SSDs, from 0 to 4 - // (default is 0). If SSDs are not attached, the boot disk is used to - // store runtime logs and - // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) - // data. If one or more SSDs are attached, this runtime bulk data is - // spread across them, and the boot disk contains only basic config and + // NumLocalSsds: Optional Number of attached SSDs, from 0 to 4 (default + // is 0). If SSDs are not attached, the boot disk is used to store + // runtime logs and HDFS + // (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If + // one or more SSDs are attached, this runtime bulk data is spread + // across them, and the boot disk contains only basic config and // installed binaries. NumLocalSsds int64 `json:"numLocalSsds,omitempty"` @@ -541,9 +559,12 @@ func (s *DiskConfig) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For -// instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for `Empty` is -// empty JSON object `{}`. +// instance: +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// The JSON representation for Empty is empty JSON object {}. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -554,60 +575,72 @@ type Empty struct { // Compute Engine cluster instances, applicable to all instances in the // cluster. 
type GceClusterConfig struct { - // InternalIpOnly: [Optional] If true, all instances in the cluster will + // InternalIpOnly: Optional If true, all instances in the cluster will // only have internal IP addresses. By default, clusters are not // restricted to internal IP addresses, and will have ephemeral external - // IP addresses assigned to each instance. This `internal_ip_only` + // IP addresses assigned to each instance. This internal_ip_only // restriction can only be enabled for subnetwork enabled networks, and // all off-cluster dependencies must be configured to be accessible // without external IP addresses. InternalIpOnly bool `json:"internalIpOnly,omitempty"` // Metadata: The Google Compute Engine metadata entries to add to all - // instances (see [Project and instance - // metadata](https://cloud.google.com/compute/docs/storing-retrieving-met - // adata#project_and_instance_metadata)). + // instances (see Project and instance metadata + // (https://cloud.google.com/compute/docs/storing-retrieving-metadata#pro + // ject_and_instance_metadata)). Metadata map[string]string `json:"metadata,omitempty"` - // NetworkUri: [Optional] The Google Compute Engine network to be used - // for machine communications. Cannot be specified with subnetwork_uri. - // If neither `network_uri` nor `subnetwork_uri` is specified, the - // "default" network of the project is used, if it exists. Cannot be a - // "Custom Subnet Network" (see [Using - // Subnetworks](/compute/docs/subnetworks) for more information). + // NetworkUri: Optional The Google Compute Engine network to be used for + // machine communications. Cannot be specified with subnetwork_uri. If + // neither network_uri nor subnetwork_uri is specified, the "default" + // network of the project is used, if it exists. Cannot be a "Custom + // Subnet Network" (see Using Subnetworks for more information). // Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/g - // lobal/default`. + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default. NetworkUri string `json:"networkUri,omitempty"` - // ServiceAccountScopes: [Optional] The URIs of service account scopes - // to be included in Google Compute Engine instances. The following base - // set of scopes is always included: * - // https://www.googleapis.com/auth/cloud.useraccounts.readonly * - // https://www.googleapis.com/auth/devstorage.read_write * - // https://www.googleapis.com/auth/logging.write If no scopes are - // specified, the following defaults are also provided: * - // https://www.googleapis.com/auth/bigquery * - // https://www.googleapis.com/auth/bigtable.admin.table * - // https://www.googleapis.com/auth/bigtable.data * + // ServiceAccount: Optional The service account of the instances. + // Defaults to the default Google Compute Engine service account. Custom + // service accounts need permissions equivalent to the folloing IAM + // roles: + // roles/logging.logWriter + // roles/storage.objectAdmin(see + // https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: + // [account_id]@[project_id].iam.gserviceaccount.com + ServiceAccount string `json:"serviceAccount,omitempty"` + + // ServiceAccountScopes: Optional The URIs of service account scopes to + // be included in Google Compute Engine instances. 
The following base + // set of scopes is always + // included: + // https://www.googleapis.com/auth/cloud.useraccounts.readonly + // + // https://www.googleapis.com/auth/devstorage.read_write + // https://www.goog + // leapis.com/auth/logging.writeIf no scopes are specified, the + // following defaults are also + // provided: + // https://www.googleapis.com/auth/bigquery + // https://www.googlea + // pis.com/auth/bigtable.admin.table + // https://www.googleapis.com/auth/bigt + // able.data // https://www.googleapis.com/auth/devstorage.full_control ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"` - // SubnetworkUri: [Optional] The Google Compute Engine subnetwork to be + // SubnetworkUri: Optional The Google Compute Engine subnetwork to be // used for machine communications. Cannot be specified with // network_uri. Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/u - // s-east1/sub0`. + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0. SubnetworkUri string `json:"subnetworkUri,omitempty"` // Tags: The Google Compute Engine tags to add to all instances (see - // [Tagging instances](/compute/docs/label-or-tag-resources#tags)). + // Tagging instances). Tags []string `json:"tags,omitempty"` - // ZoneUri: [Required] The zone where the Google Compute Engine cluster + // ZoneUri: Required The zone where the Google Compute Engine cluster // will be located. Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zo - // ne]`. + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]. ZoneUri string `json:"zoneUri,omitempty"` // ForceSendFields is a list of field names (e.g. "InternalIpOnly") to @@ -634,39 +667,39 @@ func (s *GceClusterConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HadoopJob: A Cloud Dataproc job for running [Apache Hadoop -// MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-cli -// ent/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on -// [Apache Hadoop -// YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-si -// te/YARN.html). +// HadoopJob: A Cloud Dataproc job for running Apache Hadoop MapReduce +// (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop +// -mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop +// YARN +// (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YA +// RN.html). type HadoopJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of Hadoop drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, or .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include - // arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job + // Args: Optional The arguments to pass to the driver. Do not include + // arguments, such as -libjars or -Dfoo=bar, that can be set as job // properties, since a collision may occur that causes an incorrect job // submission. 
Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS (Hadoop Compatible Filesystem) URIs of - // files to be copied to the working directory of Hadoop drivers and + // FileUris: Optional HCFS (Hadoop Compatible Filesystem) URIs of files + // to be copied to the working directory of Hadoop drivers and // distributed tasks. Useful for naively parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] Jar file URIs to add to the CLASSPATHs of the + // JarFileUris: Optional Jar file URIs to add to the CLASSPATHs of the // Hadoop driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfig: [Optional] The runtime log config for job execution. + // LoggingConfig: Optional The runtime log config for job execution. LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` // MainClass: The name of the driver's main class. The jar file // containing the class must be in the default CLASSPATH or specified in - // `jar_file_uris`. + // jar_file_uris. MainClass string `json:"mainClass,omitempty"` // MainJarFileUri: The HCFS URI of the jar file containing the main @@ -676,7 +709,7 @@ type HadoopJob struct { // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Hadoop. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site and classes in user code. @@ -705,22 +738,22 @@ func (s *HadoopJob) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HiveJob: A Cloud Dataproc job for running [Apache -// Hive](https://hive.apache.org/) queries on YARN. +// HiveJob: A Cloud Dataproc job for running Apache Hive +// (https://hive.apache.org/) queries on YARN. type HiveJob struct { - // ContinueOnFailure: [Optional] Whether to continue executing queries - // if a query fails. The default value is `false`. Setting to `true` can - // be useful when executing independent parallel queries. + // ContinueOnFailure: Optional Whether to continue executing queries if + // a query fails. The default value is false. Setting to true can be + // useful when executing independent parallel queries. ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can - // contain Hive SerDes and UDFs. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH + // of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive + // SerDes and UDFs. JarFileUris []string `json:"jarFileUris,omitempty"` - // Properties: [Optional] A mapping of property names and values, used - // to configure Hive. Properties that conflict with values set by the - // Cloud Dataproc API may be overwritten. Can include properties set in + // Properties: Optional A mapping of property names and values, used to + // configure Hive. Properties that conflict with values set by the Cloud + // Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and // classes in user code. 
Properties map[string]string `json:"properties,omitempty"` @@ -731,8 +764,8 @@ type HiveJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values - // (equivalent to the Hive command: `SET name="value";`). + // ScriptVariables: Optional Mapping of query variable names to values + // (equivalent to the Hive command: SET name="value";). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") @@ -759,39 +792,38 @@ func (s *HiveJob) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceGroupConfig: [Optional] The config settings for Google -// Compute Engine resources in an instance group, such as a master or -// worker group. +// InstanceGroupConfig: Optional The config settings for Google Compute +// Engine resources in an instance group, such as a master or worker +// group. type InstanceGroupConfig struct { - // DiskConfig: [Optional] Disk option config settings. + // DiskConfig: Optional Disk option config settings. DiskConfig *DiskConfig `json:"diskConfig,omitempty"` - // ImageUri: [Output-only] The Google Compute Engine image resource used - // for cluster instances. Inferred from `SoftwareConfig.image_version`. + // ImageUri: Output-only The Google Compute Engine image resource used + // for cluster instances. Inferred from SoftwareConfig.image_version. ImageUri string `json:"imageUri,omitempty"` - // InstanceNames: [Optional] The list of instance names. Cloud Dataproc - // derives the names from `cluster_name`, `num_instances`, and the - // instance group if not set by user (recommended practice is to let - // Cloud Dataproc derive the name). + // InstanceNames: Optional The list of instance names. Cloud Dataproc + // derives the names from cluster_name, num_instances, and the instance + // group if not set by user (recommended practice is to let Cloud + // Dataproc derive the name). InstanceNames []string `json:"instanceNames,omitempty"` - // IsPreemptible: [Optional] Specifies that this instance group contains + // IsPreemptible: Optional Specifies that this instance group contains // preemptible instances. IsPreemptible bool `json:"isPreemptible,omitempty"` - // MachineTypeUri: [Required] The Google Compute Engine machine type - // used for cluster instances. Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us- - // east1-a/machineTypes/n1-standard-2`. + // MachineTypeUri: Required The Google Compute Engine machine type used + // for cluster instances. Example: + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2. MachineTypeUri string `json:"machineTypeUri,omitempty"` - // ManagedGroupConfig: [Output-only] The config for Google Compute - // Engine Instance Group Manager that manages this group. This is only - // used for preemptible instance groups. + // ManagedGroupConfig: Output-only The config for Google Compute Engine + // Instance Group Manager that manages this group. This is only used for + // preemptible instance groups. ManagedGroupConfig *ManagedGroupConfig `json:"managedGroupConfig,omitempty"` - // NumInstances: [Required] The number of VM instances in the instance + // NumInstances: Required The number of VM instances in the instance // group. For master instance groups, must be set to 1. 
NumInstances int64 `json:"numInstances,omitempty"` @@ -820,13 +852,13 @@ func (s *InstanceGroupConfig) MarshalJSON() ([]byte, error) { // Job: A Cloud Dataproc job resource. type Job struct { - // DriverControlFilesUri: [Output-only] If present, the location of + // DriverControlFilesUri: Output-only If present, the location of // miscellaneous control files which may be used as part of job setup // and handling. If not present, control files may be placed in the same - // location as `driver_output_uri`. + // location as driver_output_uri. DriverControlFilesUri string `json:"driverControlFilesUri,omitempty"` - // DriverOutputResourceUri: [Output-only] A URI pointing to the location + // DriverOutputResourceUri: Output-only A URI pointing to the location // of the stdout of the job's driver program. DriverOutputResourceUri string `json:"driverOutputResourceUri,omitempty"` @@ -836,46 +868,49 @@ type Job struct { // HiveJob: Job is a Hive job. HiveJob *HiveJob `json:"hiveJob,omitempty"` - // Labels: [Optional] The labels to associate with this job. Label - // **keys** must contain 1 to 63 characters, and must conform to [RFC - // 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be - // empty, but, if present, must contain 1 to 63 characters, and must - // conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more - // than 32 labels can be associated with a job. + // Labels: Optional The labels to associate with this job. Label keys + // must contain 1 to 63 characters, and must conform to RFC 1035 + // (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, + // but, if present, must contain 1 to 63 characters, and must conform to + // RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 + // labels can be associated with a job. Labels map[string]string `json:"labels,omitempty"` // PigJob: Job is a Pig job. PigJob *PigJob `json:"pigJob,omitempty"` - // Placement: [Required] Job information, including how, when, and where + // Placement: Required Job information, including how, when, and where // to run the job. Placement *JobPlacement `json:"placement,omitempty"` // PysparkJob: Job is a Pyspark job. PysparkJob *PySparkJob `json:"pysparkJob,omitempty"` - // Reference: [Optional] The fully qualified reference to the job, which + // Reference: Optional The fully qualified reference to the job, which // can be used to obtain the equivalent REST path of the job resource. // If this property is not specified when a job is created, the server - // generates a job_id. + // generates a job_id. Reference *JobReference `json:"reference,omitempty"` + // Scheduling: Optional Job scheduling configuration. + Scheduling *JobScheduling `json:"scheduling,omitempty"` + // SparkJob: Job is a Spark job. SparkJob *SparkJob `json:"sparkJob,omitempty"` // SparkSqlJob: Job is a SparkSql job. SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` - // Status: [Output-only] The job status. Additional application-specific - // status information may be contained in the type_job and - // yarn_applications fields. + // Status: Output-only The job status. Additional application-specific + // status information may be contained in the type_job and + // yarn_applications fields. Status *JobStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] The previous job status. + // StatusHistory: Output-only The previous job status. 
StatusHistory []*JobStatus `json:"statusHistory,omitempty"` - // YarnApplications: [Output-only] The collection of YARN applications - // spun up by this job. **Beta** Feature: This report is available for + // YarnApplications: Output-only The collection of YARN applications + // spun up by this job.Beta Feature: This report is available for // testing purposes only. It may be changed before final release. YarnApplications []*YarnApplication `json:"yarnApplications,omitempty"` @@ -910,11 +945,11 @@ func (s *Job) MarshalJSON() ([]byte, error) { // JobPlacement: Cloud Dataproc job config. type JobPlacement struct { - // ClusterName: [Required] The name of the cluster where the job will be + // ClusterName: Required The name of the cluster where the job will be // submitted. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] A cluster UUID generated by the Cloud + // ClusterUuid: Output-only A cluster UUID generated by the Cloud // Dataproc service when the job is submitted. ClusterUuid string `json:"clusterUuid,omitempty"` @@ -943,16 +978,16 @@ func (s *JobPlacement) MarshalJSON() ([]byte, error) { // JobReference: Encapsulates the full scoping used to reference a job. type JobReference struct { - // JobId: [Optional] The job ID, which must be unique within the - // project. The job ID is generated by the server upon job submission or - // provided by the user as a means to perform retries without creating - // duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers - // (0-9), underscores (_), or hyphens (-). The maximum length is 512 + // JobId: Optional The job ID, which must be unique within the project. + // The job ID is generated by the server upon job submission or provided + // by the user as a means to perform retries without creating duplicate + // jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), or hyphens (-). The maximum length is 100 // characters. JobId string `json:"jobId,omitempty"` - // ProjectId: [Required] The ID of the Google Cloud Platform project - // that the job belongs to. + // ProjectId: Required The ID of the Google Cloud Platform project that + // the job belongs to. ProjectId string `json:"projectId,omitempty"` // ForceSendFields is a list of field names (e.g. "JobId") to @@ -978,28 +1013,70 @@ func (s *JobReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// JobScheduling: Job scheduling options.Beta Feature: These options are +// available for testing purposes only. They may be changed before final +// release. +type JobScheduling struct { + // MaxFailuresPerHour: Optional Maximum number of times per hour a + // driver may be restarted as a result of driver terminating with + // non-zero code before job is reported failed.A job may be reported as + // thrashing if driver exits with non-zero code 4 times within 10 minute + // window.Maximum value is 10. + MaxFailuresPerHour int64 `json:"maxFailuresPerHour,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxFailuresPerHour") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxFailuresPerHour") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *JobScheduling) MarshalJSON() ([]byte, error) { + type noMethod JobScheduling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // JobStatus: Cloud Dataproc job status. type JobStatus struct { - // Details: [Output-only] Optional job state details, such as an error - // description if the state is ERROR. + // Details: Output-only Optional job state details, such as an error + // description if the state is ERROR. Details string `json:"details,omitempty"` - // State: [Output-only] A state message specifying the overall job - // state. + // State: Output-only A state message specifying the overall job state. // // Possible values: - // "STATE_UNSPECIFIED" - // "PENDING" - // "SETUP_DONE" - // "RUNNING" - // "CANCEL_PENDING" - // "CANCEL_STARTED" - // "CANCELLED" - // "DONE" - // "ERROR" + // "STATE_UNSPECIFIED" - The job state is unknown. + // "PENDING" - The job is pending; it has been submitted, but is not + // yet running. + // "SETUP_DONE" - Job has been received by the service and completed + // initial setup; it will soon be submitted to the cluster. + // "RUNNING" - The job is running on the cluster. + // "CANCEL_PENDING" - A CancelJob request has been received, but is + // pending. + // "CANCEL_STARTED" - Transient in-flight resources have been + // canceled, and the request to cancel the running job has been issued + // to the cluster. + // "CANCELLED" - The job cancellation was successful. + // "DONE" - The job has completed successfully. + // "ERROR" - The job has completed, but encountered an error. + // "ATTEMPT_FAILURE" - Job attempt has failed. The detail field + // contains failure details for this attempt.Applies to restartable jobs + // only. State string `json:"state,omitempty"` - // StateStartTime: [Output-only] The time when this state was entered. + // StateStartTime: Output-only The time when this state was entered. StateStartTime string `json:"stateStartTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Details") to @@ -1027,13 +1104,13 @@ func (s *JobStatus) MarshalJSON() ([]byte, error) { // ListClustersResponse: The list of all clusters in a project. type ListClustersResponse struct { - // Clusters: [Output-only] The clusters in the project. + // Clusters: Output-only The clusters in the project. Clusters []*Cluster `json:"clusters,omitempty"` - // NextPageToken: [Output-only] This token is included in the response - // if there are more results to fetch. To fetch additional results, - // provide this value as the `page_token` in a subsequent - // ListClustersRequest. + // NextPageToken: Output-only This token is included in the response if + // there are more results to fetch. To fetch additional results, provide + // this value as the page_token in a subsequent + // ListClustersRequest. 
NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1065,12 +1142,13 @@ func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { // ListJobsResponse: A list of jobs in a project. type ListJobsResponse struct { - // Jobs: [Output-only] Jobs list. + // Jobs: Output-only Jobs list. Jobs []*Job `json:"jobs,omitempty"` - // NextPageToken: [Optional] This token is included in the response if + // NextPageToken: Optional This token is included in the response if // there are more results to fetch. To fetch additional results, provide - // this value as the `page_token` in a subsequent ListJobsRequest. + // this value as the page_token in a subsequent + // ListJobsRequest. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1171,11 +1249,11 @@ func (s *LoggingConfig) MarshalJSON() ([]byte, error) { // ManagedGroupConfig: Specifies the resources used to actively manage // an instance group. type ManagedGroupConfig struct { - // InstanceGroupManagerName: [Output-only] The name of the Instance - // Group Manager for this group. + // InstanceGroupManagerName: Output-only The name of the Instance Group + // Manager for this group. InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"` - // InstanceTemplateName: [Output-only] The name of the Instance Template + // InstanceTemplateName: Output-only The name of the Instance Template // used for the Managed Instance Group. InstanceTemplateName string `json:"instanceTemplateName,omitempty"` @@ -1207,15 +1285,14 @@ func (s *ManagedGroupConfig) MarshalJSON() ([]byte, error) { // NodeInitializationAction: Specifies an executable to run on a fully // configured node and a timeout period for executable completion. type NodeInitializationAction struct { - // ExecutableFile: [Required] Google Cloud Storage URI of executable - // file. + // ExecutableFile: Required Google Cloud Storage URI of executable file. ExecutableFile string `json:"executableFile,omitempty"` - // ExecutionTimeout: [Optional] Amount of time executable has to - // complete. Default is 10 minutes. Cluster creation fails with an - // explanatory error message (the name of the executable that caused the - // error and the exceeded timeout period) if the executable is not - // completed at end of the timeout period. + // ExecutionTimeout: Optional Amount of time executable has to complete. + // Default is 10 minutes. Cluster creation fails with an explanatory + // error message (the name of the executable that caused the error and + // the exceeded timeout period) if the executable is not completed at + // end of the timeout period. ExecutionTimeout string `json:"executionTimeout,omitempty"` // ForceSendFields is a list of field names (e.g. "ExecutableFile") to @@ -1245,9 +1322,9 @@ func (s *NodeInitializationAction) MarshalJSON() ([]byte, error) { // Operation: This resource represents a long-running operation that is // the result of a network API call. type Operation struct { - // Done: If the value is `false`, it means the operation is still in - // progress. If true, the operation is completed, and either `error` or - // `response` is available. + // Done: If the value is false, it means the operation is still in + // progress. If true, the operation is completed, and either error or + // response is available. 
Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or @@ -1263,18 +1340,17 @@ type Operation struct { // Name: The server-assigned name, which is only unique within the same // service that originally returns it. If you use the default HTTP - // mapping, the `name` should have the format of - // `operations/some/unique/name`. + // mapping, the name should have the format of + // operations/some/unique/name. Name string `json:"name,omitempty"` // Response: The normal response of the operation in case of success. If - // the original method returns no data on success, such as `Delete`, the - // response is `google.protobuf.Empty`. If the original method is - // standard `Get`/`Create`/`Update`, the response should be the - // resource. For other methods, the response should have the type - // `XxxResponse`, where `Xxx` is the original method name. For example, - // if the original method name is `TakeSnapshot()`, the inferred - // response type is `TakeSnapshotResponse`. + // the original method returns no data on success, such as Delete, the + // response is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other + // methods, the response should have the type XxxResponse, where Xxx is + // the original method name. For example, if the original method name is + // TakeSnapshot(), the inferred response type is TakeSnapshotResponse. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1312,7 +1388,7 @@ type OperationMetadata struct { // ClusterUuid: Cluster UUId for the operation. ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: [Output-only] Short description of operation. + // Description: Output-only Short description of operation. Description string `json:"description,omitempty"` // Details: A message containing any operation metadata details. @@ -1327,7 +1403,7 @@ type OperationMetadata struct { // InsertTime: The time that the operation was requested. InsertTime string `json:"insertTime,omitempty"` - // OperationType: [Output-only] The operation type. + // OperationType: Output-only The operation type. OperationType string `json:"operationType,omitempty"` // StartTime: The time that the operation was started by the server. @@ -1336,18 +1412,21 @@ type OperationMetadata struct { // State: A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is currently running. + // "DONE" - The operation is done, either cancelled or completed. State string `json:"state,omitempty"` - // Status: [Output-only] Current operation status. + // Status: Output-only Current operation status. Status *OperationStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] Previous operation status. + // StatusHistory: Output-only Previous operation status. StatusHistory []*OperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output-only Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -1382,10 +1461,10 @@ type OperationStatus struct { // State: A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. State string `json:"state,omitempty"` // StateStartTime: The time this state was entered. @@ -1414,23 +1493,23 @@ func (s *OperationStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PigJob: A Cloud Dataproc job for running [Apache -// Pig](https://pig.apache.org/) queries on YARN. +// PigJob: A Cloud Dataproc job for running Apache Pig +// (https://pig.apache.org/) queries on YARN. type PigJob struct { - // ContinueOnFailure: [Optional] Whether to continue executing queries - // if a query fails. The default value is `false`. Setting to `true` can - // be useful when executing independent parallel queries. + // ContinueOnFailure: Optional Whether to continue executing queries if + // a query fails. The default value is false. Setting to true can be + // useful when executing independent parallel queries. ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can - // contain Pig UDFs. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH + // of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig + // UDFs. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfig: [Optional] The runtime log config for job execution. + // LoggingConfig: Optional The runtime log config for job execution. LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Pig. Properties that conflict with values set by the Cloud // Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and @@ -1444,8 +1523,8 @@ type PigJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values - // (equivalent to the Pig command: `name=[value]`). + // ScriptVariables: Optional Mapping of query variable names to values + // (equivalent to the Pig command: name=[value]). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") @@ -1472,42 +1551,42 @@ func (s *PigJob) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PySparkJob: A Cloud Dataproc job for running [Apache -// PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide. -// html) applications on YARN. +// PySparkJob: A Cloud Dataproc job for running Apache PySpark +// (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) +// applications on YARN. type PySparkJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of .jar, .tar, .tar.gz, .tgz, and .zip. 
ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include - // arguments, such as `--conf`, that can be set as job properties, since - // a collision may occur that causes an incorrect job submission. + // Args: Optional The arguments to pass to the driver. Do not include + // arguments, such as --conf, that can be set as job properties, since a + // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Python drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATHs of the Python driver and tasks. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs + // of the Python driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfig: [Optional] The runtime log config for job execution. + // LoggingConfig: Optional The runtime log config for job execution. LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` - // MainPythonFileUri: [Required] The HCFS URI of the main Python file to + // MainPythonFileUri: Required The HCFS URI of the main Python file to // use as the driver. Must be a .py file. MainPythonFileUri string `json:"mainPythonFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure PySpark. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. Properties map[string]string `json:"properties,omitempty"` - // PythonFileUris: [Optional] HCFS file URIs of Python files to pass to + // PythonFileUris: Optional HCFS file URIs of Python files to pass to // the PySpark framework. Supported file types: .py, .egg, and .zip. PythonFileUris []string `json:"pythonFileUris,omitempty"` @@ -1536,12 +1615,21 @@ func (s *PySparkJob) MarshalJSON() ([]byte, error) { // QueryList: A list of queries to run on a cluster. type QueryList struct { - // Queries: [Required] The queries to execute. You do not need to + // Queries: Required The queries to execute. You do not need to // terminate a query with a semicolon. Multiple queries can be specified // in one string by separating each with a semicolon. Here is an example // of an Cloud Dataproc API snippet that uses a QueryList to specify a - // HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", - // "query3;query4", ] } } + // HiveJob: + // "hiveJob": { + // "queryList": { + // "queries": [ + // "query1", + // "query2", + // "query3;query4", + // ] + // } + // } + // Queries []string `json:"queries,omitempty"` // ForceSendFields is a list of field names (e.g. "Queries") to @@ -1570,18 +1658,22 @@ func (s *QueryList) MarshalJSON() ([]byte, error) { // SoftwareConfig: Specifies the selection and config of software inside // the cluster. type SoftwareConfig struct { - // ImageVersion: [Optional] The version of software inside the cluster. - // It must match the regular expression `[0-9]+\.[0-9]+`. If - // unspecified, it defaults to the latest version (see [Cloud Dataproc - // Versioning](/dataproc/versioning)). 
+ // ImageVersion: Optional The version of software inside the cluster. It + // must match the regular expression [0-9]+\.[0-9]+. If unspecified, it + // defaults to the latest version (see Cloud Dataproc Versioning). ImageVersion string `json:"imageVersion,omitempty"` - // Properties: [Optional] The properties to set on daemon config files. - // Property keys are specified in `prefix:property` format, such as - // `core:fs.defaultFS`. The following are supported prefixes and their - // mappings: * core: `core-site.xml` * hdfs: `hdfs-site.xml` * mapred: - // `mapred-site.xml` * yarn: `yarn-site.xml` * hive: `hive-site.xml` * - // pig: `pig.properties` * spark: `spark-defaults.conf` + // Properties: Optional The properties to set on daemon config + // files.Property keys are specified in prefix:property format, such as + // core:fs.defaultFS. The following are supported prefixes and their + // mappings: + // core: core-site.xml + // hdfs: hdfs-site.xml + // mapred: mapred-site.xml + // yarn: yarn-site.xml + // hive: hive-site.xml + // pig: pig.properties + // spark: spark-defaults.conf Properties map[string]string `json:"properties,omitempty"` // ForceSendFields is a list of field names (e.g. "ImageVersion") to @@ -1607,41 +1699,41 @@ func (s *SoftwareConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SparkJob: A Cloud Dataproc job for running [Apache -// Spark](http://spark.apache.org/) applications on YARN. +// SparkJob: A Cloud Dataproc job for running Apache Spark +// (http://spark.apache.org/) applications on YARN. type SparkJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of Spark drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include - // arguments, such as `--conf`, that can be set as job properties, since - // a collision may occur that causes an incorrect job submission. + // Args: Optional The arguments to pass to the driver. Do not include + // arguments, such as --conf, that can be set as job properties, since a + // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Spark drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATHs of the Spark driver and tasks. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs + // of the Spark driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfig: [Optional] The runtime log config for job execution. + // LoggingConfig: Optional The runtime log config for job execution. LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` // MainClass: The name of the driver's main class. The jar file that // contains the class must be in the default CLASSPATH or specified in - // `jar_file_uris`. + // jar_file_uris. MainClass string `json:"mainClass,omitempty"` // MainJarFileUri: The HCFS URI of the jar file that contains the main // class. 
MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Spark. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. @@ -1670,17 +1762,17 @@ func (s *SparkJob) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SparkSqlJob: A Cloud Dataproc job for running [Apache Spark -// SQL](http://spark.apache.org/sql/) queries. +// SparkSqlJob: A Cloud Dataproc job for running Apache Spark SQL +// (http://spark.apache.org/sql/) queries. type SparkSqlJob struct { - // JarFileUris: [Optional] HCFS URIs of jar files to be added to the - // Spark CLASSPATH. + // JarFileUris: Optional HCFS URIs of jar files to be added to the Spark + // CLASSPATH. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfig: [Optional] The runtime log config for job execution. + // LoggingConfig: Optional The runtime log config for job execution. LoggingConfig *LoggingConfig `json:"loggingConfig,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Spark SQL's SparkConf. Properties that conflict with values // set by the Cloud Dataproc API may be overwritten. Properties map[string]string `json:"properties,omitempty"` @@ -1691,8 +1783,8 @@ type SparkSqlJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values - // (equivalent to the Spark SQL command: SET `name="value";`). + // ScriptVariables: Optional Mapping of query variable names to values + // (equivalent to the Spark SQL command: SET name="value";). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` // ForceSendFields is a list of field names (e.g. "JarFileUris") to @@ -1718,42 +1810,45 @@ func (s *SparkSqlJob) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Status: The `Status` type defines a logical error model that is +// Status: The Status type defines a logical error model that is // suitable for different programming environments, including REST APIs -// and RPC APIs. It is used by [gRPC](https://github.com/grpc). The -// error model is designed to be: - Simple to use and understand for -// most users - Flexible enough to meet unexpected needs # Overview The -// `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// google.rpc.Code, but it may accept additional error codes if needed. -// The error message should be a developer-facing English message that -// helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the -// error details or localize it in the client. The optional error -// details may contain arbitrary information about the error. There is a -// predefined set of error detail types in the package `google.rpc` -// which can be used for common error conditions. # Language mapping The -// `Status` message is the logical representation of the error model, -// but it is not necessarily the actual wire format. 
When the `Status` -// message is exposed in different client libraries and different wire -// protocols, it can be mapped differently. For example, it will likely -// be mapped to some exceptions in Java, but more likely mapped to some -// error codes in C. # Other uses The error model and the `Status` -// message can be used in a variety of environments, either with or -// without APIs, to provide a consistent developer experience across -// different environments. Example uses of this error model include: - +// and RPC APIs. It is used by gRPC (https://github.com/grpc). The error +// model is designed to be: +// Simple to use and understand for most users +// Flexible enough to meet unexpected needsOverviewThe Status message +// contains three pieces of data: error code, error message, and error +// details. The error code should be an enum value of google.rpc.Code, +// but it may accept additional error codes if needed. The error message +// should be a developer-facing English message that helps developers +// understand and resolve the error. If a localized user-facing error +// message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of +// error detail types in the package google.rpc which can be used for +// common error conditions.Language mappingThe Status message is the +// logical representation of the error model, but it is not necessarily +// the actual wire format. When the Status message is exposed in +// different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions in Java, but more likely mapped to some error codes in +// C.Other usesThe error model and the Status message can be used in a +// variety of environments, either with or without APIs, to provide a +// consistent developer experience across different environments.Example +// uses of this error model include: // Partial errors. If a service needs to return partial errors to the -// client, it may embed the `Status` in the normal response to indicate -// the partial errors. - Workflow errors. A typical workflow has -// multiple steps. Each step may have a `Status` message for error -// reporting purpose. - Batch operations. If a client uses batch request -// and batch response, the `Status` message should be used directly -// inside batch response, one for each error sub-response. - +// client, it may embed the Status in the normal response to indicate +// the partial errors. +// Workflow errors. A typical workflow has multiple steps. Each step may +// have a Status message for error reporting purpose. +// Batch operations. If a client uses batch request and batch response, +// the Status message should be used directly inside batch response, one +// for each error sub-response. // Asynchronous operations. If an API call embeds asynchronous operation // results in its response, the status of those operations should be -// represented directly using the `Status` message. - Logging. If some -// API errors are stored in logs, the message `Status` could be used -// directly after any stripping needed for security/privacy reasons. +// represented directly using the Status message. +// Logging. If some API errors are stored in logs, the message Status +// could be used directly after any stripping needed for +// security/privacy reasons. 
type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. @@ -1793,7 +1888,7 @@ func (s *Status) MarshalJSON() ([]byte, error) { // SubmitJobRequest: A request to submit a job. type SubmitJobRequest struct { - // Job: [Required] The job resource. + // Job: Required The job resource. Job *Job `json:"job,omitempty"` // ForceSendFields is a list of field names (e.g. "Job") to @@ -1821,32 +1916,32 @@ func (s *SubmitJobRequest) MarshalJSON() ([]byte, error) { // YarnApplication: A YARN application created by a job. Application // information is a subset of -// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. -// **Beta Feature**: This report is available for testing purposes only. -// It may be changed before final release. +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: This report is available for testing purposes +// only. It may be changed before final release. type YarnApplication struct { - // Name: [Required] The application name. + // Name: Required The application name. Name string `json:"name,omitempty"` - // Progress: [Required] The numerical progress of the application, from - // 1 to 100. + // Progress: Required The numerical progress of the application, from 1 + // to 100. Progress float64 `json:"progress,omitempty"` - // State: [Required] The application state. + // State: Required The application state. // // Possible values: - // "STATE_UNSPECIFIED" - // "NEW" - // "NEW_SAVING" - // "SUBMITTED" - // "ACCEPTED" - // "RUNNING" - // "FINISHED" - // "FAILED" - // "KILLED" + // "STATE_UNSPECIFIED" - Status is unspecified. + // "NEW" - Status is NEW. + // "NEW_SAVING" - Status is NEW_SAVING. + // "SUBMITTED" - Status is SUBMITTED. + // "ACCEPTED" - Status is ACCEPTED. + // "RUNNING" - Status is RUNNING. + // "FINISHED" - Status is FINISHED. + // "FAILED" - Status is FAILED. + // "KILLED" - Status is KILLED. State string `json:"state,omitempty"` - // TrackingUrl: [Optional] The HTTP URL of the ApplicationMaster, + // TrackingUrl: Optional The HTTP URL of the ApplicationMaster, // HistoryServer, or TimelineServer that provides application-specific // information. The URL uses the internal hostname, and requires a proxy // server for resolution and, possibly, access. 
@@ -1941,6 +2036,7 @@ func (c *ProjectsRegionsClustersCreateCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cluster) if err != nil { @@ -1998,6 +2094,7 @@ func (c *ProjectsRegionsClustersCreateCall) Do(opts ...googleapi.CallOption) (*O return ret, nil // { // "description": "Creates a cluster in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/clusters", // "httpMethod": "POST", // "id": "dataproc.projects.regions.clusters.create", // "parameterOrder": [ @@ -2006,13 +2103,13 @@ func (c *ProjectsRegionsClustersCreateCall) Do(opts ...googleapi.CallOption) (*O // ], // "parameters": { // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -2084,6 +2181,7 @@ func (c *ProjectsRegionsClustersDeleteCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}") @@ -2137,6 +2235,7 @@ func (c *ProjectsRegionsClustersDeleteCall) Do(opts ...googleapi.CallOption) (*O return ret, nil // { // "description": "Deletes a cluster in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", // "httpMethod": "DELETE", // "id": "dataproc.projects.regions.clusters.delete", // "parameterOrder": [ @@ -2146,19 +2245,19 @@ func (c *ProjectsRegionsClustersDeleteCall) Do(opts ...googleapi.CallOption) (*O // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -2190,7 +2289,7 @@ type ProjectsRegionsClustersDiagnoseCall struct { // Diagnose: Gets cluster diagnostic information. After the operation // completes, the Operation.response field contains -// `DiagnoseClusterOutputLocation`. +// DiagnoseClusterOutputLocation. 
func (r *ProjectsRegionsClustersService) Diagnose(projectId string, region string, clusterName string, diagnoseclusterrequest *DiagnoseClusterRequest) *ProjectsRegionsClustersDiagnoseCall { c := &ProjectsRegionsClustersDiagnoseCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -2231,6 +2330,7 @@ func (c *ProjectsRegionsClustersDiagnoseCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.diagnoseclusterrequest) if err != nil { @@ -2288,7 +2388,8 @@ func (c *ProjectsRegionsClustersDiagnoseCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains `DiagnoseClusterOutputLocation`.", + // "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", // "httpMethod": "POST", // "id": "dataproc.projects.regions.clusters.diagnose", // "parameterOrder": [ @@ -2298,19 +2399,19 @@ func (c *ProjectsRegionsClustersDiagnoseCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -2393,6 +2494,7 @@ func (c *ProjectsRegionsClustersGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2449,6 +2551,7 @@ func (c *ProjectsRegionsClustersGetCall) Do(opts ...googleapi.CallOption) (*Clus return ret, nil // { // "description": "Gets the resource representation for a cluster in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", // "httpMethod": "GET", // "id": "dataproc.projects.regions.clusters.get", // "parameterOrder": [ @@ -2458,19 +2561,19 @@ func (c *ProjectsRegionsClustersGetCall) Do(opts ...googleapi.CallOption) (*Clus // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud 
Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -2507,35 +2610,32 @@ func (r *ProjectsRegionsClustersService) List(projectId string, region string) * return c } -// Filter sets the optional parameter "filter": [Optional] A filter +// Filter sets the optional parameter "filter": Optional A filter // constraining the clusters to list. Filters are case-sensitive and -// have the following syntax: field:value [field:value] ... or field = -// value [AND [field = value]] ... where **field** is one of -// `status.state`, `clusterName`, or `labels.[KEY]`, and `[KEY]` is a -// label key. **value** can be `*` to match all values. `status.state` -// can be one of the following: `ACTIVE`, `INACTIVE`, `CREATING`, -// `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` contains the -// `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` contains the -// `DELETING` and `ERROR` states. `clusterName` is the name of the -// cluster provided at creation time. Only the logical `AND` operator is -// supported; space-separated items are treated as having an implicit -// `AND` operator. Example valid filters are: status.state:ACTIVE -// clusterName:mycluster labels.env:staging \ labels.starred:* and -// status.state = ACTIVE AND clusterName = mycluster \ AND labels.env = -// staging AND labels.starred = * +// have the following syntax:field = value AND field = value ...where +// field is one of status.state, clusterName, or labels.[KEY], and [KEY] +// is a label key. value can be * to match all values. status.state can +// be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, +// DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and +// RUNNING states. INACTIVE contains the DELETING and ERROR states. +// clusterName is the name of the cluster provided at creation time. +// Only the logical AND operator is supported; space-separated items are +// treated as having an implicit AND operator.Example +// filter:status.state = ACTIVE AND clusterName = mycluster AND +// labels.env = staging AND labels.starred = * func (c *ProjectsRegionsClustersListCall) Filter(filter string) *ProjectsRegionsClustersListCall { c.urlParams_.Set("filter", filter) return c } -// PageSize sets the optional parameter "pageSize": [Optional] The +// PageSize sets the optional parameter "pageSize": Optional The // standard List page size. func (c *ProjectsRegionsClustersListCall) PageSize(pageSize int64) *ProjectsRegionsClustersListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": [Optional] The +// PageToken sets the optional parameter "pageToken": Optional The // standard List page token. 
func (c *ProjectsRegionsClustersListCall) PageToken(pageToken string) *ProjectsRegionsClustersListCall { c.urlParams_.Set("pageToken", pageToken) @@ -2583,6 +2683,7 @@ func (c *ProjectsRegionsClustersListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2638,6 +2739,7 @@ func (c *ProjectsRegionsClustersListCall) Do(opts ...googleapi.CallOption) (*Lis return ret, nil // { // "description": "Lists all regions/{region}/clusters in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/clusters", // "httpMethod": "GET", // "id": "dataproc.projects.regions.clusters.list", // "parameterOrder": [ @@ -2646,29 +2748,29 @@ func (c *ProjectsRegionsClustersListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "filter": { - // "description": "[Optional] A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax: field:value [field:value] ... or field = value [AND [field = value]] ... where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match all values. `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` contains the `DELETING` and `ERROR` states. `clusterName` is the name of the cluster provided at creation time. Only the logical `AND` operator is supported; space-separated items are treated as having an implicit `AND` operator. Example valid filters are: status.state:ACTIVE clusterName:mycluster labels.env:staging \\ labels.starred:* and status.state = ACTIVE AND clusterName = mycluster \\ AND labels.env = staging AND labels.starred = *", + // "description": "Optional A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING and ERROR states. clusterName is the name of the cluster provided at creation time. 
Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "[Optional] The standard List page size.", + // "description": "Optional The standard List page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "[Optional] The standard List page token.", + // "description": "Optional The standard List page token.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -2729,20 +2831,34 @@ func (r *ProjectsRegionsClustersService) Patch(projectId string, region string, return c } -// UpdateMask sets the optional parameter "updateMask": [Required] -// Specifies the path, relative to Cluster, of the field to update. For -// example, to change the number of workers in a cluster to 5, the -// update_mask parameter would be specified as -// config.worker_config.num_instances, and the `PATCH` request body -// would specify the new value, as follows: { "config":{ -// "workerConfig":{ "numInstances":"5" } } } Similarly, to change the -// number of preemptible workers in a cluster to 5, the update_mask -// parameter would be config.secondary_worker_config.num_instances, and -// the `PATCH` request body would be set as follows: { "config":{ -// "secondaryWorkerConfig":{ "numInstances":"5" } } } Note: Currently, -// config.worker_config.num_instances and -// config.secondary_worker_config.num_instances are the only fields that -// can be updated. +// UpdateMask sets the optional parameter "updateMask": Required +// Specifies the path, relative to Cluster, of the field to +// update. For example, to change the number of workers in a cluster to +// 5, the update_mask parameter would be specified as +// config.worker_config.num_instances, and the PATCH +// request body would specify the new value, as follows: +// { +// "config":{ +// "workerConfig":{ +// "numInstances":"5" +// } +// } +// } +// Similarly, to change the number of preemptible workers in a cluster +// to 5, the update_mask parameter would be +// config.secondary_worker_config.num_instances, and the +// PATCH request body would be set as follows: +// { +// "config":{ +// "secondaryWorkerConfig":{ +// "numInstances":"5" +// } +// } +// } +// Note: Currently, +// config.worker_config.num_instances and +// config.secondary_worker_config.num_instances are the +// only fields that can be updated. 
func (c *ProjectsRegionsClustersPatchCall) UpdateMask(updateMask string) *ProjectsRegionsClustersPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -2779,6 +2895,7 @@ func (c *ProjectsRegionsClustersPatchCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cluster) if err != nil { @@ -2837,6 +2954,7 @@ func (c *ProjectsRegionsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Op return ret, nil // { // "description": "Updates a cluster in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", // "httpMethod": "PATCH", // "id": "dataproc.projects.regions.clusters.patch", // "parameterOrder": [ @@ -2846,25 +2964,26 @@ func (c *ProjectsRegionsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Op // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "[Required] Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the `PATCH` request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the `PATCH` request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } Note: Currently, config.worker_config.num_instances and config.secondary_worker_config.num_instances are the only fields that can be updated.", + // "description": "Required Specifies the path, relative to \u003ccode\u003eCluster\u003c/code\u003e, of the field to update. 
For example, to change the number of workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003econfig.worker_config.num_instances\u003c/code\u003e, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003econfig.worker_config.num_instances\u003c/code\u003e and \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e are the only fields that can be updated.", + // "format": "google-fieldmask", // "location": "query", // "type": "string" // } @@ -2897,11 +3016,8 @@ type ProjectsRegionsJobsCancelCall struct { } // Cancel: Starts a job cancellation request. To access the job resource -// after cancellation, call -// [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regi -// ons.jobs/list) or -// [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regio -// ns.jobs/get). +// after cancellation, call regions/{region}/jobs.list or +// regions/{region}/jobs.get. func (r *ProjectsRegionsJobsService) Cancel(projectId string, region string, jobId string, canceljobrequest *CancelJobRequest) *ProjectsRegionsJobsCancelCall { c := &ProjectsRegionsJobsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -2942,6 +3058,7 @@ func (c *ProjectsRegionsJobsCancelCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceljobrequest) if err != nil { @@ -2999,7 +3116,8 @@ func (c *ProjectsRegionsJobsCancelCall) Do(opts ...googleapi.CallOption) (*Job, } return ret, nil // { - // "description": "Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get).", + // "description": "Starts a job cancellation request. 
To access the job resource after cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", // "httpMethod": "POST", // "id": "dataproc.projects.regions.jobs.cancel", // "parameterOrder": [ @@ -3009,19 +3127,19 @@ func (c *ProjectsRegionsJobsCancelCall) Do(opts ...googleapi.CallOption) (*Job, // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3054,7 +3172,7 @@ type ProjectsRegionsJobsDeleteCall struct { } // Delete: Deletes the job from the project. If the job is active, the -// delete fails, and the response returns `FAILED_PRECONDITION`. +// delete fails, and the response returns FAILED_PRECONDITION. func (r *ProjectsRegionsJobsService) Delete(projectId string, region string, jobId string) *ProjectsRegionsJobsDeleteCall { c := &ProjectsRegionsJobsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -3094,6 +3212,7 @@ func (c *ProjectsRegionsJobsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/regions/{region}/jobs/{jobId}") @@ -3146,7 +3265,8 @@ func (c *ProjectsRegionsJobsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty } return ret, nil // { - // "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", + // "description": "Deletes the job from the project. 
If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", // "httpMethod": "DELETE", // "id": "dataproc.projects.regions.jobs.delete", // "parameterOrder": [ @@ -3156,19 +3276,19 @@ func (c *ProjectsRegionsJobsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3248,6 +3368,7 @@ func (c *ProjectsRegionsJobsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3304,6 +3425,7 @@ func (c *ProjectsRegionsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, err return ret, nil // { // "description": "Gets the resource representation for a job in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", // "httpMethod": "GET", // "id": "dataproc.projects.regions.jobs.get", // "parameterOrder": [ @@ -3313,19 +3435,19 @@ func (c *ProjectsRegionsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, err // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3362,7 +3484,7 @@ func (r *ProjectsRegionsJobsService) List(projectId string, region string) *Proj return c } -// ClusterName sets the optional parameter "clusterName": [Optional] If +// ClusterName sets the optional parameter "clusterName": Optional If // set, the returned jobs list includes only jobs that were submitted to // the named cluster. func (c *ProjectsRegionsJobsListCall) ClusterName(clusterName string) *ProjectsRegionsJobsListCall { @@ -3370,23 +3492,22 @@ func (c *ProjectsRegionsJobsListCall) ClusterName(clusterName string) *ProjectsR return c } -// Filter sets the optional parameter "filter": [Optional] A filter +// Filter sets the optional parameter "filter": Optional A filter // constraining the jobs to list. Filters are case-sensitive and have -// the following syntax: field:value] ... or [field = value] AND [field -// [= value]] ... 
where **field** is `status.state` or `labels.[KEY]`, -// and `[KEY]` is a label key. **value** can be `*` to match all values. -// `status.state` can be either `ACTIVE` or `INACTIVE`. Only the logical -// `AND` operator is supported; space-separated items are treated as -// having an implicit `AND` operator. Example valid filters are: -// status.state:ACTIVE labels.env:staging labels.starred:* and -// status.state = ACTIVE AND labels.env = staging AND labels.starred = * +// the following syntax:field = value AND field = value ...where field +// is status.state or labels.[KEY], and [KEY] is a label key. value can +// be * to match all values. status.state can be either ACTIVE or +// INACTIVE. Only the logical AND operator is supported; space-separated +// items are treated as having an implicit AND operator.Example +// filter:status.state = ACTIVE AND labels.env = staging AND +// labels.starred = * func (c *ProjectsRegionsJobsListCall) Filter(filter string) *ProjectsRegionsJobsListCall { c.urlParams_.Set("filter", filter) return c } // JobStateMatcher sets the optional parameter "jobStateMatcher": -// [Optional] Specifies enumerated categories of jobs to list (default = +// Optional Specifies enumerated categories of jobs to list (default = // match ALL jobs). // // Possible values: @@ -3398,15 +3519,15 @@ func (c *ProjectsRegionsJobsListCall) JobStateMatcher(jobStateMatcher string) *P return c } -// PageSize sets the optional parameter "pageSize": [Optional] The -// number of results to return in each response. +// PageSize sets the optional parameter "pageSize": Optional The number +// of results to return in each response. func (c *ProjectsRegionsJobsListCall) PageSize(pageSize int64) *ProjectsRegionsJobsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": [Optional] The -// page token, returned by a previous call, to request the next page of +// PageToken sets the optional parameter "pageToken": Optional The page +// token, returned by a previous call, to request the next page of // results. func (c *ProjectsRegionsJobsListCall) PageToken(pageToken string) *ProjectsRegionsJobsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -3454,6 +3575,7 @@ func (c *ProjectsRegionsJobsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3509,6 +3631,7 @@ func (c *ProjectsRegionsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJob return ret, nil // { // "description": "Lists regions/{region}/jobs in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/jobs", // "httpMethod": "GET", // "id": "dataproc.projects.regions.jobs.list", // "parameterOrder": [ @@ -3517,17 +3640,17 @@ func (c *ProjectsRegionsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJob // ], // "parameters": { // "clusterName": { - // "description": "[Optional] If set, the returned jobs list includes only jobs that were submitted to the named cluster.", + // "description": "Optional If set, the returned jobs list includes only jobs that were submitted to the named cluster.", // "location": "query", // "type": "string" // }, // "filter": { - // "description": "[Optional] A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax: field:value] ... or [field = value] AND [field [= value]] ... 
where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match all values. `status.state` can be either `ACTIVE` or `INACTIVE`. Only the logical `AND` operator is supported; space-separated items are treated as having an implicit `AND` operator. Example valid filters are: status.state:ACTIVE labels.env:staging labels.starred:* and status.state = ACTIVE AND labels.env = staging AND labels.starred = *", + // "description": "Optional A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or INACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *", // "location": "query", // "type": "string" // }, // "jobStateMatcher": { - // "description": "[Optional] Specifies enumerated categories of jobs to list (default = match ALL jobs).", + // "description": "Optional Specifies enumerated categories of jobs to list (default = match ALL jobs).", // "enum": [ // "ALL", // "ACTIVE", @@ -3537,24 +3660,24 @@ func (c *ProjectsRegionsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJob // "type": "string" // }, // "pageSize": { - // "description": "[Optional] The number of results to return in each response.", + // "description": "Optional The number of results to return in each response.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "[Optional] The page token, returned by a previous call, to request the next page of results.", + // "description": "Optional The page token, returned by a previous call, to request the next page of results.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3592,6 +3715,179 @@ func (c *ProjectsRegionsJobsListCall) Pages(ctx context.Context, f func(*ListJob } } +// method id "dataproc.projects.regions.jobs.patch": + +type ProjectsRegionsJobsPatchCall struct { + s *Service + projectId string + region string + jobId string + job *Job + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a job in a project. +func (r *ProjectsRegionsJobsService) Patch(projectId string, region string, jobId string, job *Job) *ProjectsRegionsJobsPatchCall { + c := &ProjectsRegionsJobsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.region = region + c.jobId = jobId + c.job = job + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required +// Specifies the path, relative to Job, of the field to +// update. For example, to update the labels of a Job the +// update_mask parameter would be specified as +// labels, and the PATCH request body would specify the new +// value. 
Note: Currently, labels is the +// only field that can be updated. +func (c *ProjectsRegionsJobsPatchCall) UpdateMask(updateMask string) *ProjectsRegionsJobsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsRegionsJobsPatchCall) Fields(s ...googleapi.Field) *ProjectsRegionsJobsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsRegionsJobsPatchCall) Context(ctx context.Context) *ProjectsRegionsJobsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsRegionsJobsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsRegionsJobsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/regions/{region}/jobs/{jobId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "region": c.region, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.regions.jobs.patch" call. +// Exactly one of *Job or error will be non-nil. Any non-2xx status code +// is an error. Response headers are in either +// *Job.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsRegionsJobsPatchCall) Do(opts ...googleapi.CallOption) (*Job, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
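+	// doRequest returns the raw *http.Response; a 304 Not Modified carries no
+	// body to decode, so it is surfaced below as a *googleapi.Error that callers
+	// can detect with googleapi.IsNotModified.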
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Job{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a job in a project.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + // "httpMethod": "PATCH", + // "id": "dataproc.projects.regions.jobs.patch", + // "parameterOrder": [ + // "projectId", + // "region", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "Required The job ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Required The Cloud Dataproc region in which to handle the request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Required Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. 
\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", + // "request": { + // "$ref": "Job" + // }, + // "response": { + // "$ref": "Job" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "dataproc.projects.regions.jobs.submit": type ProjectsRegionsJobsSubmitCall struct { @@ -3644,6 +3940,7 @@ func (c *ProjectsRegionsJobsSubmitCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.submitjobrequest) if err != nil { @@ -3701,6 +3998,7 @@ func (c *ProjectsRegionsJobsSubmitCall) Do(opts ...googleapi.CallOption) (*Job, return ret, nil // { // "description": "Submits a job to a cluster.", + // "flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submit", // "httpMethod": "POST", // "id": "dataproc.projects.regions.jobs.submit", // "parameterOrder": [ @@ -3709,13 +4007,13 @@ func (c *ProjectsRegionsJobsSubmitCall) Do(opts ...googleapi.CallOption) (*Job, // ], // "parameters": { // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Cloud Dataproc region in which to handle the request.", + // "description": "Required The Cloud Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3748,13 +4046,13 @@ type ProjectsRegionsOperationsCancelCall struct { // Cancel: Starts asynchronous cancellation on a long-running operation. // The server makes a best effort to cancel the operation, but success // is not guaranteed. If the server doesn't support this method, it -// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// returns google.rpc.Code.UNIMPLEMENTED. Clients can use // Operations.GetOperation or other methods to check whether the // cancellation succeeded or whether the operation completed despite // cancellation. On successful cancellation, the operation is not // deleted; instead, it becomes an operation with an Operation.error // value with a google.rpc.Status.code of 1, corresponding to -// `Code.CANCELLED`. +// Code.CANCELLED. func (r *ProjectsRegionsOperationsService) Cancel(name string) *ProjectsRegionsOperationsCancelCall { c := &ProjectsRegionsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3792,6 +4090,7 @@ func (c *ProjectsRegionsOperationsCancelCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") @@ -3842,7 +4141,8 @@ func (c *ProjectsRegionsOperationsCancelCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. 
The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "dataproc.projects.regions.operations.cancel", // "parameterOrder": [ @@ -3881,7 +4181,7 @@ type ProjectsRegionsOperationsDeleteCall struct { // Delete: Deletes a long-running operation. This method indicates that // the client is no longer interested in the operation result. It does // not cancel the operation. If the server doesn't support this method, -// it returns `google.rpc.Code.UNIMPLEMENTED`. +// it returns google.rpc.Code.UNIMPLEMENTED. func (r *ProjectsRegionsOperationsService) Delete(name string) *ProjectsRegionsOperationsDeleteCall { c := &ProjectsRegionsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3919,6 +4219,7 @@ func (c *ProjectsRegionsOperationsDeleteCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") @@ -3969,7 +4270,8 @@ func (c *ProjectsRegionsOperationsDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", + // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "dataproc.projects.regions.operations.delete", // "parameterOrder": [ @@ -4056,6 +4358,7 @@ func (c *ProjectsRegionsOperationsGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4110,6 +4413,7 @@ func (c *ProjectsRegionsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Op return ret, nil // { // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "dataproc.projects.regions.operations.get", // "parameterOrder": [ @@ -4148,9 +4452,9 @@ type ProjectsRegionsOperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// `UNIMPLEMENTED`. NOTE: the `name` binding below allows API services -// to override the binding to use different resource name schemes, such -// as `users/*/operations`. +// UNIMPLEMENTED.NOTE: the name binding below allows API services to +// override the binding to use different resource name schemes, such as +// users/*/operations. func (r *ProjectsRegionsOperationsService) List(name string) *ProjectsRegionsOperationsListCall { c := &ProjectsRegionsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4219,6 +4523,7 @@ func (c *ProjectsRegionsOperationsListCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4272,7 +4577,8 @@ func (c *ProjectsRegionsOperationsListCall) Do(opts ...googleapi.CallOption) (*L } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding below allows API services to override the binding to use different resource name schemes, such as `users/*/operations`.", + // "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.", + // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", // "httpMethod": "GET", // "id": "dataproc.projects.regions.operations.list", // "parameterOrder": [ diff --git a/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-api.json b/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-api.json index 43865b59f..9ba049e60 100644 --- a/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-api.json +++ b/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-api.json @@ -1,1822 +1,2029 @@ { - "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/r7FI1XKe3nwqujSEPgINO6f6aJQ\"", - "discoveryVersion": "v1", - "id": "dataproc:v1alpha1", - "name": "dataproc", - "version": "v1alpha1", - "revision": "20161102", - "title": "Google Cloud Dataproc API", - "description": "An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/dataproc/", - "protocol": "rest", - "baseUrl": "https://dataproc.googleapis.com/", - "basePath": "", - "rootUrl": "https://dataproc.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" - }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query" - }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", - "location": "query" - }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "schemas": { - "Cluster": { - "id": "Cluster", - "type": "object", - "description": "Describes the identifying information, configuration, and status of a cluster of Google Compute Engine instances.", - "properties": { - "projectId": { - "type": "string", - "description": "[Required] The Google Cloud Platform project ID that the cluster belongs to." - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name. Cluster names within a project must be unique. Names from deleted clusters can be reused." - }, - "configuration": { - "$ref": "ClusterConfiguration", - "description": "[Required] The cluster configuration. It may differ from a user's initial configuration due to Cloud Dataproc setting of default values and updating clusters." - }, - "labels": { - "type": "object", - "description": "[Optional] The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given cluster.", - "additionalProperties": { - "type": "string" - } - }, - "status": { - "$ref": "ClusterStatus", - "description": "[Output-only] Cluster status." - }, - "statusHistory": { - "type": "array", - "description": "[Output-only] Previous cluster statuses.", - "items": { - "$ref": "ClusterStatus" - } - }, - "createTime": { - "type": "string", - "description": "[Output-only] The timestamp of cluster creation." - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster." - }, - "metrics": { - "$ref": "ClusterMetrics", - "description": "Contains cluster daemon metrics such as HDFS and YARN stats." - } - } - }, - "ClusterConfiguration": { - "id": "ClusterConfiguration", - "type": "object", - "description": "The cluster configuration.", - "properties": { - "configurationBucket": { - "type": "string", - "description": "[Optional] A Google Cloud Storage staging bucket used for sharing generated SSH keys and configuration. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, then it will create and manage this project-level, per-location bucket for you." - }, - "gceConfiguration": { - "$ref": "GceConfiguration", - "description": "[Deprecated] The Google Compute Engine configuration settings for cluster resources." - }, - "numWorkers": { - "type": "integer", - "description": "[Deprecated] The number of worker nodes in the cluster.", - "format": "int32" - }, - "workers": { - "type": "array", - "description": "[Deprecated] The list of worker node names. 
Dataproc derives the names from cluster_name and num_workers if not set by user (recommended practice is to let Dataproc derive the name). Derived worker node name example: hadoop-w-0.", - "items": { - "type": "string" - } - }, - "masterName": { - "type": "string", - "description": "[Deprecated] The Master's hostname. Dataproc derives the name from cluster_name if not set by user (recommended practice is to let Dataproc derive the name). Derived master name example: hadoop-m." - }, - "masterDiskConfiguration": { - "$ref": "DiskConfiguration", - "description": "[Deprecated] The configuration settings of master node disk options." - }, - "workerDiskConfiguration": { - "$ref": "DiskConfiguration", - "description": "[Deprecated] The configuration settings of worker node disk options." - }, - "gceClusterConfiguration": { - "$ref": "GceClusterConfiguration", - "description": "[Optional] The shared Google Compute Engine configuration settings for all instances in a cluster." - }, - "masterConfiguration": { - "$ref": "InstanceGroupConfiguration", - "description": "[Optional] The Google Compute Engine configuration settings for the master instance in a cluster." - }, - "workerConfiguration": { - "$ref": "InstanceGroupConfiguration", - "description": "[Optional] The Google Compute Engine configuration settings for worker instances in a cluster." - }, - "secondaryWorkerConfiguration": { - "$ref": "InstanceGroupConfiguration", - "description": "[Optional] The Google Compute Engine configuration settings for additional worker instances in a cluster." - }, - "softwareConfiguration": { - "$ref": "SoftwareConfiguration", - "description": "[Optional] The configuration settings for software inside the cluster." - }, - "initializationActions": { - "type": "array", - "description": "[Optional] Commands to execute on each node after configuration is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below: ROLE=$(/usr/share/google/get_metadata_value attributes/role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", - "items": { - "$ref": "NodeInitializationAction" - } - } - } - }, - "GceConfiguration": { - "id": "GceConfiguration", - "type": "object", - "description": "[Deprecated] Common configuration settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", - "properties": { - "imageUri": { - "type": "string", - "description": "[Deprecated] The Google Compute Engine image resource used for cluster instances. Example: \"compute.googleapis.com/projects/debian-cloud /global/images/backports-debian-7-wheezy-v20140904\"." - }, - "machineTypeUri": { - "type": "string", - "description": "[Deprecated] The Google Compute Engine machine type used for cluster instances. Example: \"compute.googleapis.com/projects/[project_id] /zones/us-east1-a/machineTypes/n1-standard-2\"." - }, - "zoneUri": { - "type": "string", - "description": "[Deprecated] The zone where the Google Compute Engine cluster will be located. Example: \"compute.googleapis.com/projects/[project_id] /zones/us-east1-a\"." - }, - "networkUri": { - "type": "string", - "description": "[Deprecated] The Google Compute Engine network to be used for machine communications. Inbound SSH connections are necessary to complete cluster configuration. Example \"compute.googleapis.com/projects/[project_id] /zones/us-east1-a/default\"." 
- }, - "serviceAccountScopes": { - "type": "array", - "description": "[Deprecated] The service account scopes included in Google Compute Engine instances. Must include devstorage.full_control to enable the Google Cloud Storage connector. Example \"auth.googleapis.com/compute\" and \"auth.googleapis.com/devstorage.full_control\".", - "items": { - "type": "string" - } - } - } - }, - "DiskConfiguration": { - "id": "DiskConfiguration", - "type": "object", - "description": "Specifies the configuration of disk options for a group of VM instances.", - "properties": { - "bootDiskSizeGb": { - "type": "integer", - "description": "[Optional] Size in GB of the boot disk (default is 500GB).", - "format": "int32" - }, - "numLocalSsds": { - "type": "integer", - "description": "[Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs, and HDFS data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic configuration and installed binaries.", - "format": "int32" - } - } - }, - "GceClusterConfiguration": { - "id": "GceClusterConfiguration", - "type": "object", - "description": "Common configuration settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", - "properties": { - "zoneUri": { - "type": "string", - "description": "[Required] The zone where the Google Compute Engine cluster will be located. Example: \"compute.googleapis.com/projects/[project_id] /zones/us-east1-a\"." - }, - "networkUri": { - "type": "string", - "description": "The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see https://cloud.google.com/compute/docs/subnetworks for more information). Example: `compute.googleapis.com/projects/[project_id]/regions/global/default`." - }, - "subnetworkUri": { - "type": "string", - "description": "The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: `compute.googleapis.com/projects/[project_id]/regions/us-east1/sub0`." - }, - "internalIpOnly": { - "type": "boolean", - "description": "If true, all instances in the cluser will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses." - }, - "serviceAccountScopes": { - "type": "array", - "description": "The service account scopes included in Google Compute Engine instances. Must include devstorage.full_control to enable the Google Cloud Storage connector. 
Example \"auth.googleapis.com/compute\" and \"auth.googleapis.com/devstorage.full_control\".", - "items": { - "type": "string" - } - }, - "tags": { - "type": "array", - "description": "The Google Compute Engine tags to add to all instances.", - "items": { - "type": "string" - } - }, - "metadata": { - "type": "object", - "description": "The Google Compute Engine metadata entries to add to all instances.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "InstanceGroupConfiguration": { - "id": "InstanceGroupConfiguration", - "type": "object", - "description": "The configuration settings for Google Compute Engine resources in an instance group, such as a master or worker group.", - "properties": { - "numInstances": { - "type": "integer", - "description": "The number of VM instances in the instance group. For master instance groups, must be set to 1.", - "format": "int32" - }, - "instanceNames": { - "type": "array", - "description": "The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group if not set by user (recommended practice is to let Dataproc derive the name).", - "items": { - "type": "string" - } - }, - "imageUri": { - "type": "string", - "description": "[Output-only] The Google Compute Engine image resource used for cluster instances. Inferred from SoftwareConfiguration.image_version. Example: \"compute.googleapis.com/projects/debian-cloud /global/images/backports-debian-7-wheezy-v20140904\"." - }, - "machineTypeUri": { - "type": "string", - "description": "The Google Compute Engine machine type used for cluster instances. Example: \"compute.googleapis.com/projects/[project_id] /zones/us-east1-a/machineTypes/n1-standard-2\"." - }, - "diskConfiguration": { - "$ref": "DiskConfiguration", - "description": "Disk option configuration settings." - }, - "isPreemptible": { - "type": "boolean", - "description": "Specifies that this instance group contains Preemptible Instances." - }, - "managedGroupConfiguration": { - "$ref": "ManagedGroupConfiguration", - "description": "[Output-only] The configuration for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups." + "version": "v1alpha1", + "baseUrl": "https://dataproc.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } - } }, - "ManagedGroupConfiguration": { - "id": "ManagedGroupConfiguration", - "type": "object", - "description": "Specifies the resources used to actively manage an instance group.", - "properties": { - "instanceTemplateName": { - "type": "string", - "description": "[Output-only] The name of Instance Template used for Managed Instance Group." 
+ "servicePath": "", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "kind": "discovery#restDescription", + "rootUrl": "https://dataproc.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "dataproc", + "batchPath": "batch", + "revision": "20170214", + "documentationLink": "https://cloud.google.com/dataproc/", + "id": "dataproc:v1alpha1", + "title": "Google Cloud Dataproc API", + "discoveryVersion": "v1", + "ownerName": "Google", + "resources": { + "operations": { + "methods": { + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^operations/.+$", + "location": "path", + "description": "The operation resource name." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha1/operations/{operationsId}", + "id": "dataproc.operations.get", + "path": "v1alpha1/{+name}", + "description": "Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service." + }, + "list": { + "response": { + "$ref": "ListOperationsResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^operations$", + "location": "path", + "description": "The operation collection name." + }, + "pageToken": { + "description": "The standard List page token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "description": "The standard List page size.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "filter": { + "location": "query", + "description": "Required A JSON object that contains filters for the list operation, in the format {\"key1\":\"value1\",\"key2\":\"value2\", ..., }. Possible keys include project_id, cluster_name, and operation_state_matcher.If project_id is set, requests the list of operations that belong to the specified Google Cloud Platform project ID. This key is required.If cluster_name is set, requests the list of operations that were submitted to the specified cluster name. This key is optional.If operation_state_matcher is set, requests the list of operations that match one of the following status options: ALL, ACTIVE, or NON_ACTIVE.", + "type": "string" + } + }, + "flatPath": "v1alpha1/operations", + "path": "v1alpha1/{+name}", + "id": "dataproc.operations.list", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED." + }, + "cancel": { + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "location": "path", + "description": "The name of the operation resource to be cancelled.", + "required": true, + "type": "string", + "pattern": "^operations/.+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha1/operations/{operationsId}:cancel", + "id": "dataproc.operations.cancel", + "path": "v1alpha1/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. 
If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients may use Operations.GetOperation or other methods to check whether the cancellation succeeded or the operation completed despite cancellation." + }, + "delete": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "The name of the operation resource to be deleted.", + "required": true, + "type": "string", + "pattern": "^operations/.+$" + } + }, + "flatPath": "v1alpha1/operations/{operationsId}", + "path": "v1alpha1/{+name}", + "id": "dataproc.operations.delete", + "description": "Deletes a long-running operation. It indicates the client is no longer interested in the operation result. It does not cancel the operation." + } + } }, - "instanceGroupManagerName": { - "type": "string", - "description": "[Output-only] The name of Instance Group Manager managing this group." + "projects": { + "resources": { + "regions": { + "resources": { + "jobs": { + "methods": { + "get": { + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "id": "dataproc.projects.regions.jobs.get", + "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "description": "Gets the resource representation for a job in a project.", + "httpMethod": "GET", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "response": { + "$ref": "Job" + }, + "parameters": { + "region": { + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "jobId": { + "description": "Required The job ID.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "httpMethod": "PATCH", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "region": { + "location": "path", + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. 
\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "id": "dataproc.projects.regions.jobs.patch", + "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "description": "Updates a job in a project.", + "request": { + "$ref": "Job" + } + }, + "submit": { + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs:submit", + "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs:submit", + "id": "dataproc.projects.regions.jobs.submit", + "description": "Submits a job to a cluster.", + "request": { + "$ref": "SubmitJobRequest" + }, + "response": { + "$ref": "Job" + }, + "parameterOrder": [ + "projectId", + "region" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "region": { + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string", + "location": "path" + } + } + }, + "delete": { + "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", + "httpMethod": "DELETE", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + }, + "region": { + "location": "path", + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + "id": "dataproc.projects.regions.jobs.delete", + "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}" + }, + "list": { + "response": { + "$ref": "ListJobsResponse" + }, + "parameterOrder": [ + "projectId", + "region" + ], + "httpMethod": "POST", + "parameters": { + "region": { + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs:list", + "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs:list", + "id": "dataproc.projects.regions.jobs.list", + "request": { + "$ref": "ListJobsRequest" + }, + "description": "Lists 
regions/{region}/jobs in a project." + }, + "cancel": { + "description": "Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs:list or regions/{region}/jobs:get.", + "request": { + "$ref": "CancelJobRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "projectId", + "region", + "jobId" + ], + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "region": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The Dataproc region in which to handle the request." + }, + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", + "id": "dataproc.projects.regions.jobs.cancel", + "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel" + } + } + }, + "clusters": { + "methods": { + "create": { + "id": "dataproc.projects.regions.clusters.create", + "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters", + "description": "Request to create a cluster in a project.", + "request": { + "$ref": "Cluster" + }, + "httpMethod": "POST", + "parameterOrder": [ + "projectId", + "region" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string" + }, + "region": { + "location": "path", + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters" + }, + "delete": { + "parameters": { + "region": { + "location": "path", + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string" + }, + "clusterName": { + "description": "Required The cluster name.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "id": "dataproc.projects.regions.clusters.delete", + "description": "Request to delete a cluster in a project.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "httpMethod": "DELETE" + }, + "patch": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "httpMethod": "PATCH", + "parameters": { + "projectId": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The ID of the Google Cloud Platform project the cluster belongs to." 
+ }, + "region": { + "location": "path", + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string" + }, + "updateMask": { + "location": "query", + "description": "Required Specifies the path, relative to \u003ccode\u003eCluster\u003c/code\u003e, of the field to update. For example, to change the number of workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003e\"configuration.worker_configuration.num_instances,\"\u003c/code\u003e and the PATCH request body would specify the new value, as follows:\n{\n \"configuration\":{\n \"workerConfiguration\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003econfiguration.worker_configuration.num_instances\u003c/code\u003e is the only field that can be updated.", + "format": "google-fieldmask", + "type": "string" + }, + "clusterName": { + "location": "path", + "description": "Required The cluster name.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "id": "dataproc.projects.regions.clusters.patch", + "request": { + "$ref": "Cluster" + }, + "description": "Request to update a cluster in a project." + }, + "get": { + "description": "Request to get the resource representation for a cluster in a project.", + "response": { + "$ref": "Cluster" + }, + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterName": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The cluster name." + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "region": { + "location": "path", + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", + "id": "dataproc.projects.regions.clusters.get" + }, + "list": { + "httpMethod": "GET", + "response": { + "$ref": "ListClustersResponse" + }, + "parameterOrder": [ + "projectId", + "region" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "The standard List page token.", + "type": "string" + }, + "pageSize": { + "description": "The standard List page size.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "projectId": { + "required": true, + "type": "string", + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to." + }, + "region": { + "location": "path", + "description": "Required The Dataproc region in which to handle the request.", + "required": true, + "type": "string" + }, + "filter": { + "description": "Optional A filter constraining which clusters to list. 
Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters", + "id": "dataproc.projects.regions.clusters.list", + "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters", + "description": "Request a list of all regions/{region}/clusters in a project." + } + } + } + } + } + } } - } }, - "SoftwareConfiguration": { - "id": "SoftwareConfiguration", - "type": "object", - "description": "Specifies the selection and configuration of software inside the cluster.", - "properties": { - "imageVersion": { - "type": "string", - "description": "[Optional] The version of software inside the cluster. It must match the regular expression [0-9]+\\.[0-9]+. If unspecified it will default to latest version." - }, - "properties": { - "type": "object", - "description": "[Optional] The properties to set on daemon configuration files. Property keys are specified in \"prefix:property\" format, such as \"core:fs.defaultFS\". The following are supported prefixes and their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - pig.properties spark - spark-defaults.conf", - "additionalProperties": { + "parameters": { + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", "type": "string" - } - } - } - }, - "NodeInitializationAction": { - "id": "NodeInitializationAction", - "type": "object", - "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", - "properties": { - "executableFile": { - "type": "string", - "description": "[Required] Google Cloud Storage URI of executable file." - }, - "executionTimeout": { - "type": "string", - "description": "[Optional] Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period." - } - } - }, - "ClusterStatus": { - "id": "ClusterStatus", - "type": "object", - "description": "The status of a cluster and its instances.", - "properties": { - "state": { - "type": "string", - "description": "The cluster's state.", - "enum": [ - "UNKNOWN", - "CREATING", - "RUNNING", - "ERROR", - "DELETING", - "UPDATING" - ] - }, - "detail": { - "type": "string", - "description": "Optional details of cluster's state." }, - "stateStartTime": { - "type": "string", - "description": "Time when this state was entered." - } - } - }, - "ClusterMetrics": { - "id": "ClusterMetrics", - "type": "object", - "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.", - "properties": { - "hdfsMetrics": { - "type": "object", - "description": "The HDFS metrics.", - "additionalProperties": { + "oauth_token": { "type": "string", - "format": "int64" - } + "location": "query", + "description": "OAuth 2.0 token for the current user." }, - "yarnMetrics": { - "type": "object", - "description": "The YARN metrics.", - "additionalProperties": { + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", "type": "string", - "format": "int64" - } - } - } - }, - "Operation": { - "id": "Operation", - "type": "object", - "description": "An asynchronous operation in a project that runs over a given cluster. Used to track the progress of a user request that is running asynchronously. Examples include creating a cluster, updating a cluster, and deleting a cluster.", - "properties": { - "name": { - "type": "string", - "description": "The name of the operation resource, in the format projects/[project_id]/operations/[operation_id]" - }, - "metadata": { - "type": "object", - "description": "Service-specific metadata associated with the operation.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - }, - "done": { - "type": "boolean", - "description": "Indicates if the operation is done. If true, the operation is complete and the `result` is available. If false, the operation is still in progress." - }, - "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure." - }, - "response": { - "type": "object", - "description": "The operation response. If the called method returns no data on success, the response is `google.protobuf.Empty`. If the called method is `Get`,`Create` or `Update`, the response is the resource. For all other methods, the response type is a concatenation of the method name and \"Response\". For example, if the called method is `TakeSnapshot()`, the response type is `TakeSnapshotResponse`.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - } - } - }, - "Status": { - "id": "Status", - "type": "object", - "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). The error model is designed to be: - Simple to use and understand for most users - Flexible enough to meet unexpected needs # Overview The `Status` message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers *understand* and *resolve* the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package `google.rpc` which can be used for common error conditions. # Language mapping The `Status` message is the logical representation of the error model, but it is not necessarily the actual wire format. When the `Status` message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C. # Other uses The error model and the `Status` message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments. Example uses of this error model include: - Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors. 
- Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose. - Batch operations. If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response. - Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message. - Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.", - "properties": { - "code": { - "type": "integer", - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32" - }, - "message": { - "type": "string", - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client." - }, - "details": { - "type": "array", - "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", - "items": { - "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - } - } - } - }, - "ListClustersResponse": { - "id": "ListClustersResponse", - "type": "object", - "description": "The list of all clusters in a project.", - "properties": { - "clusters": { - "type": "array", - "description": "[Output-only] The clusters in the project.", - "items": { - "$ref": "Cluster" - } - }, - "nextPageToken": { - "type": "string", - "description": "The standard List next-page token." - } - } - }, - "SubmitJobRequest": { - "id": "SubmitJobRequest", - "type": "object", - "description": "A job submission request.", - "properties": { - "job": { - "$ref": "Job", - "description": "[Required] The job resource." - } - } - }, - "Job": { - "id": "Job", - "type": "object", - "description": "A Cloud Dataproc job resource.", - "properties": { - "reference": { - "$ref": "JobReference", - "description": "[Optional] The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." - }, - "placement": { - "$ref": "JobPlacement", - "description": "[Required] Job information, including how, when, and where to run the job." - }, - "hadoopJob": { - "$ref": "HadoopJob", - "description": "Job is a Hadoop job." - }, - "sparkJob": { - "$ref": "SparkJob", - "description": "Job is a Spark job." - }, - "pysparkJob": { - "$ref": "PySparkJob", - "description": "Job is a Pyspark job." + "location": "query" }, - "hiveJob": { - "$ref": "HiveJob", - "description": "Job is a Hive job." + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "pigJob": { - "$ref": "PigJob", - "description": "Job is a Pig job." - }, - "sparkSqlJob": { - "$ref": "SparkSqlJob", - "description": "Job is a SparkSql job." - }, - "status": { - "$ref": "JobStatus", - "description": "[Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields." 
- }, - "statusHistory": { - "type": "array", - "description": "[Output-only] The previous job status.", - "items": { - "$ref": "JobStatus" - } - }, - "yarnApplications": { - "type": "array", - "description": "[Output-only] The collection of Yarn applications spun up by this job.", - "items": { - "$ref": "YarnApplication" - } - }, - "submittedBy": { - "type": "string", - "description": "[Output-only] The email address of the user submitting the job. For jobs submitted on the cluster, the address is username@hostname." - }, - "driverOutputUri": { - "type": "string", - "description": "[Output-only] A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, gs://sysbucket123/foo-cluster/jobid-123/driver/output." - }, - "driverInputResourceUri": { - "type": "string", - "description": "[Output-only] A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive." - }, - "driverOutputResourceUri": { - "type": "string", - "description": "[Output-only] A URI pointing to the location of the stdout of the job's driver program." - }, - "driverControlFilesUri": { - "type": "string", - "description": "[Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri." - }, - "interactive": { - "type": "boolean", - "description": "[Optional] If set to true, then the driver's stdin will be kept open and driver_input_uri will be set to provide a path at which additional input can be sent to the driver." - }, - "labels": { - "type": "object", - "description": "[Optional] The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given job.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "JobReference": { - "id": "JobReference", - "type": "object", - "description": "Encapsulates the full scoping used to reference a job.", - "properties": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to." - }, - "jobId": { - "type": "string", - "description": "[Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 512 characters." - } - } - }, - "JobPlacement": { - "id": "JobPlacement", - "type": "object", - "description": "Cloud Dataproc job configuration.", - "properties": { - "clusterName": { - "type": "string", - "description": "[Required] The name of the cluster where the job will be submitted." - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] A cluster UUID generated by the Dataproc service when the job is submitted." 
- } - } - }, - "HadoopJob": { - "id": "HadoopJob", - "type": "object", - "description": "A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.", - "properties": { - "mainJarFileUri": { - "type": "string", - "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar" - }, - "mainClass": { - "type": "string", - "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", - "items": { + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", "type": "string" - } }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", - "additionalProperties": { - "type": "string" - } + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "LoggingConfiguration": { - "id": "LoggingConfiguration", - "type": "object", - "description": "The runtime logging configuration of the job.", - "properties": { - "driverLogLevels": { - "type": "object", - "description": "The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. 
Examples: com.google = FATAL, root = INFO, org.apache = DEBUG", - "additionalProperties": { + "$.xgafv": { + "description": "V1 error format.", "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", "enum": [ - "LEVEL_UNSPECIFIED", - "ALL", - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL", - "OFF" + "1", + "2" ] - } - } - } - }, - "SparkJob": { - "id": "SparkJob", - "type": "object", - "description": "A Cloud Dataproc job for running Spark applications on YARN.", - "properties": { - "mainJarFileUri": { - "type": "string", - "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class." - }, - "mainClass": { - "type": "string", - "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } - }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "PySparkJob": { - "id": "PySparkJob", - "type": "object", - "description": "A Cloud Dataproc job for running PySpark applications on YARN.", - "properties": { - "mainPythonFileUri": { - "type": "string", - "description": "[Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } - }, - "pythonFileUris": { - "type": "array", - "description": "[Optional] HCFS file URIs of Python files to pass to the PySpark framework. 
Supported file types: .py, .egg, and .zip.", - "items": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } - }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "HiveJob": { - "id": "HiveJob", - "type": "object", - "description": "A Cloud Dataproc job for running Hive queries on YARN.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains Hive queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "continueOnFailure": { - "type": "boolean", - "description": "[Optional] Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries." - }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Hive command: 'SET name=\"value\";').", - "additionalProperties": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", - "items": { - "type": "string" - } - } - } - }, - "QueryList": { - "id": "QueryList", - "type": "object", - "description": "A list of queries to run on a cluster.", - "properties": { - "queries": { - "type": "array", - "description": "[Required] The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - "items": { - "type": "string" - } - } - } - }, - "PigJob": { - "id": "PigJob", - "type": "object", - "description": "A Cloud Dataproc job for running Pig queries on YARN.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains the Pig queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "continueOnFailure": { - "type": "boolean", - "description": "[Optional] Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries." - }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Pig command: \"name=[value]\").", - "additionalProperties": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", - "items": { - "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "SparkSqlJob": { - "id": "SparkSqlJob", - "type": "object", - "description": "A Cloud Dataproc job for running Spark SQL queries.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains SQL queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", - "additionalProperties": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.", - "additionalProperties": { - "type": "string" - } }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH.", - "items": { - "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." 
- } - } - }, - "JobStatus": { - "id": "JobStatus", - "type": "object", - "description": "Cloud Dataproc job status.", - "properties": { - "state": { - "type": "string", - "description": "[Required] A state message specifying the overall job state.", - "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "SETUP_DONE", - "RUNNING", - "CANCEL_PENDING", - "CANCEL_STARTED", - "CANCELLED", - "DONE", - "ERROR" - ] - }, - "details": { - "type": "string", - "description": "[Optional] Job state details, such as an error description if the state is ERROR." - }, - "insertTime": { - "type": "string", - "description": "The time of the job request." - }, - "startTime": { - "type": "string", - "description": "The time when the server started the job." - }, - "endTime": { - "type": "string", - "description": "The time when the job completed." - }, - "stateStartTime": { - "type": "string", - "description": "[Output-only] The time when this state was entered." - } - } - }, - "YarnApplication": { - "id": "YarnApplication", - "type": "object", - "description": "A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.", - "properties": { - "name": { - "type": "string", - "description": "[Required] The application name." - }, - "state": { - "type": "string", - "description": "[Required] The application state.", - "enum": [ - "STATE_UNSPECIFIED", - "NEW", - "NEW_SAVING", - "SUBMITTED", - "ACCEPTED", - "RUNNING", - "FINISHED", - "FAILED", - "KILLED" - ] - }, - "progress": { - "type": "number", - "description": "[Required] The numerical progress of the application, from 1 to 100.", - "format": "float" + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" }, - "trackingUrl": { - "type": "string", - "description": "[Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access." - } - } - }, - "ListJobsResponse": { - "id": "ListJobsResponse", - "type": "object", - "description": "A response to a request to list jobs in a project.", - "properties": { - "jobs": { - "type": "array", - "description": "[Output-only] Jobs list.", - "items": { - "$ref": "Job" - } + "key": { + "type": "string", + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token." }, - "nextPageToken": { - "type": "string", - "description": "[Optional] This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListJobsRequest." + "quotaUser": { + "type": "string", + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." 
+ }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" } - } }, - "ListJobsRequest": { - "id": "ListJobsRequest", - "type": "object", - "description": "A request to list jobs in a project.", - "properties": { - "pageSize": { - "type": "integer", - "description": "[Optional] The number of results to return in each response.", - "format": "int32" - }, - "pageToken": { - "type": "string", - "description": "[Optional] The page token, returned by a previous call, to request the next page of results." - }, - "clusterName": { - "type": "string", - "description": "[Optional] If set, the returned jobs list includes only jobs that were submitted to the named cluster." - }, - "jobStateMatcher": { - "type": "string", - "description": "[Optional] Specifies enumerated categories of jobs to list.", - "enum": [ - "ALL", - "ACTIVE", - "NON_ACTIVE" - ] + "schemas": { + "NodeInitializationAction": { + "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", + "type": "object", + "properties": { + "executionTimeout": { + "type": "string", + "description": "Optional Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + "format": "google-duration" + }, + "executableFile": { + "description": "Required Google Cloud Storage URI of executable file.", + "type": "string" + } + }, + "id": "NodeInitializationAction" }, - "filter": { - "type": "string", - "description": "[Optional] A filter constraining which jobs to list. Valid filters contain job state and label terms such as: labels.key1 = val1 AND (labels.k2 = val2 OR labels.k3 = val3)" - } - } - }, - "CancelJobRequest": { - "id": "CancelJobRequest", - "type": "object", - "description": "A request to cancel a job." - }, - "ListOperationsResponse": { - "id": "ListOperationsResponse", - "type": "object", - "description": "The response message for Operations.ListOperations.", - "properties": { - "operations": { - "type": "array", - "description": "A list of operations that match the specified filter in the request.", - "items": { - "$ref": "Operation" - } + "ListJobsResponse": { + "description": "A response to a request to list jobs in a project.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "Optional This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListJobsRequest\u003c/code\u003e.", + "type": "string" + }, + "jobs": { + "type": "array", + "items": { + "$ref": "Job" + }, + "description": "Output-only Jobs list." + } + }, + "id": "ListJobsResponse" }, - "nextPageToken": { - "type": "string", - "description": "The standard List next-page token." - } - } - }, - "CancelOperationRequest": { - "id": "CancelOperationRequest", - "type": "object", - "description": "The request message for Operations.CancelOperation." - }, - "Empty": { - "id": "Empty", - "type": "object", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`." - }, - "DiagnoseClusterResults": { - "id": "DiagnoseClusterResults", - "type": "object", - "description": "The location of diagnostic output.", - "properties": { - "outputUri": { - "type": "string", - "description": "[Output-only] The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics." - } - } - }, - "ClusterOperationMetadata": { - "id": "ClusterOperationMetadata", - "type": "object", - "description": "Metadata describing the operation.", - "properties": { - "clusterName": { - "type": "string", - "description": "[Output-only] Name of the cluster for the operation." + "CancelJobRequest": { + "description": "A request to cancel a job.", + "type": "object", + "properties": {}, + "id": "CancelJobRequest" }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] Cluster UUID for the operation." + "SparkSqlJob": { + "description": "A Cloud Dataproc job for running Spark SQL queries.", + "type": "object", + "properties": { + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "queryFileUri": { + "type": "string", + "description": "The HCFS URI of the script that contains SQL queries." + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.", + "type": "array", + "items": { + "type": "string" + } + }, + "scriptVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", + "type": "object" + }, + "loggingConfiguration": { + "description": "Optional The runtime log configuration for job execution.", + "$ref": "LoggingConfiguration" + }, + "properties": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten." + } + }, + "id": "SparkSqlJob" }, - "status": { - "$ref": "ClusterOperationStatus", - "description": "[Output-only] Current operation status." + "Cluster": { + "id": "Cluster", + "description": "Describes the identifying information, configuration, and status of a cluster of Google Compute Engine instances.", + "type": "object", + "properties": { + "metrics": { + "$ref": "ClusterMetrics", + "description": "Contains cluster daemon metrics such as HDFS and YARN stats." + }, + "status": { + "$ref": "ClusterStatus", + "description": "Output-only Cluster status." + }, + "statusHistory": { + "description": "Output-only Previous cluster statuses.", + "type": "array", + "items": { + "$ref": "ClusterStatus" + } + }, + "clusterName": { + "description": "Required The cluster name. Cluster names within a project must be unique. Names from deleted clusters can be reused.", + "type": "string" + }, + "clusterUuid": { + "description": "Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster.", + "type": "string" + }, + "projectId": { + "description": "Required The Google Cloud Platform project ID that the cluster belongs to.", + "type": "string" + }, + "configuration": { + "description": "Required The cluster configuration. 
It may differ from a user's initial configuration due to Cloud Dataproc setting of default values and updating clusters.", + "$ref": "ClusterConfiguration" + }, + "createTime": { + "description": "Output-only The timestamp of cluster creation.", + "format": "google-datetime", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 64 labels can be associated with a given cluster.", + "type": "object" + } + } }, - "statusHistory": { - "type": "array", - "description": "[Output-only] The previous operation status.", - "items": { - "$ref": "ClusterOperationStatus" - } + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "description": "A list of operations that match the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Operation" + } + } + }, + "id": "ListOperationsResponse" }, - "operationType": { - "type": "string", - "description": "[Output-only] The operation type." + "OperationMetadata": { + "type": "object", + "properties": { + "description": { + "description": "Output-only Short description of operation.", + "type": "string" + }, + "status": { + "$ref": "OperationStatus", + "description": "Output-only Current operation status." + }, + "state": { + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is currently running.", + "The operation is done, either cancelled or completed." + ], + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ], + "description": "A message containing the operation state.", + "type": "string" + }, + "details": { + "description": "A message containing any operation metadata details.", + "type": "string" + }, + "clusterUuid": { + "description": "Cluster UUId for the operation.", + "type": "string" + }, + "clusterName": { + "description": "Name of the cluster for the operation.", + "type": "string" + }, + "innerState": { + "description": "A message containing the detailed operation state.", + "type": "string" + }, + "endTime": { + "type": "string", + "description": "The time that the operation completed.", + "format": "google-datetime" + }, + "startTime": { + "description": "The time that the operation was started by the server.", + "format": "google-datetime", + "type": "string" + }, + "warnings": { + "description": "Output-only Errors encountered during operation execution.", + "type": "array", + "items": { + "type": "string" + } + }, + "insertTime": { + "description": "The time that the operation was requested.", + "format": "google-datetime", + "type": "string" + }, + "statusHistory": { + "description": "Output-only Previous operation status.", + "type": "array", + "items": { + "$ref": "OperationStatus" + } + }, + "operationType": { + "description": "Output-only The operation type.", + "type": "string" + } + }, + "id": "OperationMetadata", + "description": "Metadata describing the operation." }, - "description": { - "type": "string", - "description": "[Output-only] Short description of operation." 
+ "JobPlacement": { + "description": "Cloud Dataproc job configuration.", + "type": "object", + "properties": { + "clusterName": { + "type": "string", + "description": "Required The name of the cluster where the job will be submitted." + }, + "clusterUuid": { + "description": "Output-only A cluster UUID generated by the Dataproc service when the job is submitted.", + "type": "string" + } + }, + "id": "JobPlacement" }, - "labels": { - "type": "object", - "description": "[Output-only] labels associated with the operation", - "additionalProperties": { - "type": "string" - } - } - } - }, - "ClusterOperationStatus": { - "id": "ClusterOperationStatus", - "type": "object", - "description": "The status of the operation.", - "properties": { - "state": { - "type": "string", - "description": "[Output-only] A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "ClusterStatus": { + "description": "The status of a cluster and its instances.", + "type": "object", + "properties": { + "state": { + "type": "string", + "enumDescriptions": [ + "The cluster state is unknown.", + "The cluster is being created and set up. It is not ready for use.", + "The cluster is currently running and healthy. It is ready for use.", + "The cluster encountered an error. It is not ready for use.", + "The cluster is being deleted. It cannot be used.", + "The cluster is being updated. It continues to accept and process jobs." + ], + "enum": [ + "UNKNOWN", + "CREATING", + "RUNNING", + "ERROR", + "DELETING", + "UPDATING" + ], + "description": "The cluster's state." + }, + "stateStartTime": { + "description": "Time when this state was entered.", + "format": "google-datetime", + "type": "string" + }, + "detail": { + "description": "Optional details of cluster's state.", + "type": "string" + } + }, + "id": "ClusterStatus" }, - "innerState": { - "type": "string", - "description": "[Output-only] A message containing the detailed operation state." + "PigJob": { + "description": "A Cloud Dataproc job for running Pig queries on YARN.", + "type": "object", + "properties": { + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "queryFileUri": { + "description": "The HCFS URI of the script that contains the Pig queries.", + "type": "string" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + "type": "array", + "items": { + "type": "string" + } + }, + "scriptVariables": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional Mapping of query variable names to values (equivalent to the Pig command: \"name=value\")." + }, + "loggingConfiguration": { + "$ref": "LoggingConfiguration", + "description": "Optional The runtime log configuration for job execution." + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + "type": "object" + }, + "continueOnFailure": { + "description": "Optional Whether to continue executing queries if a query fails. The default value is false. 
Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" + } + }, + "id": "PigJob" }, - "details": { - "type": "string", - "description": "[Output-only]A message containing any operation metadata details." + "ManagedGroupConfiguration": { + "id": "ManagedGroupConfiguration", + "description": "Specifies the resources used to actively manage an instance group.", + "type": "object", + "properties": { + "instanceGroupManagerName": { + "description": "Output-only The name of Instance Group Manager managing this group.", + "type": "string" + }, + "instanceTemplateName": { + "description": "Output-only The name of Instance Template used for Managed Instance Group.", + "type": "string" + } + } }, - "stateStartTime": { - "type": "string", - "description": "[Output-only] The time this state was entered." - } - } - }, - "DiagnoseClusterOutputLocation": { - "id": "DiagnoseClusterOutputLocation", - "type": "object", - "description": "The location where output from diagnostic command can be found.", - "properties": { - "outputUri": { - "type": "string", - "description": "[Output-only] The Google Cloud Storage URI of the diagnostic output. This will be a plain text file with summary of collected diagnostics." - } - } - }, - "OperationMetadata": { - "id": "OperationMetadata", - "type": "object", - "description": "Metadata describing the operation.", - "properties": { - "state": { - "type": "string", - "description": "A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "ListClustersResponse": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "$ref": "Cluster" + }, + "description": "Output-only The clusters in the project." + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + } + }, + "id": "ListClustersResponse", + "description": "The list of all clusters in a project." }, - "innerState": { - "type": "string", - "description": "A message containing the detailed operation state." + "SparkJob": { + "description": "A Cloud Dataproc job for running Spark applications on YARN.", + "type": "object", + "properties": { + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + "type": "object" + }, + "args": { + "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "type": "array", + "items": { + "type": "string" + } + }, + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainClass": { + "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.", + "type": "string" + }, + "archiveUris": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip." + }, + "mainJarFileUri": { + "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class.", + "type": "string" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "loggingConfiguration": { + "$ref": "LoggingConfiguration", + "description": "Optional The runtime log configuration for job execution." + } + }, + "id": "SparkJob" }, - "details": { - "type": "string", - "description": "A message containing any operation metadata details." + "Job": { + "description": "A Cloud Dataproc job resource.", + "type": "object", + "properties": { + "reference": { + "$ref": "JobReference", + "description": "Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a \u003ccode\u003ejob_id\u003c/code\u003e." + }, + "interactive": { + "description": "Optional If set to true, then the driver's stdin will be kept open and driver_input_uri will be set to provide a path at which additional input can be sent to the driver.", + "type": "boolean" + }, + "driverInputResourceUri": { + "description": "Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.", + "type": "string" + }, + "hadoopJob": { + "$ref": "HadoopJob", + "description": "Job is a Hadoop job." + }, + "status": { + "$ref": "JobStatus", + "description": "Output-only The job status. Additional application-specific status information may be contained in the \u003ccode\u003etype_job\u003c/code\u003e and \u003ccode\u003eyarn_applications\u003c/code\u003e fields." + }, + "placement": { + "description": "Required Job information, including how, when, and where to run the job.", + "$ref": "JobPlacement" + }, + "driverControlFilesUri": { + "description": "Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.", + "type": "string" + }, + "submittedBy": { + "description": "Output-only The email address of the user submitting the job. For jobs submitted on the cluster, the address is \u003ccode\u003eusername@hostname\u003c/code\u003e.", + "type": "string" + }, + "scheduling": { + "description": "Optional Job scheduling configuration.", + "$ref": "JobScheduling" + }, + "pigJob": { + "description": "Job is a Pig job.", + "$ref": "PigJob" + }, + "driverOutputUri": { + "type": "string", + "description": "Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, \u003ccode\u003egs://sysbucket123/foo-cluster/jobid-123/driver/output\u003c/code\u003e." 
+ }, + "hiveJob": { + "description": "Job is a Hive job.", + "$ref": "HiveJob" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 64 labels can be associated with a given job.", + "type": "object" + }, + "driverOutputResourceUri": { + "description": "Output-only A URI pointing to the location of the stdout of the job's driver program.", + "type": "string" + }, + "sparkJob": { + "$ref": "SparkJob", + "description": "Job is a Spark job." + }, + "sparkSqlJob": { + "$ref": "SparkSqlJob", + "description": "Job is a SparkSql job." + }, + "statusHistory": { + "description": "Output-only The previous job status.", + "type": "array", + "items": { + "$ref": "JobStatus" + } + }, + "yarnApplications": { + "description": "Output-only The collection of Yarn applications spun up by this job.", + "type": "array", + "items": { + "$ref": "YarnApplication" + } + }, + "pysparkJob": { + "$ref": "PySparkJob", + "description": "Job is a Pyspark job." + } + }, + "id": "Job" + }, + "DiskConfiguration": { + "properties": { + "numLocalSsds": { + "description": "Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs, and HDFS data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic configuration and installed binaries.", + "format": "int32", + "type": "integer" + }, + "bootDiskSizeGb": { + "description": "Optional Size in GB of the boot disk (default is 500GB).", + "format": "int32", + "type": "integer" + } + }, + "id": "DiskConfiguration", + "description": "Specifies the configuration of disk options for a group of VM instances.", + "type": "object" }, - "insertTime": { - "type": "string", - "description": "The time that the operation was requested." + "JobStatus": { + "description": "Cloud Dataproc job status.", + "type": "object", + "properties": { + "state": { + "type": "string", + "enumDescriptions": [ + "The job state is unknown.", + "The job is pending; it has been submitted, but is not yet running.", + "Job has been received by the service and completed initial setup; it will shortly be submitted to the cluster.", + "The job is running on the cluster.", + "A CancelJob request has been received, but is pending.", + "Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.", + "The job cancelation was successful.", + "The job has completed successfully.", + "The job has completed, but encountered an error.", + "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." + ], + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "SETUP_DONE", + "RUNNING", + "CANCEL_PENDING", + "CANCEL_STARTED", + "CANCELLED", + "DONE", + "ERROR", + "ATTEMPT_FAILURE" + ], + "description": "Required A state message specifying the overall job state." 
+ }, + "details": { + "description": "Optional Job state details, such as an error description if the state is \u003ccode\u003eERROR\u003c/code\u003e.", + "type": "string" + }, + "stateStartTime": { + "description": "Output-only The time when this state was entered.", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "The time when the job completed.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "The time when the server started the job.", + "format": "google-datetime", + "type": "string" + }, + "insertTime": { + "description": "The time of the job request.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "JobStatus" }, - "startTime": { - "type": "string", - "description": "The time that the operation was started by the server." + "ClusterOperationStatus": { + "description": "The status of the operation.", + "type": "object", + "properties": { + "state": { + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is running.", + "The operation is done; either cancelled or completed." + ], + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ], + "description": "Output-only A message containing the operation state.", + "type": "string" + }, + "details": { + "description": "Output-onlyA message containing any operation metadata details.", + "type": "string" + }, + "innerState": { + "type": "string", + "description": "Output-only A message containing the detailed operation state." + }, + "stateStartTime": { + "description": "Output-only The time this state was entered.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "ClusterOperationStatus" }, - "endTime": { - "type": "string", - "description": "The time that the operation completed." + "HadoopJob": { + "description": "A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.", + "type": "object", + "properties": { + "mainJarFileUri": { + "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar", + "type": "string" + }, + "jarFileUris": { + "description": "Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "loggingConfiguration": { + "description": "Optional The runtime log configuration for job execution.", + "$ref": "LoggingConfiguration" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + "type": "object" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission." + }, + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. 
Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainClass": { + "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.", + "type": "string" + }, + "archiveUris": { + "description": "Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "HadoopJob" }, - "clusterName": { - "type": "string", - "description": "Name of the cluster for the operation." + "QueryList": { + "description": "A list of queries to run on a cluster.", + "type": "object", + "properties": { + "queries": { + "description": "Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:\n\"hiveJob\": {\n \"queryList\": {\n \"queries\": [\n \"query1\",\n \"query2\",\n \"query3;query4\",\n ]\n }\n}\n", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "QueryList" }, - "clusterUuid": { - "type": "string", - "description": "Cluster UUId for the operation." + "YarnApplication": { + "description": "A YARN application created by a job. Application information is a subset of \u003ccode\u003eorg.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto\u003c/code\u003e.", + "type": "object", + "properties": { + "state": { + "description": "Required The application state.", + "type": "string", + "enumDescriptions": [ + "Status is unspecified.", + "Status is NEW.", + "Status is NEW_SAVING.", + "Status is SUBMITTED.", + "Status is ACCEPTED.", + "Status is RUNNING.", + "Status is FINISHED.", + "Status is FAILED.", + "Status is KILLED." + ], + "enum": [ + "STATE_UNSPECIFIED", + "NEW", + "NEW_SAVING", + "SUBMITTED", + "ACCEPTED", + "RUNNING", + "FINISHED", + "FAILED", + "KILLED" + ] + }, + "name": { + "description": "Required The application name.", + "type": "string" + }, + "trackingUrl": { + "description": "Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.", + "type": "string" + }, + "progress": { + "description": "Required The numerical progress of the application, from 1 to 100.", + "format": "float", + "type": "number" + } + }, + "id": "YarnApplication" }, - "status": { - "$ref": "OperationStatus", - "description": "[Output-only] Current operation status." + "ClusterOperationMetadata": { + "description": "Metadata describing the operation.", + "type": "object", + "properties": { + "operationType": { + "description": "Output-only The operation type.", + "type": "string" + }, + "description": { + "description": "Output-only Short description of operation.", + "type": "string" + }, + "warnings": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Output-only Errors encountered during operation execution." + }, + "labels": { + "description": "Output-only Labels associated with the operation", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "status": { + "$ref": "ClusterOperationStatus", + "description": "Output-only Current operation status." 
+ }, + "statusHistory": { + "description": "Output-only The previous operation status.", + "type": "array", + "items": { + "$ref": "ClusterOperationStatus" + } + }, + "clusterName": { + "description": "Output-only Name of the cluster for the operation.", + "type": "string" + }, + "clusterUuid": { + "description": "Output-only Cluster UUID for the operation.", + "type": "string" + } + }, + "id": "ClusterOperationMetadata" }, - "statusHistory": { - "type": "array", - "description": "[Output-only] Previous operation status.", - "items": { - "$ref": "OperationStatus" - } + "HiveJob": { + "description": "A Cloud Dataproc job for running Hive queries on YARN.", + "type": "object", + "properties": { + "continueOnFailure": { + "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" + }, + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "queryFileUri": { + "description": "The HCFS URI of the script that contains Hive queries.", + "type": "string" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + "type": "array", + "items": { + "type": "string" + } + }, + "scriptVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional Mapping of query variable names to values (equivalent to the Hive command: 'SET name=\"value\";').", + "type": "object" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + "type": "object" + } + }, + "id": "HiveJob" }, - "operationType": { - "type": "string", - "description": "[Output-only] The operation type." + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "type": "object", + "properties": {}, + "id": "Empty" }, - "description": { - "type": "string", - "description": "[Output-only] Short description of operation." - } - } - }, - "OperationStatus": { - "id": "OperationStatus", - "type": "object", - "description": "The status of the operation.", - "properties": { - "state": { - "type": "string", - "description": "A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "DiagnoseClusterResults": { + "description": "The location of diagnostic output.", + "type": "object", + "properties": { + "outputUri": { + "type": "string", + "description": "Output-only The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics." + } + }, + "id": "DiagnoseClusterResults" }, - "innerState": { - "type": "string", - "description": "A message containing the detailed operation state." 
+ "ListJobsRequest": { + "description": "A request to list jobs in a project.", + "type": "object", + "properties": { + "pageToken": { + "description": "Optional The page token, returned by a previous call, to request the next page of results.", + "type": "string" + }, + "clusterName": { + "description": "Optional If set, the returned jobs list includes only jobs that were submitted to the named cluster.", + "type": "string" + }, + "pageSize": { + "description": "Optional The number of results to return in each response.", + "format": "int32", + "type": "integer" + }, + "filter": { + "description": "Optional A filter constraining which jobs to list. Valid filters contain job state and label terms such as: labels.key1 = val1 AND (labels.k2 = val2 OR labels.k3 = val3)", + "type": "string" + }, + "jobStateMatcher": { + "enumDescriptions": [ + "Match all jobs, regardless of state.", + "Only match jobs in non-terminal states: PENDING, RUNNING, CANCEL_PENDING", + "Only match jobs in terminal states: CANCELLED, DONE, ERROR" + ], + "enum": [ + "ALL", + "ACTIVE", + "NON_ACTIVE" + ], + "description": "Optional Specifies enumerated categories of jobs to list.", + "type": "string" + } + }, + "id": "ListJobsRequest" }, - "details": { - "type": "string", - "description": "A message containing any operation metadata details." + "GceConfiguration": { + "id": "GceConfiguration", + "description": "Deprecated Common configuration settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", + "type": "object", + "properties": { + "imageUri": { + "description": "Deprecated The Google Compute Engine image resource used for cluster instances. Example: \"compute.googleapis.com/projects/debian-cloud /global/images/backports-debian-7-wheezy-v20140904\".", + "type": "string" + }, + "machineTypeUri": { + "description": "Deprecated The Google Compute Engine machine type used for cluster instances. Example: \"compute.googleapis.com/projects/project_id /zones/us-east1-a/machineTypes/n1-standard-2\".", + "type": "string" + }, + "networkUri": { + "description": "Deprecated The Google Compute Engine network to be used for machine communications. Inbound SSH connections are necessary to complete cluster configuration. Example \"compute.googleapis.com/projects/project_id /zones/us-east1-a/default\".", + "type": "string" + }, + "serviceAccountScopes": { + "description": "Deprecated The service account scopes included in Google Compute Engine instances. Must include devstorage.full_control to enable the Google Cloud Storage connector. Example \"auth.googleapis.com/compute\" and \"auth.googleapis.com/devstorage.full_control\".", + "type": "array", + "items": { + "type": "string" + } + }, + "zoneUri": { + "description": "Deprecated The zone where the Google Compute Engine cluster will be located. Example: \"compute.googleapis.com/projects/project_id /zones/us-east1-a\".", + "type": "string" + } + } }, - "stateStartTime": { - "type": "string", - "description": "The time this state was entered." 
- } - } - } - }, - "resources": { - "projects": { - "resources": { - "regions": { - "resources": { - "clusters": { - "methods": { - "create": { - "id": "dataproc.projects.regions.clusters.create", - "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters", - "httpMethod": "POST", - "description": "Request to create a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" + "SoftwareConfiguration": { + "description": "Specifies the selection and configuration of software inside the cluster.", + "type": "object", + "properties": { + "imageVersion": { + "type": "string", + "description": "Optional The version of software inside the cluster. It must match the regular expression 0-9+.0-9+. If unspecified it will default to latest version." + }, + "properties": { + "additionalProperties": { + "type": "string" }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" + "description": "Optional The properties to set on daemon configuration files.Property keys are specified in \"prefix:property\" format, such as \"core:fs.defaultFS\". The following are supported prefixes and their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - pig.properties spark - spark-defaults.conf", + "type": "object" + } + }, + "id": "SoftwareConfiguration" + }, + "PySparkJob": { + "description": "A Cloud Dataproc job for running PySpark applications on YARN.", + "type": "object", + "properties": { + "mainPythonFileUri": { + "description": "Required The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file.", + "type": "string" + }, + "archiveUris": { + "description": "Optional HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region" - ], - "request": { - "$ref": "Cluster" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "patch": { - "id": "dataproc.projects.regions.clusters.patch", - "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "PATCH", - "description": "Request to update a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" - }, - "updateMask": { - "type": "string", - "description": "[Required] Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as \"configuration.worker_configuration.num_instances,\" and the PATCH request body would specify the new value, as follows: { \"configuration\":{ \"workerConfiguration\":{ \"numInstances\":\"5\" } } } Note: Currently, configuration.worker_configuration.num_instances is the only field that can be updated.", - "location": "query" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "request": { - "$ref": "Cluster" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "delete": { - "id": "dataproc.projects.regions.clusters.delete", - "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "DELETE", - "description": "Request to delete a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + "loggingConfiguration": { + "description": "Optional The runtime log configuration for job execution.", + "$ref": "LoggingConfiguration" + }, + "properties": { + "description": "Optional A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + "type": "object", + "additionalProperties": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "get": { - "id": "dataproc.projects.regions.clusters.get", - "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", - "httpMethod": "GET", - "description": "Request to get the resource representation for a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + "args": { + "description": "Optional The arguments to pass to the driver. 
Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "clusterName" - ], - "response": { - "$ref": "Cluster" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "list": { - "id": "dataproc.projects.regions.clusters.list", - "path": "v1alpha1/projects/{projectId}/regions/{region}/clusters", - "httpMethod": "GET", - "description": "Request a list of all regions/{region}/clusters in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" - }, - "filter": { - "type": "string", - "description": "[Optional] A filter constraining which clusters to list. Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The standard List page size.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The standard List page token.", - "location": "query" + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "pythonFileUris": { + "description": "Optional HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region" - ], - "response": { - "$ref": "ListClustersResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] } - } }, - "jobs": { - "methods": { - "submit": { - "id": "dataproc.projects.regions.jobs.submit", - "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs:submit", - "httpMethod": "POST", - "description": "Submits a job to a cluster.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" + "id": "PySparkJob" + }, + "ClusterConfiguration": { + "description": "The cluster configuration.", + "type": "object", + "properties": { + "initializationActions": { + "type": "array", + "items": { + "$ref": "NodeInitializationAction" }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" + "description": "Optional Commands to execute on each node after configuration is completed. By default, executables are run on master and all worker nodes. You can test a node's \u003ccode\u003erole\u003c/code\u003e metadata to run an executable on a master or worker node, as shown below:\nROLE=$(/usr/share/google/get_metadata_value attributes/role)\nif [[ \"${ROLE}\" == 'Master' ]]; then\n ... master specific actions ...\nelse\n ... 
worker specific actions ...\nfi\n" + }, + "workerConfiguration": { + "$ref": "InstanceGroupConfiguration", + "description": "Optional The Google Compute Engine configuration settings for worker instances in a cluster." + }, + "softwareConfiguration": { + "$ref": "SoftwareConfiguration", + "description": "Optional The configuration settings for software inside the cluster." + }, + "gceClusterConfiguration": { + "$ref": "GceClusterConfiguration", + "description": "Optional The shared Google Compute Engine configuration settings for all instances in a cluster." + }, + "configurationBucket": { + "description": "Optional A Google Cloud Storage staging bucket used for sharing generated SSH keys and configuration. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, then it will create and manage this project-level, per-location bucket for you.", + "type": "string" + }, + "numWorkers": { + "description": "Deprecated The number of worker nodes in the cluster.", + "format": "int32", + "type": "integer" + }, + "masterDiskConfiguration": { + "$ref": "DiskConfiguration", + "description": "Deprecated The configuration settings of master node disk options." + }, + "workerDiskConfiguration": { + "$ref": "DiskConfiguration", + "description": "Deprecated The configuration settings of worker node disk options." + }, + "gceConfiguration": { + "$ref": "GceConfiguration", + "description": "Deprecated The Google Compute Engine configuration settings for cluster resources." + }, + "masterConfiguration": { + "$ref": "InstanceGroupConfiguration", + "description": "Optional The Google Compute Engine configuration settings for the master instance in a cluster." + }, + "secondaryWorkerConfiguration": { + "description": "Optional The Google Compute Engine configuration settings for additional worker instances in a cluster.", + "$ref": "InstanceGroupConfiguration" + }, + "masterName": { + "description": "Deprecated The Master's hostname. Dataproc derives the name from cluster_name if not set by user (recommended practice is to let Dataproc derive the name). Derived master name example: hadoop-m.", + "type": "string" + }, + "workers": { + "description": "Deprecated The list of worker node names. Dataproc derives the names from cluster_name and num_workers if not set by user (recommended practice is to let Dataproc derive the name). Derived worker node name example: hadoop-w-0.", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region" - ], - "request": { - "$ref": "SubmitJobRequest" - }, - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + } + }, + "id": "ClusterConfiguration" + }, + "ClusterMetrics": { + "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.", + "type": "object", + "properties": { + "hdfsMetrics": { + "type": "object", + "additionalProperties": { + "format": "int64", + "type": "string" + }, + "description": "The HDFS metrics." 
}, - "get": { - "id": "dataproc.projects.regions.jobs.get", - "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", - "httpMethod": "GET", - "description": "Gets the resource representation for a job in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" + "yarnMetrics": { + "additionalProperties": { + "format": "int64", + "type": "string" }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" + "description": "The YARN metrics.", + "type": "object" + } + }, + "id": "ClusterMetrics" + }, + "LoggingConfiguration": { + "properties": { + "driverLogLevels": { + "additionalProperties": { + "enum": [ + "LEVEL_UNSPECIFIED", + "ALL", + "TRACE", + "DEBUG", + "INFO", + "WARN", + "ERROR", + "FATAL", + "OFF" + ], + "type": "string" }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + "description": "The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG", + "type": "object" + } + }, + "id": "LoggingConfiguration", + "description": "The runtime logging configuration of the job.", + "type": "object" + }, + "InstanceGroupConfiguration": { + "description": "The configuration settings for Google Compute Engine resources in an instance group, such as a master or worker group.", + "type": "object", + "properties": { + "isPreemptible": { + "description": "Specifies that this instance group contains Preemptible Instances.", + "type": "boolean" + }, + "imageUri": { + "description": "Output-only The Google Compute Engine image resource used for cluster instances. Inferred from SoftwareConfiguration.image_version. Example: \"compute.googleapis.com/projects/debian-cloud /global/images/backports-debian-7-wheezy-v20140904\".", + "type": "string" + }, + "machineTypeUri": { + "description": "The Google Compute Engine machine type used for cluster instances. Example: \"compute.googleapis.com/projects/project_id /zones/us-east1-a/machineTypes/n1-standard-2\".", + "type": "string" + }, + "instanceNames": { + "description": "The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group if not set by user (recommended practice is to let Dataproc derive the name).", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "jobId" - ], - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "cancel": { - "id": "dataproc.projects.regions.jobs.cancel", - "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", - "httpMethod": "POST", - "description": "Starts a job cancellation request. 
To access the job resource after cancellation, call regions/{region}/jobs:list or regions/{region}/jobs:get.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" - }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" + "managedGroupConfiguration": { + "$ref": "ManagedGroupConfiguration", + "description": "Output-only The configuration for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups." + }, + "numInstances": { + "description": "The number of VM instances in the instance group. For master instance groups, must be set to 1.", + "format": "int32", + "type": "integer" + }, + "diskConfiguration": { + "$ref": "DiskConfiguration", + "description": "Disk option configuration settings." + } + }, + "id": "InstanceGroupConfiguration" + }, + "GceClusterConfiguration": { + "description": "Common configuration settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", + "type": "object", + "properties": { + "networkUri": { + "description": "The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see https://cloud.google.com/compute/docs/subnetworks for more information). Example: compute.googleapis.com/projects/[project_id]/regions/global/default.", + "type": "string" + }, + "zoneUri": { + "description": "Required The zone where the Google Compute Engine cluster will be located. Example: \"compute.googleapis.com/projects/project_id /zones/us-east1-a\".", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "type": "string" }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + "description": "The Google Compute Engine metadata entries to add to all instances.", + "type": "object" + }, + "internalIpOnly": { + "description": "If true, all instances in the cluser will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + "type": "boolean" + }, + "serviceAccountScopes": { + "description": "The service account scopes included in Google Compute Engine instances. Must include devstorage.full_control to enable the Google Cloud Storage connector. Example \"auth.googleapis.com/compute\" and \"auth.googleapis.com/devstorage.full_control\".", + "type": "array", + "items": { + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "region", - "jobId" - ], - "request": { - "$ref": "CancelJobRequest" - }, - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "delete": { - "id": "dataproc.projects.regions.jobs.delete", - "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", - "httpMethod": "DELETE", - "description": "Deletes the job from the project. 
If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The Google Compute Engine tags to add to all instances." + }, + "serviceAccount": { + "description": "Optional The service account of the instances. Defaults to the default Google Compute Engine service account. Custom service accounts need permissions equivalent to the folloing IAM roles:\nroles/logging.logWriter\nroles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: [account_id]@[project_id].iam.gserviceaccount.com", + "type": "string" + }, + "subnetworkUri": { + "description": "The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: compute.googleapis.com/projects/[project_id]/regions/us-east1/sub0.", + "type": "string" + } + }, + "id": "GceClusterConfiguration" + }, + "CancelOperationRequest": { + "id": "CancelOperationRequest", + "description": "The request message for Operations.CancelOperation.", + "type": "object", + "properties": {} + }, + "DiagnoseClusterOutputLocation": { + "description": "The location where output from diagnostic command can be found.", + "type": "object", + "properties": { + "outputUri": { + "type": "string", + "description": "Output-only The Google Cloud Storage URI of the diagnostic output. This will be a plain text file with summary of collected diagnostics." + } + }, + "id": "DiagnoseClusterOutputLocation" + }, + "Operation": { + "properties": { + "response": { + "additionalProperties": { + "type": "any", + "description": "Properties of the object. Contains field @type with type URL." }, - "region": { - "type": "string", - "description": "[Required] The Dataproc region in which to handle the request.", - "required": true, - "location": "path" + "description": "The operation response. If the called method returns no data on success, the response is google.protobuf.Empty. If the called method is Get,Create or Update, the response is the resource. For all other methods, the response type is a concatenation of the method name and \"Response\". For example, if the called method is TakeSnapshot(), the response type is TakeSnapshotResponse.", + "type": "object" + }, + "name": { + "description": "The name of the operation resource, in the format projects/project_id/operations/operation_id", + "type": "string" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure." + }, + "metadata": { + "additionalProperties": { + "type": "any", + "description": "Properties of the object. Contains field @type with type URL." }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "projectId", - "region", - "jobId" - ], - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "description": "Service-specific metadata associated with the operation.", + "type": "object" + }, + "done": { + "description": "Indicates if the operation is done. If true, the operation is complete and the result is available. 
If false, the operation is still in progress.", + "type": "boolean" } - } - } - } - } - } - }, - "operations": { - "methods": { - "get": { - "id": "dataproc.operations.get", - "path": "v1alpha1/{+name}", - "httpMethod": "GET", - "description": "Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.", - "parameters": { - "name": { - "type": "string", - "description": "The operation resource name.", - "required": true, - "pattern": "^operations/.+$", - "location": "path" - } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + }, + "id": "Operation", + "description": "An asynchronous operation in a project that runs over a given cluster. Used to track the progress of a user request that is running asynchronously. Examples include creating a cluster, updating a cluster, and deleting a cluster.", + "type": "object" }, - "list": { - "id": "dataproc.operations.list", - "path": "v1alpha1/{+name}", - "httpMethod": "GET", - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", - "parameters": { - "name": { - "type": "string", - "description": "The operation collection name.", - "required": true, - "pattern": "^operations$", - "location": "path" + "OperationStatus": { + "description": "The status of the operation.", + "type": "object", + "properties": { + "state": { + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is running.", + "The operation is done; either cancelled or completed." + ], + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ], + "description": "A message containing the operation state.", + "type": "string" + }, + "details": { + "description": "A message containing any operation metadata details.", + "type": "string" + }, + "innerState": { + "description": "A message containing the detailed operation state.", + "type": "string" + }, + "stateStartTime": { + "description": "The time this state was entered.", + "format": "google-datetime", + "type": "string" + } }, - "filter": { - "type": "string", - "description": "[Required] A JSON object that contains filters for the list operation, in the format {\"key1\":\"value1\",\"key2\":\"value2\", ..., }. Possible keys include project_id, cluster_name, and operation_state_matcher. If project_id is set, requests the list of operations that belong to the specified Google Cloud Platform project ID. This key is required. If cluster_name is set, requests the list of operations that were submitted to the specified cluster name. This key is optional. If operation_state_matcher is set, requests the list of operations that match one of the following status options: ALL, ACTIVE, or NON_ACTIVE.", - "location": "query" + "id": "OperationStatus" + }, + "JobReference": { + "description": "Encapsulates the full scoping used to reference a job.", + "type": "object", + "properties": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "type": "string" + }, + "jobId": { + "description": "Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. 
The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.", + "type": "string" + } }, - "pageSize": { - "type": "integer", - "description": "The standard List page size.", - "format": "int32", - "location": "query" + "id": "JobReference" + }, + "SubmitJobRequest": { + "description": "A job submission request.", + "type": "object", + "properties": { + "job": { + "$ref": "Job", + "description": "Required The job resource." + } }, - "pageToken": { - "type": "string", - "description": "The standard List page token.", - "location": "query" - } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListOperationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "id": "SubmitJobRequest" }, - "cancel": { - "id": "dataproc.operations.cancel", - "path": "v1alpha1/{+name}:cancel", - "httpMethod": "POST", - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource to be cancelled.", - "required": true, - "pattern": "^operations/.+$", - "location": "path" - } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "CancelOperationRequest" - }, - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "Status": { + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc which can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. 
A typical workflow has multiple steps. Each step may have a Status message for error reporting purpose.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "details": { + "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + } + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "id": "Status" }, - "delete": { - "id": "dataproc.operations.delete", - "path": "v1alpha1/{+name}", - "httpMethod": "DELETE", - "description": "Deletes a long-running operation. It indicates the client is no longer interested in the operation result. It does not cancel the operation.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource to be deleted.", - "required": true, - "pattern": "^operations/.+$", - "location": "path" - } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "JobScheduling": { + "description": "Job scheduling options.Beta Feature: These options are available for testing purposes only. 
They may be changed before final release.", + "type": "object", + "properties": { + "maxFailuresPerHour": { + "description": "Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.", + "format": "int32", + "type": "integer" + } + }, + "id": "JobScheduling" } - } - } - } + }, + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "protocol": "rest" } diff --git a/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-gen.go b/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-gen.go index 1f2636027..b6ef55af4 100644 --- a/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-gen.go +++ b/vendor/google.golang.org/api/dataproc/v1alpha1/dataproc-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Operations *OperationsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -144,42 +149,42 @@ type CancelOperationRequest struct { // Cluster: Describes the identifying information, configuration, and // status of a cluster of Google Compute Engine instances. type Cluster struct { - // ClusterName: [Required] The cluster name. Cluster names within a + // ClusterName: Required The cluster name. Cluster names within a // project must be unique. Names from deleted clusters can be reused. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] A cluster UUID (Unique Universal + // ClusterUuid: Output-only A cluster UUID (Unique Universal // Identifier). Cloud Dataproc generates this value when it creates the // cluster. ClusterUuid string `json:"clusterUuid,omitempty"` - // Configuration: [Required] The cluster configuration. It may differ - // from a user's initial configuration due to Cloud Dataproc setting of + // Configuration: Required The cluster configuration. It may differ from + // a user's initial configuration due to Cloud Dataproc setting of // default values and updating clusters. Configuration *ClusterConfiguration `json:"configuration,omitempty"` - // CreateTime: [Output-only] The timestamp of cluster creation. + // CreateTime: Output-only The timestamp of cluster creation. CreateTime string `json:"createTime,omitempty"` - // Labels: [Optional] The labels to associate with this cluster. Label - // keys must be between 1 and 63 characters long, and must conform to - // the following PCRE regular expression: \p{Ll}\p{Lo}{0,62} Label - // values must be between 1 and 63 characters long, and must conform to - // the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No - // more than 64 labels can be associated with a given cluster. 
+ // Labels: Optional The labels to associate with this cluster.Label keys + // must be between 1 and 63 characters long, and must conform to the + // following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values + // must be between 1 and 63 characters long, and must conform to the + // following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more + // than 64 labels can be associated with a given cluster. Labels map[string]string `json:"labels,omitempty"` // Metrics: Contains cluster daemon metrics such as HDFS and YARN stats. Metrics *ClusterMetrics `json:"metrics,omitempty"` - // ProjectId: [Required] The Google Cloud Platform project ID that the + // ProjectId: Required The Google Cloud Platform project ID that the // cluster belongs to. ProjectId string `json:"projectId,omitempty"` - // Status: [Output-only] Cluster status. + // Status: Output-only Cluster status. Status *ClusterStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] Previous cluster statuses. + // StatusHistory: Output-only Previous cluster statuses. StatusHistory []*ClusterStatus `json:"statusHistory,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -211,7 +216,7 @@ func (s *Cluster) MarshalJSON() ([]byte, error) { // ClusterConfiguration: The cluster configuration. type ClusterConfiguration struct { - // ConfigurationBucket: [Optional] A Google Cloud Storage staging bucket + // ConfigurationBucket: Optional A Google Cloud Storage staging bucket // used for sharing generated SSH keys and configuration. If you do not // specify a staging bucket, Cloud Dataproc will determine an // appropriate Cloud Storage location (US, ASIA, or EU) for your @@ -220,56 +225,61 @@ type ClusterConfiguration struct { // project-level, per-location bucket for you. ConfigurationBucket string `json:"configurationBucket,omitempty"` - // GceClusterConfiguration: [Optional] The shared Google Compute Engine + // GceClusterConfiguration: Optional The shared Google Compute Engine // configuration settings for all instances in a cluster. GceClusterConfiguration *GceClusterConfiguration `json:"gceClusterConfiguration,omitempty"` - // GceConfiguration: [Deprecated] The Google Compute Engine - // configuration settings for cluster resources. + // GceConfiguration: Deprecated The Google Compute Engine configuration + // settings for cluster resources. GceConfiguration *GceConfiguration `json:"gceConfiguration,omitempty"` - // InitializationActions: [Optional] Commands to execute on each node + // InitializationActions: Optional Commands to execute on each node // after configuration is completed. By default, executables are run on - // master and all worker nodes. You can test a node's role metadata to - // run an executable on a master or worker node, as shown below: - // ROLE=$(/usr/share/google/get_metadata_value attributes/role) if [[ - // "${ROLE}" == 'Master' ]]; then ... master specific actions ... else - // ... worker specific actions ... fi + // master and all worker nodes. You can test a node's role + // metadata to run an executable on a master or worker node, as shown + // below: + // ROLE=$(/usr/share/google/get_metadata_value attributes/role) + // if [[ "${ROLE}" == 'Master' ]]; then + // ... master specific actions ... + // else + // ... worker specific actions ... 
+ // fi + // InitializationActions []*NodeInitializationAction `json:"initializationActions,omitempty"` - // MasterConfiguration: [Optional] The Google Compute Engine - // configuration settings for the master instance in a cluster. + // MasterConfiguration: Optional The Google Compute Engine configuration + // settings for the master instance in a cluster. MasterConfiguration *InstanceGroupConfiguration `json:"masterConfiguration,omitempty"` - // MasterDiskConfiguration: [Deprecated] The configuration settings of + // MasterDiskConfiguration: Deprecated The configuration settings of // master node disk options. MasterDiskConfiguration *DiskConfiguration `json:"masterDiskConfiguration,omitempty"` - // MasterName: [Deprecated] The Master's hostname. Dataproc derives the + // MasterName: Deprecated The Master's hostname. Dataproc derives the // name from cluster_name if not set by user (recommended practice is to // let Dataproc derive the name). Derived master name example: hadoop-m. MasterName string `json:"masterName,omitempty"` - // NumWorkers: [Deprecated] The number of worker nodes in the cluster. + // NumWorkers: Deprecated The number of worker nodes in the cluster. NumWorkers int64 `json:"numWorkers,omitempty"` - // SecondaryWorkerConfiguration: [Optional] The Google Compute Engine + // SecondaryWorkerConfiguration: Optional The Google Compute Engine // configuration settings for additional worker instances in a cluster. SecondaryWorkerConfiguration *InstanceGroupConfiguration `json:"secondaryWorkerConfiguration,omitempty"` - // SoftwareConfiguration: [Optional] The configuration settings for + // SoftwareConfiguration: Optional The configuration settings for // software inside the cluster. SoftwareConfiguration *SoftwareConfiguration `json:"softwareConfiguration,omitempty"` - // WorkerConfiguration: [Optional] The Google Compute Engine - // configuration settings for worker instances in a cluster. + // WorkerConfiguration: Optional The Google Compute Engine configuration + // settings for worker instances in a cluster. WorkerConfiguration *InstanceGroupConfiguration `json:"workerConfiguration,omitempty"` - // WorkerDiskConfiguration: [Deprecated] The configuration settings of + // WorkerDiskConfiguration: Deprecated The configuration settings of // worker node disk options. WorkerDiskConfiguration *DiskConfiguration `json:"workerDiskConfiguration,omitempty"` - // Workers: [Deprecated] The list of worker node names. Dataproc derives + // Workers: Deprecated The list of worker node names. Dataproc derives // the names from cluster_name and num_workers if not set by user // (recommended practice is to let Dataproc derive the name). Derived // worker node name example: hadoop-w-0. @@ -333,27 +343,30 @@ func (s *ClusterMetrics) MarshalJSON() ([]byte, error) { // ClusterOperationMetadata: Metadata describing the operation. type ClusterOperationMetadata struct { - // ClusterName: [Output-only] Name of the cluster for the operation. + // ClusterName: Output-only Name of the cluster for the operation. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] Cluster UUID for the operation. + // ClusterUuid: Output-only Cluster UUID for the operation. ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: [Output-only] Short description of operation. + // Description: Output-only Short description of operation. 
Description string `json:"description,omitempty"` - // Labels: [Output-only] labels associated with the operation + // Labels: Output-only Labels associated with the operation Labels map[string]string `json:"labels,omitempty"` - // OperationType: [Output-only] The operation type. + // OperationType: Output-only The operation type. OperationType string `json:"operationType,omitempty"` - // Status: [Output-only] Current operation status. + // Status: Output-only Current operation status. Status *ClusterOperationStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] The previous operation status. + // StatusHistory: Output-only The previous operation status. StatusHistory []*ClusterOperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output-only Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -379,24 +392,24 @@ func (s *ClusterOperationMetadata) MarshalJSON() ([]byte, error) { // ClusterOperationStatus: The status of the operation. type ClusterOperationStatus struct { - // Details: [Output-only]A message containing any operation metadata + // Details: Output-onlyA message containing any operation metadata // details. Details string `json:"details,omitempty"` - // InnerState: [Output-only] A message containing the detailed operation + // InnerState: Output-only A message containing the detailed operation // state. InnerState string `json:"innerState,omitempty"` - // State: [Output-only] A message containing the operation state. + // State: Output-only A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. State string `json:"state,omitempty"` - // StateStartTime: [Output-only] The time this state was entered. + // StateStartTime: Output-only The time this state was entered. StateStartTime string `json:"stateStartTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Details") to @@ -430,12 +443,16 @@ type ClusterStatus struct { // State: The cluster's state. // // Possible values: - // "UNKNOWN" - // "CREATING" - // "RUNNING" - // "ERROR" - // "DELETING" - // "UPDATING" + // "UNKNOWN" - The cluster state is unknown. + // "CREATING" - The cluster is being created and set up. It is not + // ready for use. + // "RUNNING" - The cluster is currently running and healthy. It is + // ready for use. + // "ERROR" - The cluster encountered an error. It is not ready for + // use. + // "DELETING" - The cluster is being deleted. It cannot be used. + // "UPDATING" - The cluster is being updated. It continues to accept + // and process jobs. State string `json:"state,omitempty"` // StateStartTime: Time when this state was entered. @@ -467,9 +484,9 @@ func (s *ClusterStatus) MarshalJSON() ([]byte, error) { // DiagnoseClusterOutputLocation: The location where output from // diagnostic command can be found. type DiagnoseClusterOutputLocation struct { - // OutputUri: [Output-only] The Google Cloud Storage URI of the - // diagnostic output. This will be a plain text file with summary of - // collected diagnostics. 
+ // OutputUri: Output-only The Google Cloud Storage URI of the diagnostic + // output. This will be a plain text file with summary of collected + // diagnostics. OutputUri string `json:"outputUri,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputUri") to @@ -497,9 +514,9 @@ func (s *DiagnoseClusterOutputLocation) MarshalJSON() ([]byte, error) { // DiagnoseClusterResults: The location of diagnostic output. type DiagnoseClusterResults struct { - // OutputUri: [Output-only] The Google Cloud Storage URI of the - // diagnostic output. The output report is a plain text file with a - // summary of collected diagnostics. + // OutputUri: Output-only The Google Cloud Storage URI of the diagnostic + // output. The output report is a plain text file with a summary of + // collected diagnostics. OutputUri string `json:"outputUri,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputUri") to @@ -528,15 +545,15 @@ func (s *DiagnoseClusterResults) MarshalJSON() ([]byte, error) { // DiskConfiguration: Specifies the configuration of disk options for a // group of VM instances. type DiskConfiguration struct { - // BootDiskSizeGb: [Optional] Size in GB of the boot disk (default is + // BootDiskSizeGb: Optional Size in GB of the boot disk (default is // 500GB). BootDiskSizeGb int64 `json:"bootDiskSizeGb,omitempty"` - // NumLocalSsds: [Optional] Number of attached SSDs, from 0 to 4 - // (default is 0). If SSDs are not attached, the boot disk is used to - // store runtime logs, and HDFS data. If one or more SSDs are attached, - // this runtime bulk data is spread across them, and the boot disk - // contains only basic configuration and installed binaries. + // NumLocalSsds: Optional Number of attached SSDs, from 0 to 4 (default + // is 0). If SSDs are not attached, the boot disk is used to store + // runtime logs, and HDFS data. If one or more SSDs are attached, this + // runtime bulk data is spread across them, and the boot disk contains + // only basic configuration and installed binaries. NumLocalSsds int64 `json:"numLocalSsds,omitempty"` // ForceSendFields is a list of field names (e.g. "BootDiskSizeGb") to @@ -566,9 +583,12 @@ func (s *DiskConfiguration) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For -// instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for `Empty` is -// empty JSON object `{}`. +// instance: +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// The JSON representation for Empty is empty JSON object {}. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -597,9 +617,19 @@ type GceClusterConfiguration struct { // the project is used, if it exists. Cannot be a "Custom Subnet // Network" (see https://cloud.google.com/compute/docs/subnetworks for // more information). Example: - // `compute.googleapis.com/projects/[project_id]/regions/global/default`. + // compute.googleapis.com/projects/[project_id]/regions/global/default. NetworkUri string `json:"networkUri,omitempty"` + // ServiceAccount: Optional The service account of the instances. + // Defaults to the default Google Compute Engine service account. 
Custom + // service accounts need permissions equivalent to the folloing IAM + // roles: + // roles/logging.logWriter + // roles/storage.objectAdmin(see + // https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: + // [account_id]@[project_id].iam.gserviceaccount.com + ServiceAccount string `json:"serviceAccount,omitempty"` + // ServiceAccountScopes: The service account scopes included in Google // Compute Engine instances. Must include devstorage.full_control to // enable the Google Cloud Storage connector. Example @@ -610,15 +640,15 @@ type GceClusterConfiguration struct { // SubnetworkUri: The Google Compute Engine subnetwork to be used for // machine communications. Cannot be specified with network_uri. // Example: - // `compute.googleapis.com/projects/[project_id]/regions/us-east1/sub0`. + // compute.googleapis.com/projects/[project_id]/regions/us-east1/sub0. SubnetworkUri string `json:"subnetworkUri,omitempty"` // Tags: The Google Compute Engine tags to add to all instances. Tags []string `json:"tags,omitempty"` - // ZoneUri: [Required] The zone where the Google Compute Engine cluster - // will be located. Example: - // "compute.googleapis.com/projects/[project_id] /zones/us-east1-a". + // ZoneUri: Required The zone where the Google Compute Engine cluster + // will be located. Example: "compute.googleapis.com/projects/project_id + // /zones/us-east1-a". ZoneUri string `json:"zoneUri,omitempty"` // ForceSendFields is a list of field names (e.g. "InternalIpOnly") to @@ -645,39 +675,39 @@ func (s *GceClusterConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// GceConfiguration: [Deprecated] Common configuration settings for +// GceConfiguration: Deprecated Common configuration settings for // resources of Google Compute Engine cluster instances, applicable to // all instances in the cluster. type GceConfiguration struct { - // ImageUri: [Deprecated] The Google Compute Engine image resource used + // ImageUri: Deprecated The Google Compute Engine image resource used // for cluster instances. Example: // "compute.googleapis.com/projects/debian-cloud // /global/images/backports-debian-7-wheezy-v20140904". ImageUri string `json:"imageUri,omitempty"` - // MachineTypeUri: [Deprecated] The Google Compute Engine machine type + // MachineTypeUri: Deprecated The Google Compute Engine machine type // used for cluster instances. Example: - // "compute.googleapis.com/projects/[project_id] + // "compute.googleapis.com/projects/project_id // /zones/us-east1-a/machineTypes/n1-standard-2". MachineTypeUri string `json:"machineTypeUri,omitempty"` - // NetworkUri: [Deprecated] The Google Compute Engine network to be used + // NetworkUri: Deprecated The Google Compute Engine network to be used // for machine communications. Inbound SSH connections are necessary to // complete cluster configuration. Example - // "compute.googleapis.com/projects/[project_id] + // "compute.googleapis.com/projects/project_id // /zones/us-east1-a/default". NetworkUri string `json:"networkUri,omitempty"` - // ServiceAccountScopes: [Deprecated] The service account scopes - // included in Google Compute Engine instances. Must include + // ServiceAccountScopes: Deprecated The service account scopes included + // in Google Compute Engine instances. Must include // devstorage.full_control to enable the Google Cloud Storage connector. 
// Example "auth.googleapis.com/compute" and // "auth.googleapis.com/devstorage.full_control". ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"` - // ZoneUri: [Deprecated] The zone where the Google Compute Engine - // cluster will be located. Example: - // "compute.googleapis.com/projects/[project_id] /zones/us-east1-a". + // ZoneUri: Deprecated The zone where the Google Compute Engine cluster + // will be located. Example: "compute.googleapis.com/projects/project_id + // /zones/us-east1-a". ZoneUri string `json:"zoneUri,omitempty"` // ForceSendFields is a list of field names (e.g. "ImageUri") to @@ -706,28 +736,28 @@ func (s *GceConfiguration) MarshalJSON() ([]byte, error) { // HadoopJob: A Cloud Dataproc job for running Hadoop MapReduce jobs on // YARN. type HadoopJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of Hadoop drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, or .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include + // Args: Optional The arguments to pass to the driver. Do not include // arguments, such as -libjars or -Dfoo=bar, that can be set as job // properties, since a collision may occur that causes an incorrect job // submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Hadoop drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] Jar file URIs to add to the CLASSPATHs of the + // JarFileUris: Optional Jar file URIs to add to the CLASSPATHs of the // Hadoop driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` // MainClass: The name of the driver's main class. The jar file @@ -742,7 +772,7 @@ type HadoopJob struct { // file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Hadoop. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site and classes in user code. @@ -773,19 +803,19 @@ func (s *HadoopJob) MarshalJSON() ([]byte, error) { // HiveJob: A Cloud Dataproc job for running Hive queries on YARN. type HiveJob struct { - // ContinueOnFailure: [Optional] Whether to continue executing queries - // if a query fails. The default value is false. Setting to true can be + // ContinueOnFailure: Optional Whether to continue executing queries if + // a query fails. The default value is false. Setting to true can be // useful when executing independent parallel queries. ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can - // contain Hive SerDes and UDFs. 
+ // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH + // of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive + // SerDes and UDFs. JarFileUris []string `json:"jarFileUris,omitempty"` - // Properties: [Optional] A mapping of property names and values, used - // to configure Hive. Properties that conflict with values set by the - // Cloud Dataproc API may be overwritten. Can include properties set in + // Properties: Optional A mapping of property names and values, used to + // configure Hive. Properties that conflict with values set by the Cloud + // Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and // classes in user code. Properties map[string]string `json:"properties,omitempty"` @@ -796,7 +826,7 @@ type HiveJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values + // ScriptVariables: Optional Mapping of query variable names to values // (equivalent to the Hive command: 'SET name="value";'). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` @@ -831,7 +861,7 @@ type InstanceGroupConfiguration struct { // DiskConfiguration: Disk option configuration settings. DiskConfiguration *DiskConfiguration `json:"diskConfiguration,omitempty"` - // ImageUri: [Output-only] The Google Compute Engine image resource used + // ImageUri: Output-only The Google Compute Engine image resource used // for cluster instances. Inferred from // SoftwareConfiguration.image_version. Example: // "compute.googleapis.com/projects/debian-cloud @@ -849,11 +879,11 @@ type InstanceGroupConfiguration struct { // MachineTypeUri: The Google Compute Engine machine type used for // cluster instances. Example: - // "compute.googleapis.com/projects/[project_id] + // "compute.googleapis.com/projects/project_id // /zones/us-east1-a/machineTypes/n1-standard-2". MachineTypeUri string `json:"machineTypeUri,omitempty"` - // ManagedGroupConfiguration: [Output-only] The configuration for Google + // ManagedGroupConfiguration: Output-only The configuration for Google // Compute Engine Instance Group Manager that manages this group. This // is only used for preemptible instance groups. ManagedGroupConfiguration *ManagedGroupConfiguration `json:"managedGroupConfiguration,omitempty"` @@ -888,24 +918,24 @@ func (s *InstanceGroupConfiguration) MarshalJSON() ([]byte, error) { // Job: A Cloud Dataproc job resource. type Job struct { - // DriverControlFilesUri: [Output-only] If present, the location of + // DriverControlFilesUri: Output-only If present, the location of // miscellaneous control files which may be used as part of job setup // and handling. If not present, control files may be placed in the same // location as driver_output_uri. DriverControlFilesUri string `json:"driverControlFilesUri,omitempty"` - // DriverInputResourceUri: [Output-only] A URI pointing to the location - // of the stdin of the job's driver program, only set if the job is + // DriverInputResourceUri: Output-only A URI pointing to the location of + // the stdin of the job's driver program, only set if the job is // interactive. DriverInputResourceUri string `json:"driverInputResourceUri,omitempty"` - // DriverOutputResourceUri: [Output-only] A URI pointing to the location + // DriverOutputResourceUri: Output-only A URI pointing to the location // of the stdout of the job's driver program. 
DriverOutputResourceUri string `json:"driverOutputResourceUri,omitempty"` - // DriverOutputUri: [Output-only] A URI pointing to the location of the - // mixed stdout/stderr of the job's driver program—for example, - // gs://sysbucket123/foo-cluster/jobid-123/driver/output. + // DriverOutputUri: Output-only A URI pointing to the location of the + // mixed stdout/stderr of the job's driver program—for example, + // gs://sysbucket123/foo-cluster/jobid-123/driver/output. DriverOutputUri string `json:"driverOutputUri,omitempty"` // HadoopJob: Job is a Hadoop job. @@ -914,55 +944,58 @@ type Job struct { // HiveJob: Job is a Hive job. HiveJob *HiveJob `json:"hiveJob,omitempty"` - // Interactive: [Optional] If set to true, then the driver's stdin will - // be kept open and driver_input_uri will be set to provide a path at - // which additional input can be sent to the driver. + // Interactive: Optional If set to true, then the driver's stdin will be + // kept open and driver_input_uri will be set to provide a path at which + // additional input can be sent to the driver. Interactive bool `json:"interactive,omitempty"` - // Labels: [Optional] The labels to associate with this job. Label keys + // Labels: Optional The labels to associate with this job.Label keys // must be between 1 and 63 characters long, and must conform to the - // following regular expression: \p{Ll}\p{Lo}{0,62} Label values must be + // following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be // between 1 and 63 characters long, and must conform to the following - // regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 - // labels can be associated with a given job. + // regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels + // can be associated with a given job. Labels map[string]string `json:"labels,omitempty"` // PigJob: Job is a Pig job. PigJob *PigJob `json:"pigJob,omitempty"` - // Placement: [Required] Job information, including how, when, and where + // Placement: Required Job information, including how, when, and where // to run the job. Placement *JobPlacement `json:"placement,omitempty"` // PysparkJob: Job is a Pyspark job. PysparkJob *PySparkJob `json:"pysparkJob,omitempty"` - // Reference: [Optional] The fully-qualified reference to the job, which + // Reference: Optional The fully-qualified reference to the job, which // can be used to obtain the equivalent REST path of the job resource. // If this property is not specified when a job is created, the server - // generates a job_id. + // generates a job_id. Reference *JobReference `json:"reference,omitempty"` + // Scheduling: Optional Job scheduling configuration. + Scheduling *JobScheduling `json:"scheduling,omitempty"` + // SparkJob: Job is a Spark job. SparkJob *SparkJob `json:"sparkJob,omitempty"` // SparkSqlJob: Job is a SparkSql job. SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` - // Status: [Output-only] The job status. Additional application-specific - // status information may be contained in the type_job and - // yarn_applications fields. + // Status: Output-only The job status. Additional application-specific + // status information may be contained in the type_job and + // yarn_applications fields. Status *JobStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] The previous job status. + // StatusHistory: Output-only The previous job status. StatusHistory []*JobStatus `json:"statusHistory,omitempty"` - // SubmittedBy: [Output-only] The email address of the user submitting - // the job. 
For jobs submitted on the cluster, the address is - // username@hostname. + // SubmittedBy: Output-only The email address of the user submitting the + // job. For jobs submitted on the cluster, the address is + // username@hostname. SubmittedBy string `json:"submittedBy,omitempty"` - // YarnApplications: [Output-only] The collection of Yarn applications + // YarnApplications: Output-only The collection of Yarn applications // spun up by this job. YarnApplications []*YarnApplication `json:"yarnApplications,omitempty"` @@ -997,11 +1030,11 @@ func (s *Job) MarshalJSON() ([]byte, error) { // JobPlacement: Cloud Dataproc job configuration. type JobPlacement struct { - // ClusterName: [Required] The name of the cluster where the job will be + // ClusterName: Required The name of the cluster where the job will be // submitted. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] A cluster UUID generated by the Dataproc + // ClusterUuid: Output-only A cluster UUID generated by the Dataproc // service when the job is submitted. ClusterUuid string `json:"clusterUuid,omitempty"` @@ -1030,16 +1063,15 @@ func (s *JobPlacement) MarshalJSON() ([]byte, error) { // JobReference: Encapsulates the full scoping used to reference a job. type JobReference struct { - // JobId: [Required] The job ID, which must be unique within the - // project. The job ID is generated by the server upon job submission or - // provided by the user as a means to perform retries without creating - // duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers - // (0-9), underscores (_), or dashes (-). The maximum length is 512 - // characters. + // JobId: Required The job ID, which must be unique within the project. + // The job ID is generated by the server upon job submission or provided + // by the user as a means to perform retries without creating duplicate + // jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). The maximum length is 100 characters. JobId string `json:"jobId,omitempty"` - // ProjectId: [Required] The ID of the Google Cloud Platform project - // that the job belongs to. + // ProjectId: Required The ID of the Google Cloud Platform project that + // the job belongs to. ProjectId string `json:"projectId,omitempty"` // ForceSendFields is a list of field names (e.g. "JobId") to @@ -1065,10 +1097,45 @@ func (s *JobReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// JobScheduling: Job scheduling options.Beta Feature: These options are +// available for testing purposes only. They may be changed before final +// release. +type JobScheduling struct { + // MaxFailuresPerHour: Optional Maximum number of times per hour a + // driver may be restarted as a result of driver terminating with + // non-zero code before job is reported failed.A job may be reported as + // thrashing if driver exits with non-zero code 4 times within 10 minute + // window.Maximum value is 10. + MaxFailuresPerHour int64 `json:"maxFailuresPerHour,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxFailuresPerHour") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxFailuresPerHour") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *JobScheduling) MarshalJSON() ([]byte, error) { + type noMethod JobScheduling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // JobStatus: Cloud Dataproc job status. type JobStatus struct { - // Details: [Optional] Job state details, such as an error description - // if the state is ERROR. + // Details: Optional Job state details, such as an error description if + // the state is ERROR. Details string `json:"details,omitempty"` // EndTime: The time when the job completed. @@ -1080,21 +1147,29 @@ type JobStatus struct { // StartTime: The time when the server started the job. StartTime string `json:"startTime,omitempty"` - // State: [Required] A state message specifying the overall job state. + // State: Required A state message specifying the overall job state. // // Possible values: - // "STATE_UNSPECIFIED" - // "PENDING" - // "SETUP_DONE" - // "RUNNING" - // "CANCEL_PENDING" - // "CANCEL_STARTED" - // "CANCELLED" - // "DONE" - // "ERROR" + // "STATE_UNSPECIFIED" - The job state is unknown. + // "PENDING" - The job is pending; it has been submitted, but is not + // yet running. + // "SETUP_DONE" - Job has been received by the service and completed + // initial setup; it will shortly be submitted to the cluster. + // "RUNNING" - The job is running on the cluster. + // "CANCEL_PENDING" - A CancelJob request has been received, but is + // pending. + // "CANCEL_STARTED" - Transient in-flight resources have been + // canceled, and the request to cancel the running job has been issued + // to the cluster. + // "CANCELLED" - The job cancelation was successful. + // "DONE" - The job has completed successfully. + // "ERROR" - The job has completed, but encountered an error. + // "ATTEMPT_FAILURE" - Job attempt has failed. The detail field + // contains failure details for this attempt.Applies to restartable jobs + // only. State string `json:"state,omitempty"` - // StateStartTime: [Output-only] The time when this state was entered. + // StateStartTime: Output-only The time when this state was entered. StateStartTime string `json:"stateStartTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Details") to @@ -1122,7 +1197,7 @@ func (s *JobStatus) MarshalJSON() ([]byte, error) { // ListClustersResponse: The list of all clusters in a project. type ListClustersResponse struct { - // Clusters: [Output-only] The clusters in the project. + // Clusters: Output-only The clusters in the project. Clusters []*Cluster `json:"clusters,omitempty"` // NextPageToken: The standard List next-page token. @@ -1157,29 +1232,30 @@ func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { // ListJobsRequest: A request to list jobs in a project. type ListJobsRequest struct { - // ClusterName: [Optional] If set, the returned jobs list includes only + // ClusterName: Optional If set, the returned jobs list includes only // jobs that were submitted to the named cluster. 
ClusterName string `json:"clusterName,omitempty"` - // Filter: [Optional] A filter constraining which jobs to list. Valid + // Filter: Optional A filter constraining which jobs to list. Valid // filters contain job state and label terms such as: labels.key1 = val1 // AND (labels.k2 = val2 OR labels.k3 = val3) Filter string `json:"filter,omitempty"` - // JobStateMatcher: [Optional] Specifies enumerated categories of jobs - // to list. + // JobStateMatcher: Optional Specifies enumerated categories of jobs to + // list. // // Possible values: - // "ALL" - // "ACTIVE" - // "NON_ACTIVE" + // "ALL" - Match all jobs, regardless of state. + // "ACTIVE" - Only match jobs in non-terminal states: PENDING, + // RUNNING, CANCEL_PENDING + // "NON_ACTIVE" - Only match jobs in terminal states: CANCELLED, DONE, + // ERROR JobStateMatcher string `json:"jobStateMatcher,omitempty"` - // PageSize: [Optional] The number of results to return in each - // response. + // PageSize: Optional The number of results to return in each response. PageSize int64 `json:"pageSize,omitempty"` - // PageToken: [Optional] The page token, returned by a previous call, to + // PageToken: Optional The page token, returned by a previous call, to // request the next page of results. PageToken string `json:"pageToken,omitempty"` @@ -1208,14 +1284,19 @@ func (s *ListJobsRequest) MarshalJSON() ([]byte, error) { // ListJobsResponse: A response to a request to list jobs in a project. type ListJobsResponse struct { - // Jobs: [Output-only] Jobs list. + // Jobs: Output-only Jobs list. Jobs []*Job `json:"jobs,omitempty"` - // NextPageToken: [Optional] This token is included in the response if + // NextPageToken: Optional This token is included in the response if // there are more results to fetch. To fetch additional results, provide - // this value as the page_token in a subsequent ListJobsRequest. + // this value as the page_token in a subsequent + // ListJobsRequest. NextPageToken string `json:"nextPageToken,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Jobs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1310,12 +1391,12 @@ func (s *LoggingConfiguration) MarshalJSON() ([]byte, error) { // ManagedGroupConfiguration: Specifies the resources used to actively // manage an instance group. type ManagedGroupConfiguration struct { - // InstanceGroupManagerName: [Output-only] The name of Instance Group + // InstanceGroupManagerName: Output-only The name of Instance Group // Manager managing this group. InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"` - // InstanceTemplateName: [Output-only] The name of Instance Template - // used for Managed Instance Group. + // InstanceTemplateName: Output-only The name of Instance Template used + // for Managed Instance Group. InstanceTemplateName string `json:"instanceTemplateName,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1346,15 +1427,14 @@ func (s *ManagedGroupConfiguration) MarshalJSON() ([]byte, error) { // NodeInitializationAction: Specifies an executable to run on a fully // configured node and a timeout period for executable completion. type NodeInitializationAction struct { - // ExecutableFile: [Required] Google Cloud Storage URI of executable - // file. 
+ // ExecutableFile: Required Google Cloud Storage URI of executable file. ExecutableFile string `json:"executableFile,omitempty"` - // ExecutionTimeout: [Optional] Amount of time executable has to - // complete. Default is 10 minutes. Cluster creation fails with an - // explanatory error message (the name of the executable that caused the - // error and the exceeded timeout period) if the executable is not - // completed at end of the timeout period. + // ExecutionTimeout: Optional Amount of time executable has to complete. + // Default is 10 minutes. Cluster creation fails with an explanatory + // error message (the name of the executable that caused the error and + // the exceeded timeout period) if the executable is not completed at + // end of the timeout period. ExecutionTimeout string `json:"executionTimeout,omitempty"` // ForceSendFields is a list of field names (e.g. "ExecutableFile") to @@ -1387,7 +1467,7 @@ func (s *NodeInitializationAction) MarshalJSON() ([]byte, error) { // a cluster, and deleting a cluster. type Operation struct { // Done: Indicates if the operation is done. If true, the operation is - // complete and the `result` is available. If false, the operation is + // complete and the result is available. If false, the operation is // still in progress. Done bool `json:"done,omitempty"` @@ -1398,15 +1478,15 @@ type Operation struct { Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The name of the operation resource, in the format - // projects/[project_id]/operations/[operation_id] + // projects/project_id/operations/operation_id Name string `json:"name,omitempty"` // Response: The operation response. If the called method returns no - // data on success, the response is `google.protobuf.Empty`. If the - // called method is `Get`,`Create` or `Update`, the response is the - // resource. For all other methods, the response type is a concatenation - // of the method name and "Response". For example, if the called method - // is `TakeSnapshot()`, the response type is `TakeSnapshotResponse`. + // data on success, the response is google.protobuf.Empty. If the called + // method is Get,Create or Update, the response is the resource. For all + // other methods, the response type is a concatenation of the method + // name and "Response". For example, if the called method is + // TakeSnapshot(), the response type is TakeSnapshotResponse. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1444,7 +1524,7 @@ type OperationMetadata struct { // ClusterUuid: Cluster UUId for the operation. ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: [Output-only] Short description of operation. + // Description: Output-only Short description of operation. Description string `json:"description,omitempty"` // Details: A message containing any operation metadata details. @@ -1459,7 +1539,7 @@ type OperationMetadata struct { // InsertTime: The time that the operation was requested. InsertTime string `json:"insertTime,omitempty"` - // OperationType: [Output-only] The operation type. + // OperationType: Output-only The operation type. OperationType string `json:"operationType,omitempty"` // StartTime: The time that the operation was started by the server. @@ -1468,18 +1548,21 @@ type OperationMetadata struct { // State: A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. 
+ // "PENDING" - The operation has been created. + // "RUNNING" - The operation is currently running. + // "DONE" - The operation is done, either cancelled or completed. State string `json:"state,omitempty"` - // Status: [Output-only] Current operation status. + // Status: Output-only Current operation status. Status *OperationStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] Previous operation status. + // StatusHistory: Output-only Previous operation status. StatusHistory []*OperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output-only Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1514,10 +1597,10 @@ type OperationStatus struct { // State: A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. State string `json:"state,omitempty"` // StateStartTime: The time this state was entered. @@ -1548,21 +1631,21 @@ func (s *OperationStatus) MarshalJSON() ([]byte, error) { // PigJob: A Cloud Dataproc job for running Pig queries on YARN. type PigJob struct { - // ContinueOnFailure: [Optional] Whether to continue executing queries - // if a query fails. The default value is false. Setting to true can be + // ContinueOnFailure: Optional Whether to continue executing queries if + // a query fails. The default value is false. Setting to true can be // useful when executing independent parallel queries. ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can - // contain Pig UDFs. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH + // of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig + // UDFs. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Pig. Properties that conflict with values set by the Cloud // Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and @@ -1576,8 +1659,8 @@ type PigJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values - // (equivalent to the Pig command: "name=[value]"). + // ScriptVariables: Optional Mapping of query variable names to values + // (equivalent to the Pig command: "name=value"). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` // ForceSendFields is a list of field names (e.g. 
"ContinueOnFailure") @@ -1607,39 +1690,39 @@ func (s *PigJob) MarshalJSON() ([]byte, error) { // PySparkJob: A Cloud Dataproc job for running PySpark applications on // YARN. type PySparkJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of .jar, .tar, .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include + // Args: Optional The arguments to pass to the driver. Do not include // arguments, such as --conf, that can be set as job properties, since a // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Python drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATHs of the Python driver and tasks. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs + // of the Python driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` - // MainPythonFileUri: [Required] The Hadoop Compatible Filesystem (HCFS) + // MainPythonFileUri: Required The Hadoop Compatible Filesystem (HCFS) // URI of the main Python file to use as the driver. Must be a .py file. MainPythonFileUri string `json:"mainPythonFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure PySpark. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. Properties map[string]string `json:"properties,omitempty"` - // PythonFileUris: [Optional] HCFS file URIs of Python files to pass to + // PythonFileUris: Optional HCFS file URIs of Python files to pass to // the PySpark framework. Supported file types: .py, .egg, and .zip. PythonFileUris []string `json:"pythonFileUris,omitempty"` @@ -1668,12 +1751,21 @@ func (s *PySparkJob) MarshalJSON() ([]byte, error) { // QueryList: A list of queries to run on a cluster. type QueryList struct { - // Queries: [Required] The queries to execute. You do not need to + // Queries: Required The queries to execute. You do not need to // terminate a query with a semicolon. Multiple queries can be specified // in one string by separating each with a semicolon. Here is an example // of an Cloud Dataproc API snippet that uses a QueryList to specify a - // HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", - // "query3;query4", ] } } + // HiveJob: + // "hiveJob": { + // "queryList": { + // "queries": [ + // "query1", + // "query2", + // "query3;query4", + // ] + // } + // } + // Queries []string `json:"queries,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Queries") to @@ -1702,17 +1794,17 @@ func (s *QueryList) MarshalJSON() ([]byte, error) { // SoftwareConfiguration: Specifies the selection and configuration of // software inside the cluster. type SoftwareConfiguration struct { - // ImageVersion: [Optional] The version of software inside the cluster. - // It must match the regular expression [0-9]+\.[0-9]+. If unspecified - // it will default to latest version. + // ImageVersion: Optional The version of software inside the cluster. It + // must match the regular expression 0-9+.0-9+. If unspecified it will + // default to latest version. ImageVersion string `json:"imageVersion,omitempty"` - // Properties: [Optional] The properties to set on daemon configuration - // files. Property keys are specified in "prefix:property" format, such + // Properties: Optional The properties to set on daemon configuration + // files.Property keys are specified in "prefix:property" format, such // as "core:fs.defaultFS". The following are supported prefixes and - // their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - - // mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - - // pig.properties spark - spark-defaults.conf + // their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - + // mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - + // pig.properties spark - spark-defaults.conf Properties map[string]string `json:"properties,omitempty"` // ForceSendFields is a list of field names (e.g. "ImageVersion") to @@ -1741,27 +1833,27 @@ func (s *SoftwareConfiguration) MarshalJSON() ([]byte, error) { // SparkJob: A Cloud Dataproc job for running Spark applications on // YARN. type SparkJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of Spark drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include + // Args: Optional The arguments to pass to the driver. Do not include // arguments, such as --conf, that can be set as job properties, since a // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Spark drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATHs of the Spark driver and tasks. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs + // of the Spark driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` // MainClass: The name of the driver's main class. The jar file that @@ -1773,7 +1865,7 @@ type SparkJob struct { // jar file that contains the main class. MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Spark. 
Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. @@ -1804,15 +1896,15 @@ func (s *SparkJob) MarshalJSON() ([]byte, error) { // SparkSqlJob: A Cloud Dataproc job for running Spark SQL queries. type SparkSqlJob struct { - // JarFileUris: [Optional] HCFS URIs of jar files to be added to the - // Spark CLASSPATH. + // JarFileUris: Optional HCFS URIs of jar files to be added to the Spark + // CLASSPATH. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Spark SQL's SparkConf. Properties that conflict with values // set by the Cloud Dataproc API may be overwritten. Properties map[string]string `json:"properties,omitempty"` @@ -1823,7 +1915,7 @@ type SparkSqlJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values + // ScriptVariables: Optional Mapping of query variable names to values // (equivalent to the Spark SQL command: SET name="value";). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` @@ -1850,42 +1942,45 @@ func (s *SparkSqlJob) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Status: The `Status` type defines a logical error model that is +// Status: The Status type defines a logical error model that is // suitable for different programming environments, including REST APIs -// and RPC APIs. It is used by [gRPC](https://github.com/grpc). The -// error model is designed to be: - Simple to use and understand for -// most users - Flexible enough to meet unexpected needs # Overview The -// `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// google.rpc.Code, but it may accept additional error codes if needed. -// The error message should be a developer-facing English message that -// helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the -// error details or localize it in the client. The optional error -// details may contain arbitrary information about the error. There is a -// predefined set of error detail types in the package `google.rpc` -// which can be used for common error conditions. # Language mapping The -// `Status` message is the logical representation of the error model, -// but it is not necessarily the actual wire format. When the `Status` -// message is exposed in different client libraries and different wire -// protocols, it can be mapped differently. For example, it will likely -// be mapped to some exceptions in Java, but more likely mapped to some -// error codes in C. # Other uses The error model and the `Status` -// message can be used in a variety of environments, either with or -// without APIs, to provide a consistent developer experience across -// different environments. Example uses of this error model include: - +// and RPC APIs. 
It is used by gRPC (https://github.com/grpc). The error +// model is designed to be: +// Simple to use and understand for most users +// Flexible enough to meet unexpected needsOverviewThe Status message +// contains three pieces of data: error code, error message, and error +// details. The error code should be an enum value of google.rpc.Code, +// but it may accept additional error codes if needed. The error message +// should be a developer-facing English message that helps developers +// understand and resolve the error. If a localized user-facing error +// message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of +// error detail types in the package google.rpc which can be used for +// common error conditions.Language mappingThe Status message is the +// logical representation of the error model, but it is not necessarily +// the actual wire format. When the Status message is exposed in +// different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions in Java, but more likely mapped to some error codes in +// C.Other usesThe error model and the Status message can be used in a +// variety of environments, either with or without APIs, to provide a +// consistent developer experience across different environments.Example +// uses of this error model include: // Partial errors. If a service needs to return partial errors to the -// client, it may embed the `Status` in the normal response to indicate -// the partial errors. - Workflow errors. A typical workflow has -// multiple steps. Each step may have a `Status` message for error -// reporting purpose. - Batch operations. If a client uses batch request -// and batch response, the `Status` message should be used directly -// inside batch response, one for each error sub-response. - +// client, it may embed the Status in the normal response to indicate +// the partial errors. +// Workflow errors. A typical workflow has multiple steps. Each step may +// have a Status message for error reporting purpose. +// Batch operations. If a client uses batch request and batch response, +// the Status message should be used directly inside batch response, one +// for each error sub-response. // Asynchronous operations. If an API call embeds asynchronous operation // results in its response, the status of those operations should be -// represented directly using the `Status` message. - Logging. If some -// API errors are stored in logs, the message `Status` could be used -// directly after any stripping needed for security/privacy reasons. +// represented directly using the Status message. +// Logging. If some API errors are stored in logs, the message Status +// could be used directly after any stripping needed for +// security/privacy reasons. type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. @@ -1925,7 +2020,7 @@ func (s *Status) MarshalJSON() ([]byte, error) { // SubmitJobRequest: A job submission request. type SubmitJobRequest struct { - // Job: [Required] The job resource. + // Job: Required The job resource. Job *Job `json:"job,omitempty"` // ForceSendFields is a list of field names (e.g. "Job") to @@ -1953,30 +2048,31 @@ func (s *SubmitJobRequest) MarshalJSON() ([]byte, error) { // YarnApplication: A YARN application created by a job. 
Application // information is a subset of -// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. type YarnApplication struct { - // Name: [Required] The application name. + // Name: Required The application name. Name string `json:"name,omitempty"` - // Progress: [Required] The numerical progress of the application, from - // 1 to 100. + // Progress: Required The numerical progress of the application, from 1 + // to 100. Progress float64 `json:"progress,omitempty"` - // State: [Required] The application state. + // State: Required The application state. // // Possible values: - // "STATE_UNSPECIFIED" - // "NEW" - // "NEW_SAVING" - // "SUBMITTED" - // "ACCEPTED" - // "RUNNING" - // "FINISHED" - // "FAILED" - // "KILLED" + // "STATE_UNSPECIFIED" - Status is unspecified. + // "NEW" - Status is NEW. + // "NEW_SAVING" - Status is NEW_SAVING. + // "SUBMITTED" - Status is SUBMITTED. + // "ACCEPTED" - Status is ACCEPTED. + // "RUNNING" - Status is RUNNING. + // "FINISHED" - Status is FINISHED. + // "FAILED" - Status is FAILED. + // "KILLED" - Status is KILLED. State string `json:"state,omitempty"` - // TrackingUrl: [Optional] The HTTP URL of the ApplicationMaster, + // TrackingUrl: Optional The HTTP URL of the ApplicationMaster, // HistoryServer, or TimelineServer that provides application-specific // information. The URL uses the internal hostname, and requires a proxy // server for resolution and, possibly, access. @@ -2033,8 +2129,8 @@ type OperationsCancelCall struct { // Cancel: Starts asynchronous cancellation on a long-running operation. // The server makes a best effort to cancel the operation, but success // is not guaranteed. If the server doesn't support this method, it -// returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use -// [Operations.GetOperation] or other methods to check whether the +// returns google.rpc.Code.UNIMPLEMENTED. Clients may use +// Operations.GetOperation or other methods to check whether the // cancellation succeeded or the operation completed despite // cancellation. func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { @@ -2075,6 +2171,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { @@ -2130,7 +2227,8 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. 
Clients may use Operations.GetOperation or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.", + // "flatPath": "v1alpha1/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "dataproc.operations.cancel", // "parameterOrder": [ @@ -2209,6 +2307,7 @@ func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}") @@ -2260,6 +2359,7 @@ func (c *OperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) return ret, nil // { // "description": "Deletes a long-running operation. It indicates the client is no longer interested in the operation result. It does not cancel the operation.", + // "flatPath": "v1alpha1/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "dataproc.operations.delete", // "parameterOrder": [ @@ -2346,6 +2446,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2400,6 +2501,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) return ret, nil // { // "description": "Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1alpha1/operations/{operationsId}", // "httpMethod": "GET", // "id": "dataproc.operations.get", // "parameterOrder": [ @@ -2438,21 +2540,21 @@ type OperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// `google.rpc.Code.UNIMPLEMENTED`. +// google.rpc.Code.UNIMPLEMENTED. func (r *OperationsService) List(name string) *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// Filter sets the optional parameter "filter": [Required] A JSON object +// Filter sets the optional parameter "filter": Required A JSON object // that contains filters for the list operation, in the format // {"key1":"value1","key2":"value2", ..., }. Possible keys include -// project_id, cluster_name, and operation_state_matcher. If project_id +// project_id, cluster_name, and operation_state_matcher.If project_id // is set, requests the list of operations that belong to the specified -// Google Cloud Platform project ID. This key is required. If +// Google Cloud Platform project ID. This key is required.If // cluster_name is set, requests the list of operations that were -// submitted to the specified cluster name. This key is optional. If +// submitted to the specified cluster name. This key is optional.If // operation_state_matcher is set, requests the list of operations that // match one of the following status options: ALL, ACTIVE, or // NON_ACTIVE. 
@@ -2516,6 +2618,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2569,7 +2672,8 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", + // "flatPath": "v1alpha1/operations", // "httpMethod": "GET", // "id": "dataproc.operations.list", // "parameterOrder": [ @@ -2577,7 +2681,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe // ], // "parameters": { // "filter": { - // "description": "[Required] A JSON object that contains filters for the list operation, in the format {\"key1\":\"value1\",\"key2\":\"value2\", ..., }. Possible keys include project_id, cluster_name, and operation_state_matcher. If project_id is set, requests the list of operations that belong to the specified Google Cloud Platform project ID. This key is required. If cluster_name is set, requests the list of operations that were submitted to the specified cluster name. This key is optional. If operation_state_matcher is set, requests the list of operations that match one of the following status options: ALL, ACTIVE, or NON_ACTIVE.", + // "description": "Required A JSON object that contains filters for the list operation, in the format {\"key1\":\"value1\",\"key2\":\"value2\", ..., }. Possible keys include project_id, cluster_name, and operation_state_matcher.If project_id is set, requests the list of operations that belong to the specified Google Cloud Platform project ID. This key is required.If cluster_name is set, requests the list of operations that were submitted to the specified cluster name. 
This key is optional.If operation_state_matcher is set, requests the list of operations that match one of the following status options: ALL, ACTIVE, or NON_ACTIVE.", // "location": "query", // "type": "string" // }, @@ -2684,6 +2788,7 @@ func (c *ProjectsRegionsClustersCreateCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cluster) if err != nil { @@ -2741,6 +2846,7 @@ func (c *ProjectsRegionsClustersCreateCall) Do(opts ...googleapi.CallOption) (*O return ret, nil // { // "description": "Request to create a cluster in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters", // "httpMethod": "POST", // "id": "dataproc.projects.regions.clusters.create", // "parameterOrder": [ @@ -2749,13 +2855,13 @@ func (c *ProjectsRegionsClustersCreateCall) Do(opts ...googleapi.CallOption) (*O // ], // "parameters": { // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -2827,6 +2933,7 @@ func (c *ProjectsRegionsClustersDeleteCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}") @@ -2880,6 +2987,7 @@ func (c *ProjectsRegionsClustersDeleteCall) Do(opts ...googleapi.CallOption) (*O return ret, nil // { // "description": "Request to delete a cluster in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", // "httpMethod": "DELETE", // "id": "dataproc.projects.regions.clusters.delete", // "parameterOrder": [ @@ -2889,19 +2997,19 @@ func (c *ProjectsRegionsClustersDeleteCall) Do(opts ...googleapi.CallOption) (*O // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -2982,6 +3090,7 @@ func (c *ProjectsRegionsClustersGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ 
-3038,6 +3147,7 @@ func (c *ProjectsRegionsClustersGetCall) Do(opts ...googleapi.CallOption) (*Clus return ret, nil // { // "description": "Request to get the resource representation for a cluster in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", // "httpMethod": "GET", // "id": "dataproc.projects.regions.clusters.get", // "parameterOrder": [ @@ -3047,19 +3157,19 @@ func (c *ProjectsRegionsClustersGetCall) Do(opts ...googleapi.CallOption) (*Clus // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3096,7 +3206,7 @@ func (r *ProjectsRegionsClustersService) List(projectId string, region string) * return c } -// Filter sets the optional parameter "filter": [Optional] A filter +// Filter sets the optional parameter "filter": Optional A filter // constraining which clusters to list. Valid filters contain label // terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 // = val3) @@ -3160,6 +3270,7 @@ func (c *ProjectsRegionsClustersListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3215,6 +3326,7 @@ func (c *ProjectsRegionsClustersListCall) Do(opts ...googleapi.CallOption) (*Lis return ret, nil // { // "description": "Request a list of all regions/{region}/clusters in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters", // "httpMethod": "GET", // "id": "dataproc.projects.regions.clusters.list", // "parameterOrder": [ @@ -3223,7 +3335,7 @@ func (c *ProjectsRegionsClustersListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "filter": { - // "description": "[Optional] A filter constraining which clusters to list. Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", + // "description": "Optional A filter constraining which clusters to list. 
Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", // "location": "query", // "type": "string" // }, @@ -3239,13 +3351,13 @@ func (c *ProjectsRegionsClustersListCall) Do(opts ...googleapi.CallOption) (*Lis // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3306,15 +3418,22 @@ func (r *ProjectsRegionsClustersService) Patch(projectId string, region string, return c } -// UpdateMask sets the optional parameter "updateMask": [Required] -// Specifies the path, relative to Cluster, of the field to update. For -// example, to change the number of workers in a cluster to 5, the -// update_mask parameter would be specified as -// "configuration.worker_configuration.num_instances," and the PATCH -// request body would specify the new value, as follows: { -// "configuration":{ "workerConfiguration":{ "numInstances":"5" } } } -// Note: Currently, configuration.worker_configuration.num_instances is -// the only field that can be updated. +// UpdateMask sets the optional parameter "updateMask": Required +// Specifies the path, relative to Cluster, of the field to +// update. For example, to change the number of workers in a cluster to +// 5, the update_mask parameter would be specified as +// "configuration.worker_configuration.num_instances," and +// the PATCH request body would specify the new value, as follows: +// { +// "configuration":{ +// "workerConfiguration":{ +// "numInstances":"5" +// } +// } +// } +// Note: Currently, +// configuration.worker_configuration.num_instances is the +// only field that can be updated. 
func (c *ProjectsRegionsClustersPatchCall) UpdateMask(updateMask string) *ProjectsRegionsClustersPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -3351,6 +3470,7 @@ func (c *ProjectsRegionsClustersPatchCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cluster) if err != nil { @@ -3409,6 +3529,7 @@ func (c *ProjectsRegionsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Op return ret, nil // { // "description": "Request to update a cluster in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/clusters/{clusterName}", // "httpMethod": "PATCH", // "id": "dataproc.projects.regions.clusters.patch", // "parameterOrder": [ @@ -3418,25 +3539,26 @@ func (c *ProjectsRegionsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Op // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "[Required] Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as \"configuration.worker_configuration.num_instances,\" and the PATCH request body would specify the new value, as follows: { \"configuration\":{ \"workerConfiguration\":{ \"numInstances\":\"5\" } } } Note: Currently, configuration.worker_configuration.num_instances is the only field that can be updated.", + // "description": "Required Specifies the path, relative to \u003ccode\u003eCluster\u003c/code\u003e, of the field to update. 
For example, to change the number of workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003e\"configuration.worker_configuration.num_instances,\"\u003c/code\u003e and the PATCH request body would specify the new value, as follows:\n{\n \"configuration\":{\n \"workerConfiguration\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003econfiguration.worker_configuration.num_instances\u003c/code\u003e is the only field that can be updated.", + // "format": "google-fieldmask", // "location": "query", // "type": "string" // } @@ -3511,6 +3633,7 @@ func (c *ProjectsRegionsJobsCancelCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceljobrequest) if err != nil { @@ -3569,6 +3692,7 @@ func (c *ProjectsRegionsJobsCancelCall) Do(opts ...googleapi.CallOption) (*Job, return ret, nil // { // "description": "Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs:list or regions/{region}/jobs:get.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", // "httpMethod": "POST", // "id": "dataproc.projects.regions.jobs.cancel", // "parameterOrder": [ @@ -3578,19 +3702,19 @@ func (c *ProjectsRegionsJobsCancelCall) Do(opts ...googleapi.CallOption) (*Job, // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3623,7 +3747,7 @@ type ProjectsRegionsJobsDeleteCall struct { } // Delete: Deletes the job from the project. If the job is active, the -// delete fails, and the response returns `FAILED_PRECONDITION`. +// delete fails, and the response returns FAILED_PRECONDITION. func (r *ProjectsRegionsJobsService) Delete(projectId string, region string, jobId string) *ProjectsRegionsJobsDeleteCall { c := &ProjectsRegionsJobsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -3663,6 +3787,7 @@ func (c *ProjectsRegionsJobsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}") @@ -3715,7 +3840,8 @@ func (c *ProjectsRegionsJobsDeleteCall) Do(opts ...googleapi.CallOption) (*Job, } return ret, nil // { - // "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", + // "description": "Deletes the job from the project. 
If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", // "httpMethod": "DELETE", // "id": "dataproc.projects.regions.jobs.delete", // "parameterOrder": [ @@ -3725,19 +3851,19 @@ func (c *ProjectsRegionsJobsDeleteCall) Do(opts ...googleapi.CallOption) (*Job, // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" @@ -3817,6 +3943,7 @@ func (c *ProjectsRegionsJobsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3873,6 +4000,7 @@ func (c *ProjectsRegionsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, err return ret, nil // { // "description": "Gets the resource representation for a job in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", // "httpMethod": "GET", // "id": "dataproc.projects.regions.jobs.get", // "parameterOrder": [ @@ -3882,25 +4010,364 @@ func (c *ProjectsRegionsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, err // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Required The Dataproc region in which to handle the request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + // "response": { + // "$ref": "Job" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dataproc.projects.regions.jobs.list": + +type ProjectsRegionsJobsListCall struct { + s *Service + projectId string + region string + listjobsrequest *ListJobsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// List: Lists regions/{region}/jobs in a project. +func (r *ProjectsRegionsJobsService) List(projectId string, region string, listjobsrequest *ListJobsRequest) *ProjectsRegionsJobsListCall { + c := &ProjectsRegionsJobsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.region = region + c.listjobsrequest = listjobsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsRegionsJobsListCall) Fields(s ...googleapi.Field) *ProjectsRegionsJobsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsRegionsJobsListCall) Context(ctx context.Context) *ProjectsRegionsJobsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsRegionsJobsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsRegionsJobsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.listjobsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/projects/{projectId}/regions/{region}/jobs:list") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.regions.jobs.list" call. +// Exactly one of *ListJobsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListJobsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsRegionsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListJobsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists regions/{region}/jobs in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs:list", + // "httpMethod": "POST", + // "id": "dataproc.projects.regions.jobs.list", + // "parameterOrder": [ + // "projectId", + // "region" + // ], + // "parameters": { + // "projectId": { + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Required The Dataproc region in which to handle the request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs:list", + // "request": { + // "$ref": "ListJobsRequest" + // }, + // "response": { + // "$ref": "ListJobsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsRegionsJobsListCall) Pages(ctx context.Context, f func(*ListJobsResponse) error) error { + c.ctx_ = ctx + defer func(pt string) { c.listjobsrequest.PageToken = pt }(c.listjobsrequest.PageToken) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.listjobsrequest.PageToken = x.NextPageToken + } +} + +// method id "dataproc.projects.regions.jobs.patch": + +type ProjectsRegionsJobsPatchCall struct { + s *Service + projectId string + region string + jobId string + job *Job + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a job in a project. +func (r *ProjectsRegionsJobsService) Patch(projectId string, region string, jobId string, job *Job) *ProjectsRegionsJobsPatchCall { + c := &ProjectsRegionsJobsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.region = region + c.jobId = jobId + c.job = job + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required +// Specifies the path, relative to Job, of the field to +// update. For example, to update the labels of a Job the +// update_mask parameter would be specified as +// labels, and the PATCH request body would specify the new +// value. Note: Currently, labels is the +// only field that can be updated. +func (c *ProjectsRegionsJobsPatchCall) UpdateMask(updateMask string) *ProjectsRegionsJobsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsRegionsJobsPatchCall) Fields(s ...googleapi.Field) *ProjectsRegionsJobsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsRegionsJobsPatchCall) Context(ctx context.Context) *ProjectsRegionsJobsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsRegionsJobsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsRegionsJobsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "region": c.region, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.regions.jobs.patch" call. +// Exactly one of *Job or error will be non-nil. Any non-2xx status code +// is an error. Response headers are in either +// *Job.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsRegionsJobsPatchCall) Do(opts ...googleapi.CallOption) (*Job, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Job{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a job in a project.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + // "httpMethod": "PATCH", + // "id": "dataproc.projects.regions.jobs.patch", + // "parameterOrder": [ + // "projectId", + // "region", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Required Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. 
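Per the updateMask documentation in the surrounding descriptor, labels is currently the only Job field that jobs.patch will update: the mask names the field and the request body carries its new value. A hedged sketch of driving the new Patch call, assuming the generated Job struct exposes a Labels map and using placeholder project, region, and job IDs:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	dataproc "google.golang.org/api/dataproc/v1alpha1"
)

// updateJobLabels patches a single field of an existing job: the update mask
// names the field ("labels") and the request body supplies its new value.
func updateJobLabels(svc *dataproc.Service, project, region, jobID string) error {
	// The Labels field on the generated Job struct is assumed from the descriptor text.
	body := &dataproc.Job{Labels: map[string]string{"env": "staging"}}
	_, err := svc.Projects.Regions.Jobs.Patch(project, region, jobID, body).
		UpdateMask("labels").
		Do()
	return err
}

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, dataproc.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := dataproc.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	if err := updateJobLabels(svc, "my-project", "us-central1", "my-job-id"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("labels updated")
}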
\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "v1alpha1/projects/{projectId}/regions/{region}/jobs/{jobId}", + // "request": { + // "$ref": "Job" + // }, // "response": { // "$ref": "Job" // }, @@ -3963,6 +4430,7 @@ func (c *ProjectsRegionsJobsSubmitCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.submitjobrequest) if err != nil { @@ -4020,6 +4488,7 @@ func (c *ProjectsRegionsJobsSubmitCall) Do(opts ...googleapi.CallOption) (*Job, return ret, nil // { // "description": "Submits a job to a cluster.", + // "flatPath": "v1alpha1/projects/{projectId}/regions/{region}/jobs:submit", // "httpMethod": "POST", // "id": "dataproc.projects.regions.jobs.submit", // "parameterOrder": [ @@ -4028,13 +4497,13 @@ func (c *ProjectsRegionsJobsSubmitCall) Do(opts ...googleapi.CallOption) (*Job, // ], // "parameters": { // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "region": { - // "description": "[Required] The Dataproc region in which to handle the request.", + // "description": "Required The Dataproc region in which to handle the request.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-api.json b/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-api.json index 907b0ee9d..bf980dddc 100644 --- a/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-api.json +++ b/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-api.json @@ -1,1736 +1,1857 @@ { - "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/cZQ-6vqHhMM0YELMHYWsiSzNXAM\"", - "discoveryVersion": "v1", - "id": "dataproc:v1beta1", - "name": "dataproc", - "version": "v1beta1", - "revision": "20161102", - "title": "Google Cloud Dataproc API", - "description": "An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/dataproc/", - "protocol": "rest", - "baseUrl": "https://dataproc.googleapis.com/", - "basePath": "", - "rootUrl": "https://dataproc.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": 
"query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" - }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query" - }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query" - }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "schemas": { - "Cluster": { - "id": "Cluster", - "type": "object", - "description": "Describes the identifying information, configuration, and status of a cluster of Google Compute Engine instances.", - "properties": { - "projectId": { - "type": "string", - "description": "[Required] The Google Cloud Platform project ID that the cluster belongs to." - }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name. Cluster names within a project must be unique. Names from deleted clusters can be reused." - }, - "configuration": { - "$ref": "ClusterConfiguration", - "description": "[Required] The cluster configuration. Note that Cloud Dataproc may set default values, and values may change when clusters are updated." - }, - "labels": { - "type": "object", - "description": "[Optional] The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given cluster.", - "additionalProperties": { - "type": "string" - } - }, - "status": { - "$ref": "ClusterStatus", - "description": "[Output-only] Cluster status." - }, - "statusHistory": { - "type": "array", - "description": "[Output-only] Previous cluster statuses.", - "items": { - "$ref": "ClusterStatus" - } - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster." - }, - "metrics": { - "$ref": "ClusterMetrics", - "description": "Contains cluster daemon metrics such as HDFS and YARN stats." 
- } - } - }, - "ClusterConfiguration": { - "id": "ClusterConfiguration", - "type": "object", - "description": "The cluster configuration.", - "properties": { - "configurationBucket": { - "type": "string", - "description": "[Optional] A Google Cloud Storage staging bucket used for sharing generated SSH keys and configuration. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, and then it will create and manage this project-level, per-location bucket for you." - }, - "gceClusterConfiguration": { - "$ref": "GceClusterConfiguration", - "description": "[Required] The shared Google Compute Engine configuration settings for all instances in a cluster." - }, - "masterConfiguration": { - "$ref": "InstanceGroupConfiguration", - "description": "[Optional] The Google Compute Engine configuration settings for the master instance in a cluster." - }, - "workerConfiguration": { - "$ref": "InstanceGroupConfiguration", - "description": "[Optional] The Google Compute Engine configuration settings for worker instances in a cluster." - }, - "secondaryWorkerConfiguration": { - "$ref": "InstanceGroupConfiguration", - "description": "[Optional] The Google Compute Engine configuration settings for additional worker instances in a cluster." - }, - "softwareConfiguration": { - "$ref": "SoftwareConfiguration", - "description": "[Optional] The configuration settings for software inside the cluster." - }, - "initializationActions": { - "type": "array", - "description": "[Optional] Commands to execute on each node after configuration is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below: ROLE=$(/usr/share/google/get_metadata_value attributes/role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", - "items": { - "$ref": "NodeInitializationAction" - } - } - } - }, - "GceClusterConfiguration": { - "id": "GceClusterConfiguration", - "type": "object", - "description": "Common configuration settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", - "properties": { - "zoneUri": { - "type": "string", - "description": "[Required] The zone where the Google Compute Engine cluster will be located. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`." - }, - "networkUri": { - "type": "string", - "description": "The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see https://cloud.google.com/compute/docs/subnetworks for more information). Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`." - }, - "subnetworkUri": { - "type": "string", - "description": "The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`." - }, - "internalIpOnly": { - "type": "boolean", - "description": "If true, all instances in the cluser will only have internal IP addresses. 
By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses." - }, - "serviceAccountScopes": { - "type": "array", - "description": "The URIs of service account scopes to be included in Google Compute Engine instances. The following base set of scopes is always included: - https://www.googleapis.com/auth/cloud.useraccounts.readonly - https://www.googleapis.com/auth/devstorage.read_write - https://www.googleapis.com/auth/logging.write If no scopes are specfied, the following defaults are also provided: - https://www.googleapis.com/auth/bigquery - https://www.googleapis.com/auth/bigtable.admin.table - https://www.googleapis.com/auth/bigtable.data - https://www.googleapis.com/auth/devstorage.full_control", - "items": { - "type": "string" - } - }, - "tags": { - "type": "array", - "description": "The Google Compute Engine tags to add to all instances.", - "items": { - "type": "string" - } - }, - "metadata": { - "type": "object", - "description": "The Google Compute Engine metadata entries to add to all instances.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "InstanceGroupConfiguration": { - "id": "InstanceGroupConfiguration", - "type": "object", - "description": "The configuration settings for Google Compute Engine resources in an instance group, such as a master or worker group.", - "properties": { - "numInstances": { - "type": "integer", - "description": "The number of VM instances in the instance group. For master instance groups, must be set to 1.", - "format": "int32" - }, - "instanceNames": { - "type": "array", - "description": "The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group if not set by user (recommended practice is to let Dataproc derive the name).", - "items": { - "type": "string" - } - }, - "imageUri": { - "type": "string", - "description": "[Output-only] The Google Compute Engine image resource used for cluster instances. Inferred from `SoftwareConfiguration.image_version`." - }, - "machineTypeUri": { - "type": "string", - "description": "The Google Compute Engine machine type used for cluster instances. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`." - }, - "diskConfiguration": { - "$ref": "DiskConfiguration", - "description": "Disk option configuration settings." - }, - "isPreemptible": { - "type": "boolean", - "description": "Specifies that this instance group contains Preemptible Instances." - }, - "managedGroupConfiguration": { - "$ref": "ManagedGroupConfiguration", - "description": "[Output-only] The configuration for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups." - } - } - }, - "DiskConfiguration": { - "id": "DiskConfiguration", - "type": "object", - "description": "Specifies the configuration of disk options for a group of VM instances.", - "properties": { - "bootDiskSizeGb": { - "type": "integer", - "description": "[Optional] Size in GB of the boot disk (default is 500GB).", - "format": "int32" - }, - "numLocalSsds": { - "type": "integer", - "description": "[Optional] Number of attached SSDs, from 0 to 4 (default is 0). 
If SSDs are not attached, the boot disk is used to store runtime logs and HDFS data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic configuration and installed binaries.", - "format": "int32" - } - } - }, - "ManagedGroupConfiguration": { - "id": "ManagedGroupConfiguration", - "type": "object", - "description": "Specifies the resources used to actively manage an instance group.", - "properties": { - "instanceTemplateName": { - "type": "string", - "description": "[Output-only] The name of the Instance Template used for the Managed Instance Group." - }, - "instanceGroupManagerName": { - "type": "string", - "description": "[Output-only] The name of the Instance Group Manager for this group." - } - } - }, - "SoftwareConfiguration": { - "id": "SoftwareConfiguration", - "type": "object", - "description": "Specifies the selection and configuration of software inside the cluster.", - "properties": { - "imageVersion": { - "type": "string", - "description": "[Optional] The version of software inside the cluster. It must match the regular expression `[0-9]+\\.[0-9]+`. If unspecified, it defaults to the latest version (see [Cloud Dataproc Versioning](/dataproc/versioning))." - }, - "properties": { - "type": "object", - "description": "[Optional] The properties to set on daemon configuration files. Property keys are specified in \"prefix:property\" format, such as \"core:fs.defaultFS\". The following are supported prefixes and their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - pig.properties spark - spark-defaults.conf", - "additionalProperties": { - "type": "string" - } - } - } - }, - "NodeInitializationAction": { - "id": "NodeInitializationAction", - "type": "object", - "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", - "properties": { - "executableFile": { - "type": "string", - "description": "[Required] Google Cloud Storage URI of executable file." - }, - "executionTimeout": { - "type": "string", - "description": "[Optional] Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period." + "version": "v1beta1", + "baseUrl": "https://dataproc.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } - } }, - "ClusterStatus": { - "id": "ClusterStatus", - "type": "object", - "description": "The status of a cluster and its instances.", - "properties": { - "state": { - "type": "string", - "description": "The cluster's state.", - "enum": [ - "UNKNOWN", - "CREATING", - "RUNNING", - "ERROR", - "DELETING", - "UPDATING" - ] - }, - "detail": { - "type": "string", - "description": "Optional details of cluster's state." 
+ "kind": "discovery#restDescription", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "servicePath": "", + "rootUrl": "https://dataproc.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "dataproc", + "batchPath": "batch", + "id": "dataproc:v1beta1", + "documentationLink": "https://cloud.google.com/dataproc/", + "revision": "20170214", + "title": "Google Cloud Dataproc API", + "discoveryVersion": "v1", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "jobs": { + "methods": { + "delete": { + "parameters": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}", + "path": "v1beta1/projects/{projectId}/jobs/{jobId}", + "id": "dataproc.projects.jobs.delete", + "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", + "response": { + "$ref": "Empty" + }, + "httpMethod": "DELETE", + "parameterOrder": [ + "projectId", + "jobId" + ] + }, + "list": { + "id": "dataproc.projects.jobs.list", + "path": "v1beta1/projects/{projectId}/jobs", + "description": "Lists jobs in a project.", + "httpMethod": "GET", + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "ListJobsResponse" + }, + "parameters": { + "filter": { + "location": "query", + "description": "Optional A filter constraining which jobs to list. Valid filters contain job state and label terms such as: labels.key1 = val1 AND (labels.k2 = val2 OR labels.k3 = val3)", + "type": "string" + }, + "jobStateMatcher": { + "location": "query", + "enum": [ + "ALL", + "ACTIVE", + "NON_ACTIVE" + ], + "description": "Optional Specifies enumerated categories of jobs to list.", + "type": "string" + }, + "pageToken": { + "description": "Optional The page token, returned by a previous call, to request the next page of results.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional The number of results to return in each response.", + "format": "int32", + "type": "integer" + }, + "clusterName": { + "location": "query", + "description": "Optional If set, the returned jobs list includes only jobs that were submitted to the named cluster.", + "type": "string" + }, + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectId}/jobs" + }, + "cancel": { + "request": { + "$ref": "CancelJobRequest" + }, + "description": "Starts a job cancellation request. 
To access the job resource after cancellation, call jobs.list or jobs.get.", + "httpMethod": "POST", + "parameterOrder": [ + "projectId", + "jobId" + ], + "response": { + "$ref": "Job" + }, + "parameters": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "jobId": { + "description": "Required The job ID.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}:cancel", + "id": "dataproc.projects.jobs.cancel", + "path": "v1beta1/projects/{projectId}/jobs/{jobId}:cancel" + }, + "get": { + "description": "Gets the resource representation for a job in a project.", + "parameterOrder": [ + "projectId", + "jobId" + ], + "response": { + "$ref": "Job" + }, + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}", + "path": "v1beta1/projects/{projectId}/jobs/{jobId}", + "id": "dataproc.projects.jobs.get" + }, + "patch": { + "description": "Updates a job in a project.", + "request": { + "$ref": "Job" + }, + "response": { + "$ref": "Job" + }, + "parameterOrder": [ + "projectId", + "jobId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "updateMask": { + "location": "query", + "description": "Required Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. 
\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + "format": "google-fieldmask", + "type": "string" + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string", + "location": "path" + }, + "jobId": { + "location": "path", + "description": "Required The job ID.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}", + "path": "v1beta1/projects/{projectId}/jobs/{jobId}", + "id": "dataproc.projects.jobs.patch" + }, + "submit": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectId}/jobs:submit", + "path": "v1beta1/projects/{projectId}/jobs:submit", + "id": "dataproc.projects.jobs.submit", + "description": "Submits a job to a cluster.", + "request": { + "$ref": "SubmitJobRequest" + }, + "response": { + "$ref": "Job" + }, + "parameterOrder": [ + "projectId" + ], + "httpMethod": "POST" + } + } + }, + "clusters": { + "methods": { + "diagnose": { + "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation.", + "request": { + "$ref": "DiagnoseClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "clusterName" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterName": { + "location": "path", + "description": "Required The cluster name.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}:diagnose", + "path": "v1beta1/projects/{projectId}/clusters/{clusterName}:diagnose", + "id": "dataproc.projects.clusters.diagnose" + }, + "delete": { + "response": { + "$ref": "Operation" + }, + "httpMethod": "DELETE", + "parameterOrder": [ + "projectId", + "clusterName" + ], + "parameters": { + "clusterName": { + "location": "path", + "description": "Required The cluster name.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}", + "path": "v1beta1/projects/{projectId}/clusters/{clusterName}", + "id": "dataproc.projects.clusters.delete", + "description": "Deletes a cluster in a project." + }, + "list": { + "path": "v1beta1/projects/{projectId}/clusters", + "id": "dataproc.projects.clusters.list", + "description": "Lists all clusters in a project.", + "response": { + "$ref": "ListClustersResponse" + }, + "parameterOrder": [ + "projectId" + ], + "httpMethod": "GET", + "parameters": { + "filter": { + "location": "query", + "description": "Optional A filter constraining which clusters to list. 
Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "The standard List page token.", + "type": "string" + }, + "pageSize": { + "description": "The standard List page size.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectId}/clusters" + }, + "create": { + "parameters": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectId}/clusters", + "path": "v1beta1/projects/{projectId}/clusters", + "id": "dataproc.projects.clusters.create", + "request": { + "$ref": "Cluster" + }, + "description": "Creates a cluster in a project.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId" + ], + "httpMethod": "POST" + }, + "patch": { + "request": { + "$ref": "Cluster" + }, + "description": "Updates a cluster in a project.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "clusterName" + ], + "httpMethod": "PATCH", + "parameters": { + "updateMask": { + "location": "query", + "description": "Required Specifies the path, relative to \u003ccode\u003eCluster\u003c/code\u003e, of the field to update. For example, to change the number of workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003econfiguration.worker_configuration.num_instances\u003c/code\u003e, and the PATCH request body would specify the new value, as follows:\n{\n \"configuration\":{\n \"workerConfiguration\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003econfig.worker_config.num_instances\u003c/code\u003e and \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e are the only fields that can be updated.", + "format": "google-fieldmask", + "type": "string" + }, + "clusterName": { + "location": "path", + "description": "Required The cluster name.", + "required": true, + "type": "string" + }, + "projectId": { + "location": "path", + "description": "Required The ID of the Google Cloud Platform project the cluster belongs to.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}", + "path": "v1beta1/projects/{projectId}/clusters/{clusterName}", + "id": "dataproc.projects.clusters.patch" + }, + "get": { + "response": { + "$ref": "Cluster" + }, + "parameterOrder": [ + "projectId", + "clusterName" + ], + "httpMethod": "GET", + "scopes": [ + 
"https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterName": { + "location": "path", + "description": "Required The cluster name.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}", + "path": "v1beta1/projects/{projectId}/clusters/{clusterName}", + "id": "dataproc.projects.clusters.get", + "description": "Gets the resource representation for a cluster in a project." + } + } + } + } }, - "stateStartTime": { - "type": "string", - "description": "Time when this state was entered." + "operations": { + "methods": { + "delete": { + "httpMethod": "DELETE", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be deleted.", + "required": true, + "type": "string", + "pattern": "^operations/.+$", + "location": "path" + } + }, + "flatPath": "v1beta1/operations/{operationsId}", + "id": "dataproc.operations.delete", + "path": "v1beta1/{+name}", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED." + }, + "get": { + "path": "v1beta1/{+name}", + "id": "dataproc.operations.get", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "required": true, + "type": "string", + "pattern": "^operations/.+$", + "location": "path" + } + }, + "flatPath": "v1beta1/operations/{operationsId}" + }, + "list": { + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the operation collection.", + "required": true, + "type": "string", + "pattern": "^operations$", + "location": "path" + }, + "pageToken": { + "description": "The standard list page token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + }, + "filter": { + "description": "The standard list filter.", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1beta1/operations", + "path": "v1beta1/{+name}", + "id": "dataproc.operations.list", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations." 
+ }, + "cancel": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "required": true, + "type": "string", + "pattern": "^operations/.+$", + "location": "path" + } + }, + "flatPath": "v1beta1/operations/{operationsId}:cancel", + "id": "dataproc.operations.cancel", + "path": "v1beta1/{+name}:cancel", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use operations.get or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation.", + "request": { + "$ref": "CancelOperationRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + } + } + } } - } }, - "ClusterMetrics": { - "id": "ClusterMetrics", - "type": "object", - "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.", - "properties": { - "hdfsMetrics": { - "type": "object", - "description": "The HDFS metrics.", - "additionalProperties": { + "parameters": { + "bearer_token": { + "description": "OAuth bearer token.", "type": "string", - "format": "int64" - } + "location": "query" }, - "yarnMetrics": { - "type": "object", - "description": "The YARN metrics.", - "additionalProperties": { + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", "type": "string", - "format": "int64" - } - } - } - }, - "Operation": { - "id": "Operation", - "type": "object", - "description": "This resource represents a long-running operation that is the result of a network API call.", - "properties": { - "name": { - "type": "string", - "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should have the format of `operations/some/unique/name`." - }, - "metadata": { - "type": "object", - "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - }, - "done": { - "type": "boolean", - "description": "If the value is `false`, it means the operation is still in progress. If true, the operation is completed, and either `error` or `response` is available." - }, - "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure or cancellation." - }, - "response": { - "type": "object", - "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. 
Contains field @type with type URL." - } - } - } - }, - "Status": { - "id": "Status", - "type": "object", - "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). The error model is designed to be: - Simple to use and understand for most users - Flexible enough to meet unexpected needs # Overview The `Status` message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers *understand* and *resolve* the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package `google.rpc` which can be used for common error conditions. # Language mapping The `Status` message is the logical representation of the error model, but it is not necessarily the actual wire format. When the `Status` message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C. # Other uses The error model and the `Status` message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments. Example uses of this error model include: - Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors. - Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose. - Batch operations. If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response. - Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message. - Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.", - "properties": { - "code": { - "type": "integer", - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32" - }, - "message": { - "type": "string", - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client." - }, - "details": { - "type": "array", - "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", - "items": { - "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." 
- } - } - } - } - }, - "ListClustersResponse": { - "id": "ListClustersResponse", - "type": "object", - "description": "The list of all clusters in a project.", - "properties": { - "clusters": { - "type": "array", - "description": "[Output-only] The clusters in the project.", - "items": { - "$ref": "Cluster" - } + "location": "query" }, - "nextPageToken": { - "type": "string", - "description": "The standard List next-page token." - } - } - }, - "DiagnoseClusterRequest": { - "id": "DiagnoseClusterRequest", - "type": "object", - "description": "A request to collect cluster diagnostic information." - }, - "SubmitJobRequest": { - "id": "SubmitJobRequest", - "type": "object", - "description": "A request to submit a job.", - "properties": { - "job": { - "$ref": "Job", - "description": "[Required] The job resource." - } - } - }, - "Job": { - "id": "Job", - "type": "object", - "description": "A Cloud Dataproc job resource.", - "properties": { - "reference": { - "$ref": "JobReference", - "description": "[Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." - }, - "placement": { - "$ref": "JobPlacement", - "description": "[Required] Job information, including how, when, and where to run the job." - }, - "hadoopJob": { - "$ref": "HadoopJob", - "description": "Job is a Hadoop job." - }, - "sparkJob": { - "$ref": "SparkJob", - "description": "Job is a Spark job." - }, - "pysparkJob": { - "$ref": "PySparkJob", - "description": "Job is a Pyspark job." - }, - "hiveJob": { - "$ref": "HiveJob", - "description": "Job is a Hive job." - }, - "pigJob": { - "$ref": "PigJob", - "description": "Job is a Pig job." - }, - "sparkSqlJob": { - "$ref": "SparkSqlJob", - "description": "Job is a SparkSql job." - }, - "status": { - "$ref": "JobStatus", - "description": "[Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields." - }, - "statusHistory": { - "type": "array", - "description": "[Output-only] The previous job status.", - "items": { - "$ref": "JobStatus" - } - }, - "yarnApplications": { - "type": "array", - "description": "[Output-only] The collection of YARN applications spun up by this job.", - "items": { - "$ref": "YarnApplication" - } - }, - "submittedBy": { - "type": "string", - "description": "[Output-only] The email address of the user submitting the job. For jobs submitted on the cluster, the address is username@hostname." - }, - "driverInputResourceUri": { - "type": "string", - "description": "[Output-only] A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive." - }, - "driverOutputResourceUri": { - "type": "string", - "description": "[Output-only] A URI pointing to the location of the stdout of the job's driver program." - }, - "driverControlFilesUri": { - "type": "string", - "description": "[Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`." - }, - "interactive": { - "type": "boolean", - "description": "[Optional] If set to `true`, the driver's stdin will be kept open and `driver_input_uri` will be set to provide a path at which additional input can be sent to the driver." 
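The Job schema here carries exactly one payload (hadoopJob, sparkJob, pysparkJob, hiveJob, pigJob, or sparkSqlJob) plus a placement naming the target cluster, and jobs.submit wraps such a Job in a SubmitJobRequest. A sketch under the same assumptions as the earlier v1beta1 example, with placeholder project, cluster, jar URI, and arguments:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	dataproc "google.golang.org/api/dataproc/v1beta1"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, dataproc.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := dataproc.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Exactly one job payload is set; placement.clusterName picks the cluster.
	req := &dataproc.SubmitJobRequest{
		Job: &dataproc.Job{
			Placement: &dataproc.JobPlacement{ClusterName: "example-cluster"},
			SparkJob: &dataproc.SparkJob{
				MainClass:   "org.apache.spark.examples.SparkPi",
				JarFileUris: []string{"file:///usr/lib/spark/lib/spark-examples.jar"}, // placeholder URI
				Args:        []string{"1000"},
			},
		},
	}
	job, err := svc.Projects.Jobs.Submit("my-project", req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	if job.Status != nil {
		fmt.Println("submitted; job state:", job.Status.State)
	}
}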
- }, - "labels": { - "type": "object", - "description": "[Optional] The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given job.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "JobReference": { - "id": "JobReference", - "type": "object", - "description": "Encapsulates the full scoping used to reference a job.", - "properties": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to." - }, - "jobId": { - "type": "string", - "description": "[Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters." - } - } - }, - "JobPlacement": { - "id": "JobPlacement", - "type": "object", - "description": "Cloud Dataproc job configuration.", - "properties": { - "clusterName": { - "type": "string", - "description": "[Required] The name of the cluster where the job will be submitted." - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] A cluster UUID generated by the Dataproc service when the job is submitted." - } - } - }, - "HadoopJob": { - "id": "HadoopJob", - "type": "object", - "description": "A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.", - "properties": { - "mainJarFileUri": { - "type": "string", - "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'" - }, - "mainClass": { - "type": "string", - "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" }, - "jarFileUris": { - "type": "array", - "description": "[Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", - "items": { - "type": "string" - } + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", "type": "string" - } }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", - "items": { - "type": "string" - } + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", - "additionalProperties": { - "type": "string" - } + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "LoggingConfiguration": { - "id": "LoggingConfiguration", - "type": "object", - "description": "The runtime logging configuration of the job.", - "properties": { - "driverLogLevels": { - "type": "object", - "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - "additionalProperties": { + "$.xgafv": { + "description": "V1 error format.", "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", "enum": [ - "LEVEL_UNSPECIFIED", - "ALL", - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL", - "OFF" + "1", + "2" ] - } - } - } - }, - "SparkJob": { - "id": "SparkJob", - "type": "object", - "description": "A Cloud Dataproc job for running Spark applications on YARN.", - "properties": { - "mainJarFileUri": { - "type": "string", - "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class." - }, - "mainClass": { - "type": "string", - "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } - }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "PySparkJob": { - "id": "PySparkJob", - "type": "object", - "description": "A Cloud Dataproc job for running PySpark applications on YARN.", - "properties": { - "mainPythonFileUri": { - "type": "string", - "description": "[Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file." - }, - "args": { - "type": "array", - "description": "[Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - "items": { - "type": "string" - } - }, - "pythonFileUris": { - "type": "array", - "description": "[Optional] HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", - "items": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", - "items": { - "type": "string" - } - }, - "fileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", - "items": { - "type": "string" - } - }, - "archiveUris": { - "type": "array", - "description": "[Optional] HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", - "items": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "HiveJob": { - "id": "HiveJob", - "type": "object", - "description": "A Cloud Dataproc job for running Hive queries on YARN.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains Hive queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "continueOnFailure": { - "type": "boolean", - "description": "[Optional] Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries." - }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", - "additionalProperties": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", - "additionalProperties": { - "type": "string" - } }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", - "items": { - "type": "string" - } - } - } - }, - "QueryList": { - "id": "QueryList", - "type": "object", - "description": "A list of queries to run on a cluster.", - "properties": { - "queries": { - "type": "array", - "description": "[Required] The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - "items": { - "type": "string" - } - } - } - }, - "PigJob": { - "id": "PigJob", - "type": "object", - "description": "A Cloud Dataproc job for running Pig queries on YARN.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains the Pig queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." - }, - "continueOnFailure": { - "type": "boolean", - "description": "[Optional] Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries." - }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", - "additionalProperties": { - "type": "string" - } - }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", - "additionalProperties": { - "type": "string" - } - }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", - "items": { - "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "SparkSqlJob": { - "id": "SparkSqlJob", - "type": "object", - "description": "A Cloud Dataproc job for running Spark SQL queries.", - "properties": { - "queryFileUri": { - "type": "string", - "description": "The HCFS URI of the script that contains SQL queries." - }, - "queryList": { - "$ref": "QueryList", - "description": "A list of queries." 
- }, - "scriptVariables": { - "type": "object", - "description": "[Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", - "additionalProperties": { - "type": "string" - } + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" }, - "properties": { - "type": "object", - "description": "[Optional] A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.", - "additionalProperties": { - "type": "string" - } + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" }, - "jarFileUris": { - "type": "array", - "description": "[Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH.", - "items": { + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", "type": "string" - } - }, - "loggingConfiguration": { - "$ref": "LoggingConfiguration", - "description": "[Optional] The runtime log configuration for job execution." - } - } - }, - "JobStatus": { - "id": "JobStatus", - "type": "object", - "description": "Cloud Dataproc job status.", - "properties": { - "state": { - "type": "string", - "description": "[Required] A state message specifying the overall job state.", - "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "SETUP_DONE", - "RUNNING", - "CANCEL_PENDING", - "CANCEL_STARTED", - "CANCELLED", - "DONE", - "ERROR" - ] - }, - "details": { - "type": "string", - "description": "[Optional] Job state details, such as an error description if the state is ERROR." - }, - "stateStartTime": { - "type": "string", - "description": "[Output-only] The time when this state was entered." - } - } - }, - "YarnApplication": { - "id": "YarnApplication", - "type": "object", - "description": "A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.", - "properties": { - "name": { - "type": "string", - "description": "[Required] The application name." - }, - "state": { - "type": "string", - "description": "[Required] The application state.", - "enum": [ - "STATE_UNSPECIFIED", - "NEW", - "NEW_SAVING", - "SUBMITTED", - "ACCEPTED", - "RUNNING", - "FINISHED", - "FAILED", - "KILLED" - ] }, - "progress": { - "type": "number", - "description": "[Required] The numerical progress of the application, from 1 to 100.", - "format": "float" - }, - "trackingUrl": { - "type": "string", - "description": "[Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access." 
+ "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" } - } }, - "ListJobsResponse": { - "id": "ListJobsResponse", - "type": "object", - "description": "A list of jobs in a project.", - "properties": { - "jobs": { - "type": "array", - "description": "[Output-only] Jobs list.", - "items": { - "$ref": "Job" - } - }, - "nextPageToken": { - "type": "string", - "description": "[Optional] This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the `page_token` in a subsequent ListJobsRequest." - } - } - }, - "CancelJobRequest": { - "id": "CancelJobRequest", - "type": "object", - "description": "A request to cancel a job." - }, - "Empty": { - "id": "Empty", - "type": "object", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`." - }, - "ListOperationsResponse": { - "id": "ListOperationsResponse", - "type": "object", - "description": "The response message for Operations.ListOperations.", - "properties": { - "operations": { - "type": "array", - "description": "A list of operations that matches the specified filter in the request.", - "items": { - "$ref": "Operation" - } - }, - "nextPageToken": { - "type": "string", - "description": "The standard List next-page token." - } - } - }, - "CancelOperationRequest": { - "id": "CancelOperationRequest", - "type": "object", - "description": "The request message for Operations.CancelOperation." - }, - "DiagnoseClusterResults": { - "id": "DiagnoseClusterResults", - "type": "object", - "description": "The location of diagnostic output.", - "properties": { - "outputUri": { - "type": "string", - "description": "[Output-only] The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics." - } - } - }, - "ClusterOperationMetadata": { - "id": "ClusterOperationMetadata", - "type": "object", - "description": "Metadata describing the operation.", - "properties": { - "clusterName": { - "type": "string", - "description": "[Output-only] Name of the cluster for the operation." - }, - "clusterUuid": { - "type": "string", - "description": "[Output-only] Cluster UUID for the operation." - }, - "status": { - "$ref": "ClusterOperationStatus", - "description": "[Output-only] Current operation status." - }, - "statusHistory": { - "type": "array", - "description": "[Output-only] The previous operation status.", - "items": { - "$ref": "ClusterOperationStatus" - } - }, - "operationType": { - "type": "string", - "description": "[Output-only] The operation type." - }, - "description": { - "type": "string", - "description": "[Output-only] Short description of operation." 
- }, - "labels": { - "type": "object", - "description": "[Output-only] labels associated with the operation", - "additionalProperties": { - "type": "string" - } - } - } - }, - "ClusterOperationStatus": { - "id": "ClusterOperationStatus", - "type": "object", - "description": "The status of the operation.", - "properties": { - "state": { - "type": "string", - "description": "[Output-only] A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "schemas": { + "PySparkJob": { + "description": "A Cloud Dataproc job for running PySpark applications on YARN.", + "type": "object", + "properties": { + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "loggingConfiguration": { + "description": "Optional The runtime log configuration for job execution.", + "$ref": "LoggingConfiguration" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + "type": "object" + }, + "args": { + "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "type": "array", + "items": { + "type": "string" + } + }, + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "pythonFileUris": { + "description": "Optional HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainPythonFileUri": { + "description": "Required The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file.", + "type": "string" + }, + "archiveUris": { + "description": "Optional HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "PySparkJob" }, - "innerState": { - "type": "string", - "description": "[Output-only] A message containing the detailed operation state." + "ClusterMetrics": { + "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.", + "type": "object", + "properties": { + "yarnMetrics": { + "additionalProperties": { + "format": "int64", + "type": "string" + }, + "description": "The YARN metrics.", + "type": "object" + }, + "hdfsMetrics": { + "description": "The HDFS metrics.", + "type": "object", + "additionalProperties": { + "format": "int64", + "type": "string" + } + } + }, + "id": "ClusterMetrics" }, - "details": { - "type": "string", - "description": "[Output-only]A message containing any operation metadata details." + "ClusterConfiguration": { + "id": "ClusterConfiguration", + "description": "The cluster configuration.", + "type": "object", + "properties": { + "initializationActions": { + "description": "Optional Commands to execute on each node after configuration is completed. 
By default, executables are run on master and all worker nodes. You can test a node's \u003ccode\u003erole\u003c/code\u003e metadata to run an executable on a master or worker node, as shown below:\nROLE=$(/usr/share/google/get_metadata_value attributes/role)\nif [[ \"${ROLE}\" == 'Master' ]]; then\n ... master specific actions ...\nelse\n ... worker specific actions ...\nfi\n", + "type": "array", + "items": { + "$ref": "NodeInitializationAction" + } + }, + "workerConfiguration": { + "$ref": "InstanceGroupConfiguration", + "description": "Optional The Google Compute Engine configuration settings for worker instances in a cluster." + }, + "softwareConfiguration": { + "description": "Optional The configuration settings for software inside the cluster.", + "$ref": "SoftwareConfiguration" + }, + "gceClusterConfiguration": { + "$ref": "GceClusterConfiguration", + "description": "Required The shared Google Compute Engine configuration settings for all instances in a cluster." + }, + "configurationBucket": { + "description": "Optional A Google Cloud Storage staging bucket used for sharing generated SSH keys and configuration. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, and then it will create and manage this project-level, per-location bucket for you.", + "type": "string" + }, + "masterConfiguration": { + "$ref": "InstanceGroupConfiguration", + "description": "Optional The Google Compute Engine configuration settings for the master instance in a cluster." + }, + "secondaryWorkerConfiguration": { + "description": "Optional The Google Compute Engine configuration settings for additional worker instances in a cluster.", + "$ref": "InstanceGroupConfiguration" + } + } }, - "stateStartTime": { - "type": "string", - "description": "[Output-only] The time this state was entered." - } - } - }, - "DiagnoseClusterOutputLocation": { - "id": "DiagnoseClusterOutputLocation", - "type": "object", - "description": "The location where output from diagnostic command can be found.", - "properties": { - "outputUri": { - "type": "string", - "description": "[Output-only] The Google Cloud Storage URI of the diagnostic output. This will be a plain text file with summary of collected diagnostics." - } - } - }, - "OperationMetadata": { - "id": "OperationMetadata", - "type": "object", - "description": "Metadata describing the operation.", - "properties": { - "state": { - "type": "string", - "description": "A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "LoggingConfiguration": { + "description": "The runtime logging configuration of the job.", + "type": "object", + "properties": { + "driverLogLevels": { + "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + "type": "object", + "additionalProperties": { + "type": "string", + "enum": [ + "LEVEL_UNSPECIFIED", + "ALL", + "TRACE", + "DEBUG", + "INFO", + "WARN", + "ERROR", + "FATAL", + "OFF" + ] + } + } + }, + "id": "LoggingConfiguration" }, - "innerState": { - "type": "string", - "description": "A message containing the detailed operation state." 
+ "InstanceGroupConfiguration": { + "id": "InstanceGroupConfiguration", + "description": "The configuration settings for Google Compute Engine resources in an instance group, such as a master or worker group.", + "type": "object", + "properties": { + "isPreemptible": { + "description": "Specifies that this instance group contains Preemptible Instances.", + "type": "boolean" + }, + "imageUri": { + "description": "Output-only The Google Compute Engine image resource used for cluster instances. Inferred from SoftwareConfiguration.image_version.", + "type": "string" + }, + "machineTypeUri": { + "description": "The Google Compute Engine machine type used for cluster instances. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2.", + "type": "string" + }, + "instanceNames": { + "description": "The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group if not set by user (recommended practice is to let Dataproc derive the name).", + "type": "array", + "items": { + "type": "string" + } + }, + "managedGroupConfiguration": { + "description": "Output-only The configuration for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + "$ref": "ManagedGroupConfiguration" + }, + "numInstances": { + "description": "The number of VM instances in the instance group. For master instance groups, must be set to 1.", + "format": "int32", + "type": "integer" + }, + "diskConfiguration": { + "description": "Disk option configuration settings.", + "$ref": "DiskConfiguration" + } + } }, - "details": { - "type": "string", - "description": "A message containing any operation metadata details." + "GceClusterConfiguration": { + "description": "Common configuration settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.", + "type": "object", + "properties": { + "networkUri": { + "description": "The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see https://cloud.google.com/compute/docs/subnetworks for more information). Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default.", + "type": "string" + }, + "zoneUri": { + "description": "Required The zone where the Google Compute Engine cluster will be located. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone].", + "type": "string" + }, + "internalIpOnly": { + "description": "If true, all instances in the cluser will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + "type": "boolean" + }, + "metadata": { + "description": "The Google Compute Engine metadata entries to add to all instances.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "serviceAccountScopes": { + "description": "The URIs of service account scopes to be included in Google Compute Engine instances. 
The following base set of scopes is always included: - https://www.googleapis.com/auth/cloud.useraccounts.readonly - https://www.googleapis.com/auth/devstorage.read_write - https://www.googleapis.com/auth/logging.write If no scopes are specfied, the following defaults are also provided: - https://www.googleapis.com/auth/bigquery - https://www.googleapis.com/auth/bigtable.admin.table - https://www.googleapis.com/auth/bigtable.data - https://www.googleapis.com/auth/devstorage.full_control", + "type": "array", + "items": { + "type": "string" + } + }, + "tags": { + "description": "The Google Compute Engine tags to add to all instances.", + "type": "array", + "items": { + "type": "string" + } + }, + "serviceAccount": { + "description": "Optional The service account of the instances. Defaults to the default Google Compute Engine service account. Custom service accounts need permissions equivalent to the folloing IAM roles:\nroles/logging.logWriter\nroles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: [account_id]@[project_id].iam.gserviceaccount.com", + "type": "string" + }, + "subnetworkUri": { + "description": "The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.", + "type": "string" + } + }, + "id": "GceClusterConfiguration" }, - "insertTime": { - "type": "string", - "description": "The time that the operation was requested." + "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", + "type": "object", + "properties": {}, + "id": "CancelOperationRequest" }, - "startTime": { - "type": "string", - "description": "The time that the operation was started by the server." + "DiagnoseClusterOutputLocation": { + "description": "The location of diagnostic output.", + "type": "object", + "properties": { + "outputUri": { + "description": "Output-only The Google Cloud Storage URI of the diagnostic output. This is a plain text file with a summary of collected diagnostics.", + "type": "string" + } + }, + "id": "DiagnoseClusterOutputLocation" }, - "endTime": { - "type": "string", - "description": "The time that the operation completed." + "Operation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "type": "object", + "properties": { + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should have the format of operations/some/unique/name.", + "type": "string" + }, + "error": { + "description": "The error result of the operation in case of failure or cancellation.", + "$ref": "Status" + }, + "metadata": { + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + }, + "done": { + "description": "If the value is false, it means the operation is still in progress. 
If true, the operation is completed, and either error or response is available.", + "type": "boolean" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", + "type": "object" + } + }, + "id": "Operation" }, - "clusterName": { - "type": "string", - "description": "Name of the cluster for the operation." + "OperationStatus": { + "id": "OperationStatus", + "description": "The status of the operation.", + "type": "object", + "properties": { + "innerState": { + "description": "A message containing the detailed operation state.", + "type": "string" + }, + "stateStartTime": { + "description": "The time this state was entered.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "A message containing the operation state.", + "type": "string", + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is running.", + "The operation is done; either cancelled or completed." + ], + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ] + }, + "details": { + "description": "A message containing any operation metadata details.", + "type": "string" + } + } }, - "clusterUuid": { - "type": "string", - "description": "Cluster UUId for the operation." + "JobReference": { + "description": "Encapsulates the full scoping used to reference a job.", + "type": "object", + "properties": { + "projectId": { + "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + "type": "string" + }, + "jobId": { + "description": "Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.", + "type": "string" + } + }, + "id": "JobReference" }, - "status": { - "$ref": "OperationStatus", - "description": "[Output-only] Current operation status." + "SubmitJobRequest": { + "id": "SubmitJobRequest", + "description": "A request to submit a job.", + "type": "object", + "properties": { + "job": { + "$ref": "Job", + "description": "Required The job resource." + } + } }, - "statusHistory": { - "type": "array", - "description": "[Output-only] Previous operation status.", - "items": { - "$ref": "OperationStatus" - } + "Status": { + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. 
The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc which can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting purpose.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { + "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + } + } + }, + "id": "Status" }, - "operationType": { - "type": "string", - "description": "[Output-only] The operation type." + "JobScheduling": { + "id": "JobScheduling", + "description": "Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release.", + "type": "object", + "properties": { + "maxFailuresPerHour": { + "description": "Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.", + "format": "int32", + "type": "integer" + } + } }, - "description": { - "type": "string", - "description": "[Output-only] Short description of operation." 
- } - } - }, - "OperationStatus": { - "id": "OperationStatus", - "type": "object", - "description": "The status of the operation.", - "properties": { - "state": { - "type": "string", - "description": "A message containing the operation state.", - "enum": [ - "UNKNOWN", - "PENDING", - "RUNNING", - "DONE" - ] + "NodeInitializationAction": { + "id": "NodeInitializationAction", + "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", + "type": "object", + "properties": { + "executableFile": { + "description": "Required Google Cloud Storage URI of executable file.", + "type": "string" + }, + "executionTimeout": { + "description": "Optional Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + "format": "google-duration", + "type": "string" + } + } }, - "innerState": { - "type": "string", - "description": "A message containing the detailed operation state." + "ListJobsResponse": { + "id": "ListJobsResponse", + "description": "A list of jobs in a project.", + "type": "object", + "properties": { + "jobs": { + "description": "Output-only Jobs list.", + "type": "array", + "items": { + "$ref": "Job" + } + }, + "nextPageToken": { + "description": "Optional This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListJobsRequest\u003c/code\u003e.", + "type": "string" + } + } }, - "details": { - "type": "string", - "description": "A message containing any operation metadata details." + "CancelJobRequest": { + "description": "A request to cancel a job.", + "type": "object", + "properties": {}, + "id": "CancelJobRequest" }, - "stateStartTime": { - "type": "string", - "description": "The time this state was entered." 
- } - } - } - }, - "resources": { - "projects": { - "resources": { - "clusters": { - "methods": { - "create": { - "id": "dataproc.projects.clusters.create", - "path": "v1beta1/projects/{projectId}/clusters", - "httpMethod": "POST", - "description": "Creates a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "projectId" - ], - "request": { - "$ref": "Cluster" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "patch": { - "id": "dataproc.projects.clusters.patch", - "path": "v1beta1/projects/{projectId}/clusters/{clusterName}", - "httpMethod": "PATCH", - "description": "Updates a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project the cluster belongs to.", - "required": true, - "location": "path" + "SparkSqlJob": { + "description": "A Cloud Dataproc job for running Spark SQL queries.", + "type": "object", + "properties": { + "queryFileUri": { + "description": "The HCFS URI of the script that contains SQL queries.", + "type": "string" }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" - }, - "updateMask": { - "type": "string", - "description": "[Required] Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as configuration.worker_configuration.num_instances, and the `PATCH` request body would specify the new value, as follows: { \"configuration\":{ \"workerConfiguration\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the `PATCH` request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } Note: Currently, config.worker_config.num_instances and config.secondary_worker_config.num_instances are the only fields that can be updated.", - "location": "query" + "queryList": { + "description": "A list of queries.", + "$ref": "QueryList" + }, + "scriptVariables": { + "description": "Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.", + "type": "array", + "items": { + "type": "string" + } + }, + "loggingConfiguration": { + "description": "Optional The runtime log configuration for job execution.", + "$ref": "LoggingConfiguration" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Cloud Dataproc API may be overwritten.", + "type": "object" } - }, - "parameterOrder": [ - "projectId", - "clusterName" - ], - "request": { - "$ref": "Cluster" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "delete": { - "id": "dataproc.projects.clusters.delete", - "path": "v1beta1/projects/{projectId}/clusters/{clusterName}", - "httpMethod": "DELETE", - "description": "Deletes a cluster in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" + "id": "SparkSqlJob" + }, + "Cluster": { + "description": "Describes the identifying information, configuration, and status of a cluster of Google Compute Engine instances.", + "type": "object", + "properties": { + "labels": { + "description": "Optional The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 64 labels can be associated with a given cluster.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "metrics": { + "$ref": "ClusterMetrics", + "description": "Contains cluster daemon metrics such as HDFS and YARN stats." + }, + "status": { + "description": "Output-only Cluster status.", + "$ref": "ClusterStatus" + }, + "statusHistory": { + "description": "Output-only Previous cluster statuses.", + "type": "array", + "items": { + "$ref": "ClusterStatus" + } }, "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "projectId", - "clusterName" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "get": { - "id": "dataproc.projects.clusters.get", - "path": "v1beta1/projects/{projectId}/clusters/{clusterName}", - "httpMethod": "GET", - "description": "Gets the resource representation for a cluster in a project.", - "parameters": { + "description": "Required The cluster name. Cluster names within a project must be unique. Names from deleted clusters can be reused.", + "type": "string" + }, + "clusterUuid": { + "description": "Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster.", + "type": "string" + }, "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" + "description": "Required The Google Cloud Platform project ID that the cluster belongs to.", + "type": "string" }, - "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + "configuration": { + "$ref": "ClusterConfiguration", + "description": "Required The cluster configuration. Note that Cloud Dataproc may set default values, and values may change when clusters are updated." 
} - }, - "parameterOrder": [ - "projectId", - "clusterName" - ], - "response": { - "$ref": "Cluster" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "list": { - "id": "dataproc.projects.clusters.list", - "path": "v1beta1/projects/{projectId}/clusters", - "httpMethod": "GET", - "description": "Lists all clusters in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" - }, - "filter": { - "type": "string", - "description": "[Optional] A filter constraining which clusters to list. Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The standard List page size.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The standard List page token.", - "location": "query" + "id": "Cluster" + }, + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "type": "object", + "properties": { + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Operation" + } + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" } - }, - "parameterOrder": [ - "projectId" - ], - "response": { - "$ref": "ListClustersResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "diagnose": { - "id": "dataproc.projects.clusters.diagnose", - "path": "v1beta1/projects/{projectId}/clusters/{clusterName}:diagnose", - "httpMethod": "POST", - "description": "Gets cluster diagnostic information. 
After the operation completes, the Operation.response field contains `DiagnoseClusterOutputLocation`.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", - "required": true, - "location": "path" + "id": "ListOperationsResponse" + }, + "OperationMetadata": { + "id": "OperationMetadata", + "description": "Metadata describing the operation.", + "type": "object", + "properties": { + "description": { + "description": "Output-only Short description of operation.", + "type": "string" + }, + "warnings": { + "description": "Output-only Errors encountered during operation execution.", + "type": "array", + "items": { + "type": "string" + } + }, + "status": { + "description": "Output-only Current operation status.", + "$ref": "OperationStatus" + }, + "statusHistory": { + "description": "Output-only Previous operation status.", + "type": "array", + "items": { + "$ref": "OperationStatus" + } }, "clusterName": { - "type": "string", - "description": "[Required] The cluster name.", - "required": true, - "location": "path" + "description": "Name of the cluster for the operation.", + "type": "string" + }, + "clusterUuid": { + "description": "Cluster UUId for the operation.", + "type": "string" + }, + "operationType": { + "description": "Output-only The operation type.", + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "clusterName" - ], - "request": { - "$ref": "DiagnoseClusterRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] } - } }, - "jobs": { - "methods": { - "submit": { - "id": "dataproc.projects.jobs.submit", - "path": "v1beta1/projects/{projectId}/jobs:submit", - "httpMethod": "POST", - "description": "Submits a job to a cluster.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" + "JobPlacement": { + "id": "JobPlacement", + "description": "Cloud Dataproc job configuration.", + "type": "object", + "properties": { + "clusterName": { + "description": "Required The name of the cluster where the job will be submitted.", + "type": "string" + }, + "clusterUuid": { + "description": "Output-only A cluster UUID generated by the Dataproc service when the job is submitted.", + "type": "string" + } + } + }, + "ClusterStatus": { + "description": "The status of a cluster and its instances.", + "type": "object", + "properties": { + "detail": { + "description": "Optional details of cluster's state.", + "type": "string" + }, + "state": { + "enumDescriptions": [ + "The cluster state is unknown.", + "The cluster is being created and set up. It is not ready for use.", + "The cluster is currently running and healthy. It is ready for use.", + "The cluster encountered an error. It is not ready for use.", + "The cluster is being deleted. It cannot be used.", + "The cluster is being updated. It continues to accept and process jobs." 
+ ], + "enum": [ + "UNKNOWN", + "CREATING", + "RUNNING", + "ERROR", + "DELETING", + "UPDATING" + ], + "description": "The cluster's state.", + "type": "string" + }, + "stateStartTime": { + "description": "Time when this state was entered.", + "format": "google-datetime", + "type": "string" } - }, - "parameterOrder": [ - "projectId" - ], - "request": { - "$ref": "SubmitJobRequest" - }, - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "get": { - "id": "dataproc.projects.jobs.get", - "path": "v1beta1/projects/{projectId}/jobs/{jobId}", - "httpMethod": "GET", - "description": "Gets the resource representation for a job in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" + "id": "ClusterStatus" + }, + "PigJob": { + "id": "PigJob", + "description": "A Cloud Dataproc job for running Pig queries on YARN.", + "type": "object", + "properties": { + "continueOnFailure": { + "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + "queryFileUri": { + "description": "The HCFS URI of the script that contains the Pig queries.", + "type": "string" + }, + "queryList": { + "description": "A list of queries.", + "$ref": "QueryList" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + "type": "array", + "items": { + "type": "string" + } + }, + "scriptVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional Mapping of query variable names to values (equivalent to the Pig command: name=[value]).", + "type": "object" + }, + "loggingConfiguration": { + "$ref": "LoggingConfiguration", + "description": "Optional The runtime log configuration for job execution." + }, + "properties": { + "description": "Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "ManagedGroupConfiguration": { + "description": "Specifies the resources used to actively manage an instance group.", + "type": "object", + "properties": { + "instanceGroupManagerName": { + "description": "Output-only The name of the Instance Group Manager for this group.", + "type": "string" + }, + "instanceTemplateName": { + "description": "Output-only The name of the Instance Template used for the Managed Instance Group.", + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "jobId" - ], - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "list": { - "id": "dataproc.projects.jobs.list", - "path": "v1beta1/projects/{projectId}/jobs", - "httpMethod": "GET", - "description": "Lists jobs in a project.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" - }, - "pageSize": { - "type": "integer", - "description": "[Optional] The number of results to return in each response.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "[Optional] The page token, returned by a previous call, to request the next page of results.", - "location": "query" + "id": "ManagedGroupConfiguration" + }, + "ListClustersResponse": { + "description": "The list of all clusters in a project.", + "type": "object", + "properties": { + "clusters": { + "description": "Output-only The clusters in the project.", + "type": "array", + "items": { + "$ref": "Cluster" + } }, - "clusterName": { - "type": "string", - "description": "[Optional] If set, the returned jobs list includes only jobs that were submitted to the named cluster.", - "location": "query" - }, - "jobStateMatcher": { - "type": "string", - "description": "[Optional] Specifies enumerated categories of jobs to list.", - "enum": [ - "ALL", - "ACTIVE", - "NON_ACTIVE" - ], - "location": "query" - }, - "filter": { - "type": "string", - "description": "[Optional] A filter constraining which jobs to list. Valid filters contain job state and label terms such as: labels.key1 = val1 AND (labels.k2 = val2 OR labels.k3 = val3)", - "location": "query" + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" } - }, - "parameterOrder": [ - "projectId" - ], - "response": { - "$ref": "ListJobsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "cancel": { - "id": "dataproc.projects.jobs.cancel", - "path": "v1beta1/projects/{projectId}/jobs/{jobId}:cancel", - "httpMethod": "POST", - "description": "Starts a job cancellation request. 
To access the job resource after cancellation, call [jobs.list](/dataproc/reference/rest/v1beta1/projects.jobs/list) or [jobs.get](/dataproc/reference/rest/v1beta1/projects.jobs/get).", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" + "id": "ListClustersResponse" + }, + "Job": { + "description": "A Cloud Dataproc job resource.", + "type": "object", + "properties": { + "yarnApplications": { + "description": "Output-only The collection of YARN applications spun up by this job.", + "type": "array", + "items": { + "$ref": "YarnApplication" + } }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + "pysparkJob": { + "description": "Job is a Pyspark job.", + "$ref": "PySparkJob" + }, + "reference": { + "description": "Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a \u003ccode\u003ejob_id\u003c/code\u003e.", + "$ref": "JobReference" + }, + "interactive": { + "description": "Optional If set to true, the driver's stdin will be kept open and driver_input_uri will be set to provide a path at which additional input can be sent to the driver.", + "type": "boolean" + }, + "driverInputResourceUri": { + "description": "Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.", + "type": "string" + }, + "hadoopJob": { + "description": "Job is a Hadoop job.", + "$ref": "HadoopJob" + }, + "status": { + "description": "Output-only The job status. Additional application-specific status information may be contained in the \u003ccode\u003etype_job\u003c/code\u003e and \u003ccode\u003eyarn_applications\u003c/code\u003e fields.", + "$ref": "JobStatus" + }, + "placement": { + "$ref": "JobPlacement", + "description": "Required Job information, including how, when, and where to run the job." + }, + "driverControlFilesUri": { + "description": "Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.", + "type": "string" + }, + "submittedBy": { + "description": "Output-only The email address of the user submitting the job. For jobs submitted on the cluster, the address is \u003ccode\u003eusername@hostname\u003c/code\u003e.", + "type": "string" + }, + "scheduling": { + "$ref": "JobScheduling", + "description": "Optional Job scheduling configuration." 
+ }, + "pigJob": { + "description": "Job is a Pig job.", + "$ref": "PigJob" + }, + "hiveJob": { + "description": "Job is a Hive job.", + "$ref": "HiveJob" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 64 labels can be associated with a given job.", + "type": "object" + }, + "driverOutputResourceUri": { + "description": "Output-only A URI pointing to the location of the stdout of the job's driver program.", + "type": "string" + }, + "sparkSqlJob": { + "$ref": "SparkSqlJob", + "description": "Job is a SparkSql job." + }, + "statusHistory": { + "description": "Output-only The previous job status.", + "type": "array", + "items": { + "$ref": "JobStatus" + } + }, + "sparkJob": { + "$ref": "SparkJob", + "description": "Job is a Spark job." } - }, - "parameterOrder": [ - "projectId", - "jobId" - ], - "request": { - "$ref": "CancelJobRequest" - }, - "response": { - "$ref": "Job" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "delete": { - "id": "dataproc.projects.jobs.delete", - "path": "v1beta1/projects/{projectId}/jobs/{jobId}", - "httpMethod": "DELETE", - "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", - "parameters": { - "projectId": { - "type": "string", - "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", - "required": true, - "location": "path" + "id": "Job" + }, + "SparkJob": { + "id": "SparkJob", + "description": "A Cloud Dataproc job for running Spark applications on YARN.", + "type": "object", + "properties": { + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + "type": "array", + "items": { + "type": "string" + } }, - "jobId": { - "type": "string", - "description": "[Required] The job ID.", - "required": true, - "location": "path" + "loggingConfiguration": { + "$ref": "LoggingConfiguration", + "description": "Optional The runtime log configuration for job execution." + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + "type": "object" + }, + "args": { + "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "type": "array", + "items": { + "type": "string" + } + }, + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainClass": { + "description": "The name of the driver's main class. 
The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.", + "type": "string" + }, + "archiveUris": { + "description": "Optional HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainJarFileUri": { + "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class.", + "type": "string" } - }, - "parameterOrder": [ - "projectId", - "jobId" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] } - } - } - } - }, - "operations": { - "methods": { - "get": { - "id": "dataproc.operations.get", - "path": "v1beta1/{+name}", - "httpMethod": "GET", - "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource.", - "required": true, - "pattern": "^operations/.+$", - "location": "path" + }, + "JobStatus": { + "id": "JobStatus", + "description": "Cloud Dataproc job status.", + "type": "object", + "properties": { + "stateStartTime": { + "description": "Output-only The time when this state was entered.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "enumDescriptions": [ + "The job state is unknown.", + "The job is pending; it has been submitted, but is not yet running.", + "Job has been received by the service and completed initial setup; it will shortly be submitted to the cluster.", + "The job is running on the cluster.", + "A CancelJob request has been received, but is pending.", + "Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.", + "The job cancelation was successful.", + "The job has completed successfully.", + "The job has completed, but encountered an error.", + "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." + ], + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "SETUP_DONE", + "RUNNING", + "CANCEL_PENDING", + "CANCEL_STARTED", + "CANCELLED", + "DONE", + "ERROR", + "ATTEMPT_FAILURE" + ], + "description": "Required A state message specifying the overall job state.", + "type": "string" + }, + "details": { + "description": "Optional Job state details, such as an error description if the state is \u003ccode\u003eERROR\u003c/code\u003e.", + "type": "string" + } } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "list": { - "id": "dataproc.operations.list", - "path": "v1beta1/{+name}", - "httpMethod": "GET", - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. 
NOTE: the `name` binding below allows API services to override the binding to use different resource name schemes, such as `users/*/operations`.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation collection.", - "required": true, - "pattern": "^operations$", - "location": "path" + "DiskConfiguration": { + "description": "Specifies the configuration of disk options for a group of VM instances.", + "type": "object", + "properties": { + "bootDiskSizeGb": { + "description": "Optional Size in GB of the boot disk (default is 500GB).", + "format": "int32", + "type": "integer" + }, + "numLocalSsds": { + "description": "Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic configuration and installed binaries.", + "format": "int32", + "type": "integer" + } }, - "filter": { - "type": "string", - "description": "The standard list filter.", - "location": "query" + "id": "DiskConfiguration" + }, + "ClusterOperationStatus": { + "description": "The status of the operation.", + "type": "object", + "properties": { + "state": { + "description": "Output-only A message containing the operation state.", + "type": "string", + "enumDescriptions": [ + "Unused.", + "The operation has been created.", + "The operation is running.", + "The operation is done; either cancelled or completed." + ], + "enum": [ + "UNKNOWN", + "PENDING", + "RUNNING", + "DONE" + ] + }, + "details": { + "description": "Output-onlyA message containing any operation metadata details.", + "type": "string" + }, + "innerState": { + "description": "Output-only A message containing the detailed operation state.", + "type": "string" + }, + "stateStartTime": { + "description": "Output-only The time this state was entered.", + "format": "google-datetime", + "type": "string" + } }, - "pageSize": { - "type": "integer", - "description": "The standard list page size.", - "format": "int32", - "location": "query" + "id": "ClusterOperationStatus" + }, + "HadoopJob": { + "description": "A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.", + "type": "object", + "properties": { + "jarFileUris": { + "description": "Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "loggingConfiguration": { + "$ref": "LoggingConfiguration", + "description": "Optional The runtime log configuration for job execution." + }, + "properties": { + "description": "Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "args": { + "description": "Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + "type": "array", + "items": { + "type": "string" + } + }, + "fileUris": { + "description": "Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. 
Useful for naively parallel tasks.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainClass": { + "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.", + "type": "string" + }, + "archiveUris": { + "description": "Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + "type": "array", + "items": { + "type": "string" + } + }, + "mainJarFileUri": { + "description": "The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + "type": "string" + } }, - "pageToken": { - "type": "string", - "description": "The standard list page token.", - "location": "query" + "id": "HadoopJob" + }, + "QueryList": { + "id": "QueryList", + "description": "A list of queries to run on a cluster.", + "type": "object", + "properties": { + "queries": { + "description": "Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:\n\"hiveJob\": {\n \"queryList\": {\n \"queries\": [\n \"query1\",\n \"query2\",\n \"query3;query4\",\n ]\n }\n}\n", + "type": "array", + "items": { + "type": "string" + } + } } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListOperationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "cancel": { - "id": "dataproc.operations.cancel", - "path": "v1beta1/{+name}:cancel", - "httpMethod": "POST", - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use [operations.get](/dataproc/reference/rest/v1beta1/operations/get) or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource to be cancelled.", - "required": true, - "pattern": "^operations/.+$", - "location": "path" + "YarnApplication": { + "id": "YarnApplication", + "description": "A YARN application created by a job. Application information is a subset of \u003ccode\u003eorg.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto\u003c/code\u003e.", + "type": "object", + "properties": { + "name": { + "description": "Required The application name.", + "type": "string" + }, + "trackingUrl": { + "description": "Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. 
The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.", + "type": "string" + }, + "progress": { + "description": "Required The numerical progress of the application, from 1 to 100.", + "format": "float", + "type": "number" + }, + "state": { + "description": "Required The application state.", + "type": "string", + "enumDescriptions": [ + "Status is unspecified.", + "Status is NEW.", + "Status is NEW_SAVING.", + "Status is SUBMITTED.", + "Status is ACCEPTED.", + "Status is RUNNING.", + "Status is FINISHED.", + "Status is FAILED.", + "Status is KILLED." + ], + "enum": [ + "STATE_UNSPECIFIED", + "NEW", + "NEW_SAVING", + "SUBMITTED", + "ACCEPTED", + "RUNNING", + "FINISHED", + "FAILED", + "KILLED" + ] + } } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "CancelOperationRequest" - }, - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "delete": { - "id": "dataproc.operations.delete", - "path": "v1beta1/{+name}", - "httpMethod": "DELETE", - "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the operation resource to be deleted.", - "required": true, - "pattern": "^operations/.+$", - "location": "path" + "DiagnoseClusterRequest": { + "description": "A request to collect cluster diagnostic information.", + "type": "object", + "properties": {}, + "id": "DiagnoseClusterRequest" + }, + "ClusterOperationMetadata": { + "id": "ClusterOperationMetadata", + "description": "Metadata describing the operation.", + "type": "object", + "properties": { + "warnings": { + "description": "Output-only Errors encountered during operation execution.", + "type": "array", + "items": { + "type": "string" + } + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Output-only Labels associated with the operation", + "type": "object" + }, + "status": { + "$ref": "ClusterOperationStatus", + "description": "Output-only Current operation status." + }, + "statusHistory": { + "description": "Output-only The previous operation status.", + "type": "array", + "items": { + "$ref": "ClusterOperationStatus" + } + }, + "clusterUuid": { + "description": "Output-only Cluster UUID for the operation.", + "type": "string" + }, + "clusterName": { + "description": "Output-only Name of the cluster for the operation.", + "type": "string" + }, + "operationType": { + "description": "Output-only The operation type.", + "type": "string" + }, + "description": { + "description": "Output-only Short description of operation.", + "type": "string" + } } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "HiveJob": { + "id": "HiveJob", + "description": "A Cloud Dataproc job for running Hive queries on YARN.", + "type": "object", + "properties": { + "continueOnFailure": { + "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", + "type": "boolean" + }, + "queryFileUri": { + "description": "The HCFS URI of the script that contains Hive queries.", + "type": "string" + }, + "queryList": { + "$ref": "QueryList", + "description": "A list of queries." + }, + "scriptVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).", + "type": "object" + }, + "jarFileUris": { + "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + "type": "array", + "items": { + "type": "string" + } + }, + "properties": { + "description": "Optional A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "DiagnoseClusterResults": { + "description": "The location of diagnostic output.", + "type": "object", + "properties": { + "outputUri": { + "description": "Output-only The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.", + "type": "string" + } + }, + "id": "DiagnoseClusterResults" + }, + "SoftwareConfiguration": { + "description": "Specifies the selection and configuration of software inside the cluster.", + "type": "object", + "properties": { + "imageVersion": { + "description": "Optional The version of software inside the cluster. It must match the regular expression [0-9]+\\.[0-9]+. If unspecified, it defaults to the latest version (see Cloud Dataproc Versioning).", + "type": "string" + }, + "properties": { + "description": "Optional The properties to set on daemon configuration files.Property keys are specified in \"prefix:property\" format, such as \"core:fs.defaultFS\". 
The following are supported prefixes and their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - pig.properties spark - spark-defaults.conf", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "id": "SoftwareConfiguration" } - } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" } - } } diff --git a/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-gen.go b/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-gen.go index 80645dbd7..61a61fa05 100644 --- a/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-gen.go +++ b/vendor/google.golang.org/api/dataproc/v1beta1/dataproc-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Operations *OperationsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -132,39 +137,39 @@ type CancelOperationRequest struct { // Cluster: Describes the identifying information, configuration, and // status of a cluster of Google Compute Engine instances. type Cluster struct { - // ClusterName: [Required] The cluster name. Cluster names within a + // ClusterName: Required The cluster name. Cluster names within a // project must be unique. Names from deleted clusters can be reused. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] A cluster UUID (Unique Universal + // ClusterUuid: Output-only A cluster UUID (Unique Universal // Identifier). Cloud Dataproc generates this value when it creates the // cluster. ClusterUuid string `json:"clusterUuid,omitempty"` - // Configuration: [Required] The cluster configuration. Note that Cloud + // Configuration: Required The cluster configuration. Note that Cloud // Dataproc may set default values, and values may change when clusters // are updated. Configuration *ClusterConfiguration `json:"configuration,omitempty"` - // Labels: [Optional] The labels to associate with this cluster. Label - // keys must be between 1 and 63 characters long, and must conform to - // the following PCRE regular expression: \p{Ll}\p{Lo}{0,62} Label - // values must be between 1 and 63 characters long, and must conform to - // the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No - // more than 64 labels can be associated with a given cluster. + // Labels: Optional The labels to associate with this cluster.Label keys + // must be between 1 and 63 characters long, and must conform to the + // following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values + // must be between 1 and 63 characters long, and must conform to the + // following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more + // than 64 labels can be associated with a given cluster. 
Labels map[string]string `json:"labels,omitempty"` // Metrics: Contains cluster daemon metrics such as HDFS and YARN stats. Metrics *ClusterMetrics `json:"metrics,omitempty"` - // ProjectId: [Required] The Google Cloud Platform project ID that the + // ProjectId: Required The Google Cloud Platform project ID that the // cluster belongs to. ProjectId string `json:"projectId,omitempty"` - // Status: [Output-only] Cluster status. + // Status: Output-only Cluster status. Status *ClusterStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] Previous cluster statuses. + // StatusHistory: Output-only Previous cluster statuses. StatusHistory []*ClusterStatus `json:"statusHistory,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -196,7 +201,7 @@ func (s *Cluster) MarshalJSON() ([]byte, error) { // ClusterConfiguration: The cluster configuration. type ClusterConfiguration struct { - // ConfigurationBucket: [Optional] A Google Cloud Storage staging bucket + // ConfigurationBucket: Optional A Google Cloud Storage staging bucket // used for sharing generated SSH keys and configuration. If you do not // specify a staging bucket, Cloud Dataproc will determine an // appropriate Cloud Storage location (US, ASIA, or EU) for your @@ -205,33 +210,38 @@ type ClusterConfiguration struct { // this project-level, per-location bucket for you. ConfigurationBucket string `json:"configurationBucket,omitempty"` - // GceClusterConfiguration: [Required] The shared Google Compute Engine + // GceClusterConfiguration: Required The shared Google Compute Engine // configuration settings for all instances in a cluster. GceClusterConfiguration *GceClusterConfiguration `json:"gceClusterConfiguration,omitempty"` - // InitializationActions: [Optional] Commands to execute on each node + // InitializationActions: Optional Commands to execute on each node // after configuration is completed. By default, executables are run on - // master and all worker nodes. You can test a node's role metadata to - // run an executable on a master or worker node, as shown below: - // ROLE=$(/usr/share/google/get_metadata_value attributes/role) if [[ - // "${ROLE}" == 'Master' ]]; then ... master specific actions ... else - // ... worker specific actions ... fi + // master and all worker nodes. You can test a node's role + // metadata to run an executable on a master or worker node, as shown + // below: + // ROLE=$(/usr/share/google/get_metadata_value attributes/role) + // if [[ "${ROLE}" == 'Master' ]]; then + // ... master specific actions ... + // else + // ... worker specific actions ... + // fi + // InitializationActions []*NodeInitializationAction `json:"initializationActions,omitempty"` - // MasterConfiguration: [Optional] The Google Compute Engine - // configuration settings for the master instance in a cluster. + // MasterConfiguration: Optional The Google Compute Engine configuration + // settings for the master instance in a cluster. MasterConfiguration *InstanceGroupConfiguration `json:"masterConfiguration,omitempty"` - // SecondaryWorkerConfiguration: [Optional] The Google Compute Engine + // SecondaryWorkerConfiguration: Optional The Google Compute Engine // configuration settings for additional worker instances in a cluster. 
SecondaryWorkerConfiguration *InstanceGroupConfiguration `json:"secondaryWorkerConfiguration,omitempty"` - // SoftwareConfiguration: [Optional] The configuration settings for + // SoftwareConfiguration: Optional The configuration settings for // software inside the cluster. SoftwareConfiguration *SoftwareConfiguration `json:"softwareConfiguration,omitempty"` - // WorkerConfiguration: [Optional] The Google Compute Engine - // configuration settings for worker instances in a cluster. + // WorkerConfiguration: Optional The Google Compute Engine configuration + // settings for worker instances in a cluster. WorkerConfiguration *InstanceGroupConfiguration `json:"workerConfiguration,omitempty"` // ForceSendFields is a list of field names (e.g. "ConfigurationBucket") @@ -292,27 +302,30 @@ func (s *ClusterMetrics) MarshalJSON() ([]byte, error) { // ClusterOperationMetadata: Metadata describing the operation. type ClusterOperationMetadata struct { - // ClusterName: [Output-only] Name of the cluster for the operation. + // ClusterName: Output-only Name of the cluster for the operation. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] Cluster UUID for the operation. + // ClusterUuid: Output-only Cluster UUID for the operation. ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: [Output-only] Short description of operation. + // Description: Output-only Short description of operation. Description string `json:"description,omitempty"` - // Labels: [Output-only] labels associated with the operation + // Labels: Output-only Labels associated with the operation Labels map[string]string `json:"labels,omitempty"` - // OperationType: [Output-only] The operation type. + // OperationType: Output-only The operation type. OperationType string `json:"operationType,omitempty"` - // Status: [Output-only] Current operation status. + // Status: Output-only Current operation status. Status *ClusterOperationStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] The previous operation status. + // StatusHistory: Output-only The previous operation status. StatusHistory []*ClusterOperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output-only Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -338,24 +351,24 @@ func (s *ClusterOperationMetadata) MarshalJSON() ([]byte, error) { // ClusterOperationStatus: The status of the operation. type ClusterOperationStatus struct { - // Details: [Output-only]A message containing any operation metadata + // Details: Output-onlyA message containing any operation metadata // details. Details string `json:"details,omitempty"` - // InnerState: [Output-only] A message containing the detailed operation + // InnerState: Output-only A message containing the detailed operation // state. InnerState string `json:"innerState,omitempty"` - // State: [Output-only] A message containing the operation state. + // State: Output-only A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. 
State string `json:"state,omitempty"` - // StateStartTime: [Output-only] The time this state was entered. + // StateStartTime: Output-only The time this state was entered. StateStartTime string `json:"stateStartTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Details") to @@ -389,12 +402,16 @@ type ClusterStatus struct { // State: The cluster's state. // // Possible values: - // "UNKNOWN" - // "CREATING" - // "RUNNING" - // "ERROR" - // "DELETING" - // "UPDATING" + // "UNKNOWN" - The cluster state is unknown. + // "CREATING" - The cluster is being created and set up. It is not + // ready for use. + // "RUNNING" - The cluster is currently running and healthy. It is + // ready for use. + // "ERROR" - The cluster encountered an error. It is not ready for + // use. + // "DELETING" - The cluster is being deleted. It cannot be used. + // "UPDATING" - The cluster is being updated. It continues to accept + // and process jobs. State string `json:"state,omitempty"` // StateStartTime: Time when this state was entered. @@ -423,12 +440,11 @@ func (s *ClusterStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DiagnoseClusterOutputLocation: The location where output from -// diagnostic command can be found. +// DiagnoseClusterOutputLocation: The location of diagnostic output. type DiagnoseClusterOutputLocation struct { - // OutputUri: [Output-only] The Google Cloud Storage URI of the - // diagnostic output. This will be a plain text file with summary of - // collected diagnostics. + // OutputUri: Output-only The Google Cloud Storage URI of the diagnostic + // output. This is a plain text file with a summary of collected + // diagnostics. OutputUri string `json:"outputUri,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputUri") to @@ -461,9 +477,9 @@ type DiagnoseClusterRequest struct { // DiagnoseClusterResults: The location of diagnostic output. type DiagnoseClusterResults struct { - // OutputUri: [Output-only] The Google Cloud Storage URI of the - // diagnostic output. The output report is a plain text file with a - // summary of collected diagnostics. + // OutputUri: Output-only The Google Cloud Storage URI of the diagnostic + // output. The output report is a plain text file with a summary of + // collected diagnostics. OutputUri string `json:"outputUri,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputUri") to @@ -492,15 +508,15 @@ func (s *DiagnoseClusterResults) MarshalJSON() ([]byte, error) { // DiskConfiguration: Specifies the configuration of disk options for a // group of VM instances. type DiskConfiguration struct { - // BootDiskSizeGb: [Optional] Size in GB of the boot disk (default is + // BootDiskSizeGb: Optional Size in GB of the boot disk (default is // 500GB). BootDiskSizeGb int64 `json:"bootDiskSizeGb,omitempty"` - // NumLocalSsds: [Optional] Number of attached SSDs, from 0 to 4 - // (default is 0). If SSDs are not attached, the boot disk is used to - // store runtime logs and HDFS data. If one or more SSDs are attached, - // this runtime bulk data is spread across them, and the boot disk - // contains only basic configuration and installed binaries. + // NumLocalSsds: Optional Number of attached SSDs, from 0 to 4 (default + // is 0). If SSDs are not attached, the boot disk is used to store + // runtime logs and HDFS data. 
If one or more SSDs are attached, this + // runtime bulk data is spread across them, and the boot disk contains + // only basic configuration and installed binaries. NumLocalSsds int64 `json:"numLocalSsds,omitempty"` // ForceSendFields is a list of field names (e.g. "BootDiskSizeGb") to @@ -530,9 +546,12 @@ func (s *DiskConfiguration) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For -// instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for `Empty` is -// empty JSON object `{}`. +// instance: +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// The JSON representation for Empty is empty JSON object {}. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -561,10 +580,19 @@ type GceClusterConfiguration struct { // the project is used, if it exists. Cannot be a "Custom Subnet // Network" (see https://cloud.google.com/compute/docs/subnetworks for // more information). Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/g - // lobal/default`. + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default. NetworkUri string `json:"networkUri,omitempty"` + // ServiceAccount: Optional The service account of the instances. + // Defaults to the default Google Compute Engine service account. Custom + // service accounts need permissions equivalent to the folloing IAM + // roles: + // roles/logging.logWriter + // roles/storage.objectAdmin(see + // https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: + // [account_id]@[project_id].iam.gserviceaccount.com + ServiceAccount string `json:"serviceAccount,omitempty"` + // ServiceAccountScopes: The URIs of service account scopes to be // included in Google Compute Engine instances. The following base set // of scopes is always included: - @@ -581,17 +609,15 @@ type GceClusterConfiguration struct { // SubnetworkUri: The Google Compute Engine subnetwork to be used for // machine communications. Cannot be specified with network_uri. // Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/u - // s-east1/sub0`. + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0. SubnetworkUri string `json:"subnetworkUri,omitempty"` // Tags: The Google Compute Engine tags to add to all instances. Tags []string `json:"tags,omitempty"` - // ZoneUri: [Required] The zone where the Google Compute Engine cluster + // ZoneUri: Required The zone where the Google Compute Engine cluster // will be located. Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zo - // ne]`. + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]. ZoneUri string `json:"zoneUri,omitempty"` // ForceSendFields is a list of field names (e.g. "InternalIpOnly") to @@ -621,33 +647,33 @@ func (s *GceClusterConfiguration) MarshalJSON() ([]byte, error) { // HadoopJob: A Cloud Dataproc job for running Hadoop MapReduce jobs on // YARN. 
type HadoopJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of Hadoop drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, or .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include - // arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job + // Args: Optional The arguments to pass to the driver. Do not include + // arguments, such as -libjars or -Dfoo=bar, that can be set as job // properties, since a collision may occur that causes an incorrect job // submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Hadoop drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] Jar file URIs to add to the CLASSPATHs of the + // JarFileUris: Optional Jar file URIs to add to the CLASSPATHs of the // Hadoop driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` // MainClass: The name of the driver's main class. The jar file // containing the class must be in the default CLASSPATH or specified in - // `jar_file_uris`. + // jar_file_uris. MainClass string `json:"mainClass,omitempty"` // MainJarFileUri: The Hadoop Compatible Filesystem (HCFS) URI of the @@ -657,7 +683,7 @@ type HadoopJob struct { // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Hadoop. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site and classes in user code. @@ -688,19 +714,19 @@ func (s *HadoopJob) MarshalJSON() ([]byte, error) { // HiveJob: A Cloud Dataproc job for running Hive queries on YARN. type HiveJob struct { - // ContinueOnFailure: [Optional] Whether to continue executing queries - // if a query fails. The default value is `false`. Setting to `true` can - // be useful when executing independent parallel queries. + // ContinueOnFailure: Optional Whether to continue executing queries if + // a query fails. The default value is false. Setting to true can be + // useful when executing independent parallel queries. ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can - // contain Hive SerDes and UDFs. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH + // of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive + // SerDes and UDFs. JarFileUris []string `json:"jarFileUris,omitempty"` - // Properties: [Optional] A mapping of property names and values, used - // to configure Hive. Properties that conflict with values set by the - // Cloud Dataproc API may be overwritten. 
Can include properties set in + // Properties: Optional A mapping of property names and values, used to + // configure Hive. Properties that conflict with values set by the Cloud + // Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and // classes in user code. Properties map[string]string `json:"properties,omitempty"` @@ -711,8 +737,8 @@ type HiveJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values - // (equivalent to the Hive command: `SET name="value";`). + // ScriptVariables: Optional Mapping of query variable names to values + // (equivalent to the Hive command: SET name="value";). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") @@ -746,15 +772,14 @@ type InstanceGroupConfiguration struct { // DiskConfiguration: Disk option configuration settings. DiskConfiguration *DiskConfiguration `json:"diskConfiguration,omitempty"` - // ImageUri: [Output-only] The Google Compute Engine image resource used + // ImageUri: Output-only The Google Compute Engine image resource used // for cluster instances. Inferred from - // `SoftwareConfiguration.image_version`. + // SoftwareConfiguration.image_version. ImageUri string `json:"imageUri,omitempty"` // InstanceNames: The list of instance names. Dataproc derives the names - // from `cluster_name`, `num_instances`, and the instance group if not - // set by user (recommended practice is to let Dataproc derive the - // name). + // from cluster_name, num_instances, and the instance group if not set + // by user (recommended practice is to let Dataproc derive the name). InstanceNames []string `json:"instanceNames,omitempty"` // IsPreemptible: Specifies that this instance group contains @@ -763,11 +788,10 @@ type InstanceGroupConfiguration struct { // MachineTypeUri: The Google Compute Engine machine type used for // cluster instances. Example: - // `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us- - // east1-a/machineTypes/n1-standard-2`. + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2. MachineTypeUri string `json:"machineTypeUri,omitempty"` - // ManagedGroupConfiguration: [Output-only] The configuration for Google + // ManagedGroupConfiguration: Output-only The configuration for Google // Compute Engine Instance Group Manager that manages this group. This // is only used for preemptible instance groups. ManagedGroupConfiguration *ManagedGroupConfiguration `json:"managedGroupConfiguration,omitempty"` @@ -802,18 +826,18 @@ func (s *InstanceGroupConfiguration) MarshalJSON() ([]byte, error) { // Job: A Cloud Dataproc job resource. type Job struct { - // DriverControlFilesUri: [Output-only] If present, the location of + // DriverControlFilesUri: Output-only If present, the location of // miscellaneous control files which may be used as part of job setup // and handling. If not present, control files may be placed in the same - // location as `driver_output_uri`. + // location as driver_output_uri. 
DriverControlFilesUri string `json:"driverControlFilesUri,omitempty"` - // DriverInputResourceUri: [Output-only] A URI pointing to the location - // of the stdin of the job's driver program, only set if the job is + // DriverInputResourceUri: Output-only A URI pointing to the location of + // the stdin of the job's driver program, only set if the job is // interactive. DriverInputResourceUri string `json:"driverInputResourceUri,omitempty"` - // DriverOutputResourceUri: [Output-only] A URI pointing to the location + // DriverOutputResourceUri: Output-only A URI pointing to the location // of the stdout of the job's driver program. DriverOutputResourceUri string `json:"driverOutputResourceUri,omitempty"` @@ -823,55 +847,58 @@ type Job struct { // HiveJob: Job is a Hive job. HiveJob *HiveJob `json:"hiveJob,omitempty"` - // Interactive: [Optional] If set to `true`, the driver's stdin will be - // kept open and `driver_input_uri` will be set to provide a path at - // which additional input can be sent to the driver. + // Interactive: Optional If set to true, the driver's stdin will be kept + // open and driver_input_uri will be set to provide a path at which + // additional input can be sent to the driver. Interactive bool `json:"interactive,omitempty"` - // Labels: [Optional] The labels to associate with this job. Label keys + // Labels: Optional The labels to associate with this job.Label keys // must be between 1 and 63 characters long, and must conform to the - // following regular expression: \p{Ll}\p{Lo}{0,62} Label values must be + // following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be // between 1 and 63 characters long, and must conform to the following - // regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 - // labels can be associated with a given job. + // regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels + // can be associated with a given job. Labels map[string]string `json:"labels,omitempty"` // PigJob: Job is a Pig job. PigJob *PigJob `json:"pigJob,omitempty"` - // Placement: [Required] Job information, including how, when, and where + // Placement: Required Job information, including how, when, and where // to run the job. Placement *JobPlacement `json:"placement,omitempty"` // PysparkJob: Job is a Pyspark job. PysparkJob *PySparkJob `json:"pysparkJob,omitempty"` - // Reference: [Optional] The fully qualified reference to the job, which + // Reference: Optional The fully qualified reference to the job, which // can be used to obtain the equivalent REST path of the job resource. // If this property is not specified when a job is created, the server - // generates a job_id. + // generates a job_id. Reference *JobReference `json:"reference,omitempty"` + // Scheduling: Optional Job scheduling configuration. + Scheduling *JobScheduling `json:"scheduling,omitempty"` + // SparkJob: Job is a Spark job. SparkJob *SparkJob `json:"sparkJob,omitempty"` // SparkSqlJob: Job is a SparkSql job. SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` - // Status: [Output-only] The job status. Additional application-specific - // status information may be contained in the type_job and - // yarn_applications fields. + // Status: Output-only The job status. Additional application-specific + // status information may be contained in the type_job and + // yarn_applications fields. Status *JobStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] The previous job status. + // StatusHistory: Output-only The previous job status. 
StatusHistory []*JobStatus `json:"statusHistory,omitempty"` - // SubmittedBy: [Output-only] The email address of the user submitting - // the job. For jobs submitted on the cluster, the address is - // username@hostname. + // SubmittedBy: Output-only The email address of the user submitting the + // job. For jobs submitted on the cluster, the address is + // username@hostname. SubmittedBy string `json:"submittedBy,omitempty"` - // YarnApplications: [Output-only] The collection of YARN applications + // YarnApplications: Output-only The collection of YARN applications // spun up by this job. YarnApplications []*YarnApplication `json:"yarnApplications,omitempty"` @@ -906,11 +933,11 @@ func (s *Job) MarshalJSON() ([]byte, error) { // JobPlacement: Cloud Dataproc job configuration. type JobPlacement struct { - // ClusterName: [Required] The name of the cluster where the job will be + // ClusterName: Required The name of the cluster where the job will be // submitted. ClusterName string `json:"clusterName,omitempty"` - // ClusterUuid: [Output-only] A cluster UUID generated by the Dataproc + // ClusterUuid: Output-only A cluster UUID generated by the Dataproc // service when the job is submitted. ClusterUuid string `json:"clusterUuid,omitempty"` @@ -939,16 +966,16 @@ func (s *JobPlacement) MarshalJSON() ([]byte, error) { // JobReference: Encapsulates the full scoping used to reference a job. type JobReference struct { - // JobId: [Required] The job ID, which must be unique within the - // project. The job ID is generated by the server upon job submission or - // provided by the user as a means to perform retries without creating - // duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers - // (0-9), underscores (_), or hyphens (-). The maximum length is 512 + // JobId: Required The job ID, which must be unique within the project. + // The job ID is generated by the server upon job submission or provided + // by the user as a means to perform retries without creating duplicate + // jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), or hyphens (-). The maximum length is 100 // characters. JobId string `json:"jobId,omitempty"` - // ProjectId: [Required] The ID of the Google Cloud Platform project - // that the job belongs to. + // ProjectId: Required The ID of the Google Cloud Platform project that + // the job belongs to. ProjectId string `json:"projectId,omitempty"` // ForceSendFields is a list of field names (e.g. "JobId") to @@ -974,27 +1001,70 @@ func (s *JobReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// JobScheduling: Job scheduling options.Beta Feature: These options are +// available for testing purposes only. They may be changed before final +// release. +type JobScheduling struct { + // MaxFailuresPerHour: Optional Maximum number of times per hour a + // driver may be restarted as a result of driver terminating with + // non-zero code before job is reported failed.A job may be reported as + // thrashing if driver exits with non-zero code 4 times within 10 minute + // window.Maximum value is 10. + MaxFailuresPerHour int64 `json:"maxFailuresPerHour,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxFailuresPerHour") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxFailuresPerHour") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *JobScheduling) MarshalJSON() ([]byte, error) { + type noMethod JobScheduling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // JobStatus: Cloud Dataproc job status. type JobStatus struct { - // Details: [Optional] Job state details, such as an error description - // if the state is ERROR. + // Details: Optional Job state details, such as an error description if + // the state is ERROR. Details string `json:"details,omitempty"` - // State: [Required] A state message specifying the overall job state. + // State: Required A state message specifying the overall job state. // // Possible values: - // "STATE_UNSPECIFIED" - // "PENDING" - // "SETUP_DONE" - // "RUNNING" - // "CANCEL_PENDING" - // "CANCEL_STARTED" - // "CANCELLED" - // "DONE" - // "ERROR" + // "STATE_UNSPECIFIED" - The job state is unknown. + // "PENDING" - The job is pending; it has been submitted, but is not + // yet running. + // "SETUP_DONE" - Job has been received by the service and completed + // initial setup; it will shortly be submitted to the cluster. + // "RUNNING" - The job is running on the cluster. + // "CANCEL_PENDING" - A CancelJob request has been received, but is + // pending. + // "CANCEL_STARTED" - Transient in-flight resources have been + // canceled, and the request to cancel the running job has been issued + // to the cluster. + // "CANCELLED" - The job cancelation was successful. + // "DONE" - The job has completed successfully. + // "ERROR" - The job has completed, but encountered an error. + // "ATTEMPT_FAILURE" - Job attempt has failed. The detail field + // contains failure details for this attempt.Applies to restartable jobs + // only. State string `json:"state,omitempty"` - // StateStartTime: [Output-only] The time when this state was entered. + // StateStartTime: Output-only The time when this state was entered. StateStartTime string `json:"stateStartTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Details") to @@ -1022,7 +1092,7 @@ func (s *JobStatus) MarshalJSON() ([]byte, error) { // ListClustersResponse: The list of all clusters in a project. type ListClustersResponse struct { - // Clusters: [Output-only] The clusters in the project. + // Clusters: Output-only The clusters in the project. Clusters []*Cluster `json:"clusters,omitempty"` // NextPageToken: The standard List next-page token. @@ -1057,12 +1127,13 @@ func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { // ListJobsResponse: A list of jobs in a project. type ListJobsResponse struct { - // Jobs: [Output-only] Jobs list. + // Jobs: Output-only Jobs list. 
Jobs []*Job `json:"jobs,omitempty"` - // NextPageToken: [Optional] This token is included in the response if + // NextPageToken: Optional This token is included in the response if // there are more results to fetch. To fetch additional results, provide - // this value as the `page_token` in a subsequent ListJobsRequest. + // this value as the page_token in a subsequent + // ListJobsRequest. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1163,11 +1234,11 @@ func (s *LoggingConfiguration) MarshalJSON() ([]byte, error) { // ManagedGroupConfiguration: Specifies the resources used to actively // manage an instance group. type ManagedGroupConfiguration struct { - // InstanceGroupManagerName: [Output-only] The name of the Instance - // Group Manager for this group. + // InstanceGroupManagerName: Output-only The name of the Instance Group + // Manager for this group. InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"` - // InstanceTemplateName: [Output-only] The name of the Instance Template + // InstanceTemplateName: Output-only The name of the Instance Template // used for the Managed Instance Group. InstanceTemplateName string `json:"instanceTemplateName,omitempty"` @@ -1199,15 +1270,14 @@ func (s *ManagedGroupConfiguration) MarshalJSON() ([]byte, error) { // NodeInitializationAction: Specifies an executable to run on a fully // configured node and a timeout period for executable completion. type NodeInitializationAction struct { - // ExecutableFile: [Required] Google Cloud Storage URI of executable - // file. + // ExecutableFile: Required Google Cloud Storage URI of executable file. ExecutableFile string `json:"executableFile,omitempty"` - // ExecutionTimeout: [Optional] Amount of time executable has to - // complete. Default is 10 minutes. Cluster creation fails with an - // explanatory error message (the name of the executable that caused the - // error and the exceeded timeout period) if the executable is not - // completed at end of the timeout period. + // ExecutionTimeout: Optional Amount of time executable has to complete. + // Default is 10 minutes. Cluster creation fails with an explanatory + // error message (the name of the executable that caused the error and + // the exceeded timeout period) if the executable is not completed at + // end of the timeout period. ExecutionTimeout string `json:"executionTimeout,omitempty"` // ForceSendFields is a list of field names (e.g. "ExecutableFile") to @@ -1237,9 +1307,9 @@ func (s *NodeInitializationAction) MarshalJSON() ([]byte, error) { // Operation: This resource represents a long-running operation that is // the result of a network API call. type Operation struct { - // Done: If the value is `false`, it means the operation is still in - // progress. If true, the operation is completed, and either `error` or - // `response` is available. + // Done: If the value is false, it means the operation is still in + // progress. If true, the operation is completed, and either error or + // response is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or @@ -1255,18 +1325,17 @@ type Operation struct { // Name: The server-assigned name, which is only unique within the same // service that originally returns it. If you use the default HTTP - // mapping, the `name` should have the format of - // `operations/some/unique/name`. 
+ // mapping, the name should have the format of + // operations/some/unique/name. Name string `json:"name,omitempty"` // Response: The normal response of the operation in case of success. If - // the original method returns no data on success, such as `Delete`, the - // response is `google.protobuf.Empty`. If the original method is - // standard `Get`/`Create`/`Update`, the response should be the - // resource. For other methods, the response should have the type - // `XxxResponse`, where `Xxx` is the original method name. For example, - // if the original method name is `TakeSnapshot()`, the inferred - // response type is `TakeSnapshotResponse`. + // the original method returns no data on success, such as Delete, the + // response is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other + // methods, the response should have the type XxxResponse, where Xxx is + // the original method name. For example, if the original method name is + // TakeSnapshot(), the inferred response type is TakeSnapshotResponse. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1304,42 +1373,21 @@ type OperationMetadata struct { // ClusterUuid: Cluster UUId for the operation. ClusterUuid string `json:"clusterUuid,omitempty"` - // Description: [Output-only] Short description of operation. + // Description: Output-only Short description of operation. Description string `json:"description,omitempty"` - // Details: A message containing any operation metadata details. - Details string `json:"details,omitempty"` - - // EndTime: The time that the operation completed. - EndTime string `json:"endTime,omitempty"` - - // InnerState: A message containing the detailed operation state. - InnerState string `json:"innerState,omitempty"` - - // InsertTime: The time that the operation was requested. - InsertTime string `json:"insertTime,omitempty"` - - // OperationType: [Output-only] The operation type. + // OperationType: Output-only The operation type. OperationType string `json:"operationType,omitempty"` - // StartTime: The time that the operation was started by the server. - StartTime string `json:"startTime,omitempty"` - - // State: A message containing the operation state. - // - // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" - State string `json:"state,omitempty"` - - // Status: [Output-only] Current operation status. + // Status: Output-only Current operation status. Status *OperationStatus `json:"status,omitempty"` - // StatusHistory: [Output-only] Previous operation status. + // StatusHistory: Output-only Previous operation status. StatusHistory []*OperationStatus `json:"statusHistory,omitempty"` + // Warnings: Output-only Errors encountered during operation execution. + Warnings []string `json:"warnings,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1374,10 +1422,10 @@ type OperationStatus struct { // State: A message containing the operation state. // // Possible values: - // "UNKNOWN" - // "PENDING" - // "RUNNING" - // "DONE" + // "UNKNOWN" - Unused. + // "PENDING" - The operation has been created. + // "RUNNING" - The operation is running. + // "DONE" - The operation is done; either cancelled or completed. 
State string `json:"state,omitempty"` // StateStartTime: The time this state was entered. @@ -1408,21 +1456,21 @@ func (s *OperationStatus) MarshalJSON() ([]byte, error) { // PigJob: A Cloud Dataproc job for running Pig queries on YARN. type PigJob struct { - // ContinueOnFailure: [Optional] Whether to continue executing queries - // if a query fails. The default value is `false`. Setting to `true` can - // be useful when executing independent parallel queries. + // ContinueOnFailure: Optional Whether to continue executing queries if + // a query fails. The default value is false. Setting to true can be + // useful when executing independent parallel queries. ContinueOnFailure bool `json:"continueOnFailure,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can - // contain Pig UDFs. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH + // of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig + // UDFs. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Pig. Properties that conflict with values set by the Cloud // Dataproc API may be overwritten. Can include properties set in // /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and @@ -1436,8 +1484,8 @@ type PigJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values - // (equivalent to the Pig command: `name=[value]`). + // ScriptVariables: Optional Mapping of query variable names to values + // (equivalent to the Pig command: name=[value]). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` // ForceSendFields is a list of field names (e.g. "ContinueOnFailure") @@ -1467,39 +1515,39 @@ func (s *PigJob) MarshalJSON() ([]byte, error) { // PySparkJob: A Cloud Dataproc job for running PySpark applications on // YARN. type PySparkJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of .jar, .tar, .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include - // arguments, such as `--conf`, that can be set as job properties, since - // a collision may occur that causes an incorrect job submission. + // Args: Optional The arguments to pass to the driver. Do not include + // arguments, such as --conf, that can be set as job properties, since a + // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Python drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATHs of the Python driver and tasks. 
+ // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs + // of the Python driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` - // MainPythonFileUri: [Required] The Hadoop Compatible Filesystem (HCFS) + // MainPythonFileUri: Required The Hadoop Compatible Filesystem (HCFS) // URI of the main Python file to use as the driver. Must be a .py file. MainPythonFileUri string `json:"mainPythonFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure PySpark. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. Properties map[string]string `json:"properties,omitempty"` - // PythonFileUris: [Optional] HCFS file URIs of Python files to pass to + // PythonFileUris: Optional HCFS file URIs of Python files to pass to // the PySpark framework. Supported file types: .py, .egg, and .zip. PythonFileUris []string `json:"pythonFileUris,omitempty"` @@ -1528,12 +1576,21 @@ func (s *PySparkJob) MarshalJSON() ([]byte, error) { // QueryList: A list of queries to run on a cluster. type QueryList struct { - // Queries: [Required] The queries to execute. You do not need to + // Queries: Required The queries to execute. You do not need to // terminate a query with a semicolon. Multiple queries can be specified // in one string by separating each with a semicolon. Here is an example // of an Cloud Dataproc API snippet that uses a QueryList to specify a - // HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", - // "query3;query4", ] } } + // HiveJob: + // "hiveJob": { + // "queryList": { + // "queries": [ + // "query1", + // "query2", + // "query3;query4", + // ] + // } + // } + // Queries []string `json:"queries,omitempty"` // ForceSendFields is a list of field names (e.g. "Queries") to @@ -1562,18 +1619,17 @@ func (s *QueryList) MarshalJSON() ([]byte, error) { // SoftwareConfiguration: Specifies the selection and configuration of // software inside the cluster. type SoftwareConfiguration struct { - // ImageVersion: [Optional] The version of software inside the cluster. - // It must match the regular expression `[0-9]+\.[0-9]+`. If - // unspecified, it defaults to the latest version (see [Cloud Dataproc - // Versioning](/dataproc/versioning)). + // ImageVersion: Optional The version of software inside the cluster. It + // must match the regular expression [0-9]+\.[0-9]+. If unspecified, it + // defaults to the latest version (see Cloud Dataproc Versioning). ImageVersion string `json:"imageVersion,omitempty"` - // Properties: [Optional] The properties to set on daemon configuration - // files. Property keys are specified in "prefix:property" format, such + // Properties: Optional The properties to set on daemon configuration + // files.Property keys are specified in "prefix:property" format, such // as "core:fs.defaultFS". 
The following are supported prefixes and - // their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - - // mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - - // pig.properties spark - spark-defaults.conf + // their mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - + // mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - + // pig.properties spark - spark-defaults.conf Properties map[string]string `json:"properties,omitempty"` // ForceSendFields is a list of field names (e.g. "ImageVersion") to @@ -1602,39 +1658,39 @@ func (s *SoftwareConfiguration) MarshalJSON() ([]byte, error) { // SparkJob: A Cloud Dataproc job for running Spark applications on // YARN. type SparkJob struct { - // ArchiveUris: [Optional] HCFS URIs of archives to be extracted in the + // ArchiveUris: Optional HCFS URIs of archives to be extracted in the // working directory of Spark drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` - // Args: [Optional] The arguments to pass to the driver. Do not include - // arguments, such as `--conf`, that can be set as job properties, since - // a collision may occur that causes an incorrect job submission. + // Args: Optional The arguments to pass to the driver. Do not include + // arguments, such as --conf, that can be set as job properties, since a + // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: [Optional] HCFS URIs of files to be copied to the working + // FileUris: Optional HCFS URIs of files to be copied to the working // directory of Spark drivers and distributed tasks. Useful for naively // parallel tasks. FileUris []string `json:"fileUris,omitempty"` - // JarFileUris: [Optional] HCFS URIs of jar files to add to the - // CLASSPATHs of the Spark driver and tasks. + // JarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs + // of the Spark driver and tasks. JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` // MainClass: The name of the driver's main class. The jar file that // contains the class must be in the default CLASSPATH or specified in - // `jar_file_uris`. + // jar_file_uris. MainClass string `json:"mainClass,omitempty"` // MainJarFileUri: The Hadoop Compatible Filesystem (HCFS) URI of the // jar file that contains the main class. MainJarFileUri string `json:"mainJarFileUri,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Spark. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. @@ -1665,15 +1721,15 @@ func (s *SparkJob) MarshalJSON() ([]byte, error) { // SparkSqlJob: A Cloud Dataproc job for running Spark SQL queries. type SparkSqlJob struct { - // JarFileUris: [Optional] HCFS URIs of jar files to be added to the - // Spark CLASSPATH. + // JarFileUris: Optional HCFS URIs of jar files to be added to the Spark + // CLASSPATH. 
JarFileUris []string `json:"jarFileUris,omitempty"` - // LoggingConfiguration: [Optional] The runtime log configuration for - // job execution. + // LoggingConfiguration: Optional The runtime log configuration for job + // execution. LoggingConfiguration *LoggingConfiguration `json:"loggingConfiguration,omitempty"` - // Properties: [Optional] A mapping of property names to values, used to + // Properties: Optional A mapping of property names to values, used to // configure Spark SQL's SparkConf. Properties that conflict with values // set by the Cloud Dataproc API may be overwritten. Properties map[string]string `json:"properties,omitempty"` @@ -1684,8 +1740,8 @@ type SparkSqlJob struct { // QueryList: A list of queries. QueryList *QueryList `json:"queryList,omitempty"` - // ScriptVariables: [Optional] Mapping of query variable names to values - // (equivalent to the Spark SQL command: SET `name="value";`). + // ScriptVariables: Optional Mapping of query variable names to values + // (equivalent to the Spark SQL command: SET name="value";). ScriptVariables map[string]string `json:"scriptVariables,omitempty"` // ForceSendFields is a list of field names (e.g. "JarFileUris") to @@ -1711,42 +1767,45 @@ func (s *SparkSqlJob) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Status: The `Status` type defines a logical error model that is +// Status: The Status type defines a logical error model that is // suitable for different programming environments, including REST APIs -// and RPC APIs. It is used by [gRPC](https://github.com/grpc). The -// error model is designed to be: - Simple to use and understand for -// most users - Flexible enough to meet unexpected needs # Overview The -// `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// google.rpc.Code, but it may accept additional error codes if needed. -// The error message should be a developer-facing English message that -// helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the -// error details or localize it in the client. The optional error -// details may contain arbitrary information about the error. There is a -// predefined set of error detail types in the package `google.rpc` -// which can be used for common error conditions. # Language mapping The -// `Status` message is the logical representation of the error model, -// but it is not necessarily the actual wire format. When the `Status` -// message is exposed in different client libraries and different wire -// protocols, it can be mapped differently. For example, it will likely -// be mapped to some exceptions in Java, but more likely mapped to some -// error codes in C. # Other uses The error model and the `Status` -// message can be used in a variety of environments, either with or -// without APIs, to provide a consistent developer experience across -// different environments. Example uses of this error model include: - +// and RPC APIs. It is used by gRPC (https://github.com/grpc). The error +// model is designed to be: +// Simple to use and understand for most users +// Flexible enough to meet unexpected needsOverviewThe Status message +// contains three pieces of data: error code, error message, and error +// details. The error code should be an enum value of google.rpc.Code, +// but it may accept additional error codes if needed. 
The error message +// should be a developer-facing English message that helps developers +// understand and resolve the error. If a localized user-facing error +// message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of +// error detail types in the package google.rpc which can be used for +// common error conditions.Language mappingThe Status message is the +// logical representation of the error model, but it is not necessarily +// the actual wire format. When the Status message is exposed in +// different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions in Java, but more likely mapped to some error codes in +// C.Other usesThe error model and the Status message can be used in a +// variety of environments, either with or without APIs, to provide a +// consistent developer experience across different environments.Example +// uses of this error model include: // Partial errors. If a service needs to return partial errors to the -// client, it may embed the `Status` in the normal response to indicate -// the partial errors. - Workflow errors. A typical workflow has -// multiple steps. Each step may have a `Status` message for error -// reporting purpose. - Batch operations. If a client uses batch request -// and batch response, the `Status` message should be used directly -// inside batch response, one for each error sub-response. - +// client, it may embed the Status in the normal response to indicate +// the partial errors. +// Workflow errors. A typical workflow has multiple steps. Each step may +// have a Status message for error reporting purpose. +// Batch operations. If a client uses batch request and batch response, +// the Status message should be used directly inside batch response, one +// for each error sub-response. // Asynchronous operations. If an API call embeds asynchronous operation // results in its response, the status of those operations should be -// represented directly using the `Status` message. - Logging. If some -// API errors are stored in logs, the message `Status` could be used -// directly after any stripping needed for security/privacy reasons. +// represented directly using the Status message. +// Logging. If some API errors are stored in logs, the message Status +// could be used directly after any stripping needed for +// security/privacy reasons. type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. @@ -1786,7 +1845,7 @@ func (s *Status) MarshalJSON() ([]byte, error) { // SubmitJobRequest: A request to submit a job. type SubmitJobRequest struct { - // Job: [Required] The job resource. + // Job: Required The job resource. Job *Job `json:"job,omitempty"` // ForceSendFields is a list of field names (e.g. "Job") to @@ -1814,30 +1873,31 @@ func (s *SubmitJobRequest) MarshalJSON() ([]byte, error) { // YarnApplication: A YARN application created by a job. Application // information is a subset of -// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. type YarnApplication struct { - // Name: [Required] The application name. + // Name: Required The application name. Name string `json:"name,omitempty"` - // Progress: [Required] The numerical progress of the application, from - // 1 to 100. 
+ // Progress: Required The numerical progress of the application, from 1 + // to 100. Progress float64 `json:"progress,omitempty"` - // State: [Required] The application state. + // State: Required The application state. // // Possible values: - // "STATE_UNSPECIFIED" - // "NEW" - // "NEW_SAVING" - // "SUBMITTED" - // "ACCEPTED" - // "RUNNING" - // "FINISHED" - // "FAILED" - // "KILLED" + // "STATE_UNSPECIFIED" - Status is unspecified. + // "NEW" - Status is NEW. + // "NEW_SAVING" - Status is NEW_SAVING. + // "SUBMITTED" - Status is SUBMITTED. + // "ACCEPTED" - Status is ACCEPTED. + // "RUNNING" - Status is RUNNING. + // "FINISHED" - Status is FINISHED. + // "FAILED" - Status is FAILED. + // "KILLED" - Status is KILLED. State string `json:"state,omitempty"` - // TrackingUrl: [Optional] The HTTP URL of the ApplicationMaster, + // TrackingUrl: Optional The HTTP URL of the ApplicationMaster, // HistoryServer, or TimelineServer that provides application-specific // information. The URL uses the internal hostname, and requires a proxy // server for resolution and, possibly, access. @@ -1894,10 +1954,9 @@ type OperationsCancelCall struct { // Cancel: Starts asynchronous cancellation on a long-running operation. // The server makes a best effort to cancel the operation, but success // is not guaranteed. If the server doesn't support this method, it -// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use -// [operations.get](/dataproc/reference/rest/v1beta1/operations/get) or -// other methods to check whether the cancellation succeeded or whether -// the operation completed despite cancellation. +// returns google.rpc.Code.UNIMPLEMENTED. Clients can use operations.get +// or other methods to check whether the cancellation succeeded or +// whether the operation completed despite cancellation. func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1936,6 +1995,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { @@ -1991,7 +2051,8 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use [operations.get](/dataproc/reference/rest/v1beta1/operations/get) or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. 
Clients can use operations.get or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation.", + // "flatPath": "v1beta1/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "dataproc.operations.cancel", // "parameterOrder": [ @@ -2033,7 +2094,7 @@ type OperationsDeleteCall struct { // Delete: Deletes a long-running operation. This method indicates that // the client is no longer interested in the operation result. It does // not cancel the operation. If the server doesn't support this method, -// it returns `google.rpc.Code.UNIMPLEMENTED`. +// it returns google.rpc.Code.UNIMPLEMENTED. func (r *OperationsService) Delete(name string) *OperationsDeleteCall { c := &OperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2071,6 +2132,7 @@ func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") @@ -2121,7 +2183,8 @@ func (c *OperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", + // "flatPath": "v1beta1/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "dataproc.operations.delete", // "parameterOrder": [ @@ -2208,6 +2271,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2262,6 +2326,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) return ret, nil // { // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1beta1/operations/{operationsId}", // "httpMethod": "GET", // "id": "dataproc.operations.get", // "parameterOrder": [ @@ -2300,9 +2365,9 @@ type OperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// `UNIMPLEMENTED`. NOTE: the `name` binding below allows API services -// to override the binding to use different resource name schemes, such -// as `users/*/operations`. +// UNIMPLEMENTED.NOTE: the name binding below allows API services to +// override the binding to use different resource name schemes, such as +// users/*/operations. 
func (r *OperationsService) List(name string) *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2371,6 +2436,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2424,7 +2490,8 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding below allows API services to override the binding to use different resource name schemes, such as `users/*/operations`.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.", + // "flatPath": "v1beta1/operations", // "httpMethod": "GET", // "id": "dataproc.operations.list", // "parameterOrder": [ @@ -2537,6 +2604,7 @@ func (c *ProjectsClustersCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cluster) if err != nil { @@ -2593,6 +2661,7 @@ func (c *ProjectsClustersCreateCall) Do(opts ...googleapi.CallOption) (*Operatio return ret, nil // { // "description": "Creates a cluster in a project.", + // "flatPath": "v1beta1/projects/{projectId}/clusters", // "httpMethod": "POST", // "id": "dataproc.projects.clusters.create", // "parameterOrder": [ @@ -2600,7 +2669,7 @@ func (c *ProjectsClustersCreateCall) Do(opts ...googleapi.CallOption) (*Operatio // ], // "parameters": { // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -2670,6 +2739,7 @@ func (c *ProjectsClustersDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}/clusters/{clusterName}") @@ -2722,6 +2792,7 @@ func (c *ProjectsClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio return ret, nil // { // "description": "Deletes a cluster in a project.", + // "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}", // "httpMethod": "DELETE", // "id": "dataproc.projects.clusters.delete", // "parameterOrder": [ @@ -2730,13 +2801,13 @@ func (c *ProjectsClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster 
belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -2767,7 +2838,7 @@ type ProjectsClustersDiagnoseCall struct { // Diagnose: Gets cluster diagnostic information. After the operation // completes, the Operation.response field contains -// `DiagnoseClusterOutputLocation`. +// DiagnoseClusterOutputLocation. func (r *ProjectsClustersService) Diagnose(projectId string, clusterName string, diagnoseclusterrequest *DiagnoseClusterRequest) *ProjectsClustersDiagnoseCall { c := &ProjectsClustersDiagnoseCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -2807,6 +2878,7 @@ func (c *ProjectsClustersDiagnoseCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.diagnoseclusterrequest) if err != nil { @@ -2863,7 +2935,8 @@ func (c *ProjectsClustersDiagnoseCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains `DiagnoseClusterOutputLocation`.", + // "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation.", + // "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}:diagnose", // "httpMethod": "POST", // "id": "dataproc.projects.clusters.diagnose", // "parameterOrder": [ @@ -2872,13 +2945,13 @@ func (c *ProjectsClustersDiagnoseCall) Do(opts ...googleapi.CallOption) (*Operat // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -2959,6 +3032,7 @@ func (c *ProjectsClustersGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3014,6 +3088,7 @@ func (c *ProjectsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluster, er return ret, nil // { // "description": "Gets the resource representation for a cluster in a project.", + // "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}", // "httpMethod": "GET", // "id": "dataproc.projects.clusters.get", // "parameterOrder": [ @@ -3022,13 +3097,13 @@ func (c *ProjectsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluster, er // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // 
"required": true, // "type": "string" @@ -3063,7 +3138,7 @@ func (r *ProjectsClustersService) List(projectId string) *ProjectsClustersListCa return c } -// Filter sets the optional parameter "filter": [Optional] A filter +// Filter sets the optional parameter "filter": Optional A filter // constraining which clusters to list. Valid filters contain label // terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 // = val3) @@ -3127,6 +3202,7 @@ func (c *ProjectsClustersListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3181,6 +3257,7 @@ func (c *ProjectsClustersListCall) Do(opts ...googleapi.CallOption) (*ListCluste return ret, nil // { // "description": "Lists all clusters in a project.", + // "flatPath": "v1beta1/projects/{projectId}/clusters", // "httpMethod": "GET", // "id": "dataproc.projects.clusters.list", // "parameterOrder": [ @@ -3188,7 +3265,7 @@ func (c *ProjectsClustersListCall) Do(opts ...googleapi.CallOption) (*ListCluste // ], // "parameters": { // "filter": { - // "description": "[Optional] A filter constraining which clusters to list. Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", + // "description": "Optional A filter constraining which clusters to list. Valid filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = val2 OR labels.k3 = val3)", // "location": "query", // "type": "string" // }, @@ -3204,7 +3281,7 @@ func (c *ProjectsClustersListCall) Do(opts ...googleapi.CallOption) (*ListCluste // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -3263,21 +3340,34 @@ func (r *ProjectsClustersService) Patch(projectId string, clusterName string, cl return c } -// UpdateMask sets the optional parameter "updateMask": [Required] -// Specifies the path, relative to Cluster, of the field to update. For -// example, to change the number of workers in a cluster to 5, the -// update_mask parameter would be specified as -// configuration.worker_configuration.num_instances, and the `PATCH` -// request body would specify the new value, as follows: { -// "configuration":{ "workerConfiguration":{ "numInstances":"5" } } } +// UpdateMask sets the optional parameter "updateMask": Required +// Specifies the path, relative to Cluster, of the field to +// update. For example, to change the number of workers in a cluster to +// 5, the update_mask parameter would be specified as +// configuration.worker_configuration.num_instances, and +// the PATCH request body would specify the new value, as follows: +// { +// "configuration":{ +// "workerConfiguration":{ +// "numInstances":"5" +// } +// } +// } // Similarly, to change the number of preemptible workers in a cluster -// to 5, the update_mask parameter would be -// config.secondary_worker_config.num_instances, and the `PATCH` request -// body would be set as follows: { "config":{ "secondaryWorkerConfig":{ -// "numInstances":"5" } } } Note: Currently, -// config.worker_config.num_instances and -// config.secondary_worker_config.num_instances are the only fields that -// can be updated. 
+// to 5, the update_mask parameter would be +// config.secondary_worker_config.num_instances, and the +// PATCH request body would be set as follows: +// { +// "config":{ +// "secondaryWorkerConfig":{ +// "numInstances":"5" +// } +// } +// } +// Note: Currently, +// config.worker_config.num_instances and +// config.secondary_worker_config.num_instances are the +// only fields that can be updated. func (c *ProjectsClustersPatchCall) UpdateMask(updateMask string) *ProjectsClustersPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -3314,6 +3404,7 @@ func (c *ProjectsClustersPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.cluster) if err != nil { @@ -3371,6 +3462,7 @@ func (c *ProjectsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Operation return ret, nil // { // "description": "Updates a cluster in a project.", + // "flatPath": "v1beta1/projects/{projectId}/clusters/{clusterName}", // "httpMethod": "PATCH", // "id": "dataproc.projects.clusters.patch", // "parameterOrder": [ @@ -3379,19 +3471,20 @@ func (c *ProjectsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Operation // ], // "parameters": { // "clusterName": { - // "description": "[Required] The cluster name.", + // "description": "Required The cluster name.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project the cluster belongs to.", + // "description": "Required The ID of the Google Cloud Platform project the cluster belongs to.", // "location": "path", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "[Required] Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as configuration.worker_configuration.num_instances, and the `PATCH` request body would specify the new value, as follows: { \"configuration\":{ \"workerConfiguration\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the `PATCH` request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } Note: Currently, config.worker_config.num_instances and config.secondary_worker_config.num_instances are the only fields that can be updated.", + // "description": "Required Specifies the path, relative to \u003ccode\u003eCluster\u003c/code\u003e, of the field to update. 
For example, to change the number of workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003econfiguration.worker_configuration.num_instances\u003c/code\u003e, and the PATCH request body would specify the new value, as follows:\n{\n \"configuration\":{\n \"workerConfiguration\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003econfig.worker_config.num_instances\u003c/code\u003e and \u003ccode\u003econfig.secondary_worker_config.num_instances\u003c/code\u003e are the only fields that can be updated.", + // "format": "google-fieldmask", // "location": "query", // "type": "string" // } @@ -3423,9 +3516,7 @@ type ProjectsJobsCancelCall struct { } // Cancel: Starts a job cancellation request. To access the job resource -// after cancellation, call -// [jobs.list](/dataproc/reference/rest/v1beta1/projects.jobs/list) or -// [jobs.get](/dataproc/reference/rest/v1beta1/projects.jobs/get). +// after cancellation, call jobs.list or jobs.get. func (r *ProjectsJobsService) Cancel(projectId string, jobId string, canceljobrequest *CancelJobRequest) *ProjectsJobsCancelCall { c := &ProjectsJobsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -3465,6 +3556,7 @@ func (c *ProjectsJobsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceljobrequest) if err != nil { @@ -3521,7 +3613,8 @@ func (c *ProjectsJobsCancelCall) Do(opts ...googleapi.CallOption) (*Job, error) } return ret, nil // { - // "description": "Starts a job cancellation request. To access the job resource after cancellation, call [jobs.list](/dataproc/reference/rest/v1beta1/projects.jobs/list) or [jobs.get](/dataproc/reference/rest/v1beta1/projects.jobs/get).", + // "description": "Starts a job cancellation request. To access the job resource after cancellation, call jobs.list or jobs.get.", + // "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}:cancel", // "httpMethod": "POST", // "id": "dataproc.projects.jobs.cancel", // "parameterOrder": [ @@ -3530,13 +3623,13 @@ func (c *ProjectsJobsCancelCall) Do(opts ...googleapi.CallOption) (*Job, error) // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -3568,7 +3661,7 @@ type ProjectsJobsDeleteCall struct { } // Delete: Deletes the job from the project. If the job is active, the -// delete fails, and the response returns `FAILED_PRECONDITION`. +// delete fails, and the response returns FAILED_PRECONDITION. 
func (r *ProjectsJobsService) Delete(projectId string, jobId string) *ProjectsJobsDeleteCall { c := &ProjectsJobsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -3607,6 +3700,7 @@ func (c *ProjectsJobsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}/jobs/{jobId}") @@ -3658,7 +3752,8 @@ func (c *ProjectsJobsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error } return ret, nil // { - // "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", + // "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", + // "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}", // "httpMethod": "DELETE", // "id": "dataproc.projects.jobs.delete", // "parameterOrder": [ @@ -3667,13 +3762,13 @@ func (c *ProjectsJobsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -3751,6 +3846,7 @@ func (c *ProjectsJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3806,6 +3902,7 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { return ret, nil // { // "description": "Gets the resource representation for a job in a project.", + // "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}", // "httpMethod": "GET", // "id": "dataproc.projects.jobs.get", // "parameterOrder": [ @@ -3814,13 +3911,13 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { // ], // "parameters": { // "jobId": { - // "description": "[Required] The job ID.", + // "description": "Required The job ID.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -3855,7 +3952,7 @@ func (r *ProjectsJobsService) List(projectId string) *ProjectsJobsListCall { return c } -// ClusterName sets the optional parameter "clusterName": [Optional] If +// ClusterName sets the optional parameter "clusterName": Optional If // set, the returned jobs list includes only jobs that were submitted to // the named cluster. 
func (c *ProjectsJobsListCall) ClusterName(clusterName string) *ProjectsJobsListCall { @@ -3863,7 +3960,7 @@ func (c *ProjectsJobsListCall) ClusterName(clusterName string) *ProjectsJobsList return c } -// Filter sets the optional parameter "filter": [Optional] A filter +// Filter sets the optional parameter "filter": Optional A filter // constraining which jobs to list. Valid filters contain job state and // label terms such as: labels.key1 = val1 AND (labels.k2 = val2 OR // labels.k3 = val3) @@ -3873,7 +3970,7 @@ func (c *ProjectsJobsListCall) Filter(filter string) *ProjectsJobsListCall { } // JobStateMatcher sets the optional parameter "jobStateMatcher": -// [Optional] Specifies enumerated categories of jobs to list. +// Optional Specifies enumerated categories of jobs to list. // // Possible values: // "ALL" @@ -3884,15 +3981,15 @@ func (c *ProjectsJobsListCall) JobStateMatcher(jobStateMatcher string) *Projects return c } -// PageSize sets the optional parameter "pageSize": [Optional] The -// number of results to return in each response. +// PageSize sets the optional parameter "pageSize": Optional The number +// of results to return in each response. func (c *ProjectsJobsListCall) PageSize(pageSize int64) *ProjectsJobsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": [Optional] The -// page token, returned by a previous call, to request the next page of +// PageToken sets the optional parameter "pageToken": Optional The page +// token, returned by a previous call, to request the next page of // results. func (c *ProjectsJobsListCall) PageToken(pageToken string) *ProjectsJobsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -3940,6 +4037,7 @@ func (c *ProjectsJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3994,6 +4092,7 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon return ret, nil // { // "description": "Lists jobs in a project.", + // "flatPath": "v1beta1/projects/{projectId}/jobs", // "httpMethod": "GET", // "id": "dataproc.projects.jobs.list", // "parameterOrder": [ @@ -4001,17 +4100,17 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // ], // "parameters": { // "clusterName": { - // "description": "[Optional] If set, the returned jobs list includes only jobs that were submitted to the named cluster.", + // "description": "Optional If set, the returned jobs list includes only jobs that were submitted to the named cluster.", // "location": "query", // "type": "string" // }, // "filter": { - // "description": "[Optional] A filter constraining which jobs to list. Valid filters contain job state and label terms such as: labels.key1 = val1 AND (labels.k2 = val2 OR labels.k3 = val3)", + // "description": "Optional A filter constraining which jobs to list. 
Valid filters contain job state and label terms such as: labels.key1 = val1 AND (labels.k2 = val2 OR labels.k3 = val3)", // "location": "query", // "type": "string" // }, // "jobStateMatcher": { - // "description": "[Optional] Specifies enumerated categories of jobs to list.", + // "description": "Optional Specifies enumerated categories of jobs to list.", // "enum": [ // "ALL", // "ACTIVE", @@ -4021,18 +4120,18 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "type": "string" // }, // "pageSize": { - // "description": "[Optional] The number of results to return in each response.", + // "description": "Optional The number of results to return in each response.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "[Optional] The page token, returned by a previous call, to request the next page of results.", + // "description": "Optional The page token, returned by a previous call, to request the next page of results.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" @@ -4070,6 +4169,169 @@ func (c *ProjectsJobsListCall) Pages(ctx context.Context, f func(*ListJobsRespon } } +// method id "dataproc.projects.jobs.patch": + +type ProjectsJobsPatchCall struct { + s *Service + projectId string + jobId string + job *Job + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a job in a project. +func (r *ProjectsJobsService) Patch(projectId string, jobId string, job *Job) *ProjectsJobsPatchCall { + c := &ProjectsJobsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.jobId = jobId + c.job = job + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required +// Specifies the path, relative to Job, of the field to +// update. For example, to update the labels of a Job the +// update_mask parameter would be specified as +// labels, and the PATCH request body would specify the new +// value. Note: Currently, labels is the +// only field that can be updated. +func (c *ProjectsJobsPatchCall) UpdateMask(updateMask string) *ProjectsJobsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsJobsPatchCall) Fields(s ...googleapi.Field) *ProjectsJobsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsJobsPatchCall) Context(ctx context.Context) *ProjectsJobsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsJobsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsJobsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}/jobs/{jobId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataproc.projects.jobs.patch" call. +// Exactly one of *Job or error will be non-nil. Any non-2xx status code +// is an error. Response headers are in either +// *Job.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsJobsPatchCall) Do(opts ...googleapi.CallOption) (*Job, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Job{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a job in a project.", + // "flatPath": "v1beta1/projects/{projectId}/jobs/{jobId}", + // "httpMethod": "PATCH", + // "id": "dataproc.projects.jobs.patch", + // "parameterOrder": [ + // "projectId", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "Required The job ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Required Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. 
\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta1/projects/{projectId}/jobs/{jobId}", + // "request": { + // "$ref": "Job" + // }, + // "response": { + // "$ref": "Job" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "dataproc.projects.jobs.submit": type ProjectsJobsSubmitCall struct { @@ -4120,6 +4382,7 @@ func (c *ProjectsJobsSubmitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.submitjobrequest) if err != nil { @@ -4176,6 +4439,7 @@ func (c *ProjectsJobsSubmitCall) Do(opts ...googleapi.CallOption) (*Job, error) return ret, nil // { // "description": "Submits a job to a cluster.", + // "flatPath": "v1beta1/projects/{projectId}/jobs:submit", // "httpMethod": "POST", // "id": "dataproc.projects.jobs.submit", // "parameterOrder": [ @@ -4183,7 +4447,7 @@ func (c *ProjectsJobsSubmitCall) Do(opts ...googleapi.CallOption) (*Job, error) // ], // "parameters": { // "projectId": { - // "description": "[Required] The ID of the Google Cloud Platform project that the job belongs to.", + // "description": "Required The ID of the Google Cloud Platform project that the job belongs to.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/datastore/v1/datastore-gen.go b/vendor/google.golang.org/api/datastore/v1/datastore-gen.go index 89f0cdb91..dbcce1b3c 100644 --- a/vendor/google.golang.org/api/datastore/v1/datastore-gen.go +++ b/vendor/google.golang.org/api/datastore/v1/datastore-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} return rs @@ -1650,6 +1655,7 @@ func (c *ProjectsAllocateIdsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.allocateidsrequest) if err != nil { @@ -1785,6 +1791,7 @@ func (c *ProjectsBeginTransactionCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.begintransactionrequest) if err != nil { @@ -1922,6 +1929,7 @@ func (c *ProjectsCommitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitrequest) if err != nil { @@ -2057,6 +2065,7 @@ func (c *ProjectsLookupCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.lookuprequest) if err != nil { @@ -2192,6 +2201,7 @@ func (c *ProjectsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbackrequest) if err != nil { @@ -2327,6 +2337,7 @@ func (c *ProjectsRunQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runqueryrequest) if err != nil { diff --git a/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go b/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go index f0096ee6b..97dc43af7 100644 --- a/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go +++ b/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Datasets *DatasetsService } @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDatasetsService(s *Service) *DatasetsService { rs := &DatasetsService{s: s} return rs @@ -1462,6 +1467,7 @@ func (c *DatasetsAllocateIdsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.allocateidsrequest) if err != nil { @@ -1597,6 +1603,7 @@ func (c *DatasetsBeginTransactionCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.begintransactionrequest) if err != nil { @@ -1733,6 +1740,7 @@ func (c *DatasetsBlindWriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.blindwriterequest) if err != nil { @@ -1869,6 +1877,7 @@ func (c *DatasetsCommitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) 
var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitrequest) if err != nil { @@ -2004,6 +2013,7 @@ func (c *DatasetsLookupCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.lookuprequest) if err != nil { @@ -2139,6 +2149,7 @@ func (c *DatasetsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbackrequest) if err != nil { @@ -2274,6 +2285,7 @@ func (c *DatasetsRunQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runqueryrequest) if err != nil { diff --git a/vendor/google.golang.org/api/datastore/v1beta2/datastore-gen.go b/vendor/google.golang.org/api/datastore/v1beta2/datastore-gen.go index 0cc01cc7a..0d572cdaa 100644 --- a/vendor/google.golang.org/api/datastore/v1beta2/datastore-gen.go +++ b/vendor/google.golang.org/api/datastore/v1beta2/datastore-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Datasets *DatasetsService } @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDatasetsService(s *Service) *DatasetsService { rs := &DatasetsService{s: s} return rs @@ -1502,6 +1507,7 @@ func (c *DatasetsAllocateIdsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.allocateidsrequest) if err != nil { @@ -1637,6 +1643,7 @@ func (c *DatasetsBeginTransactionCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.begintransactionrequest) if err != nil { @@ -1773,6 +1780,7 @@ func (c *DatasetsCommitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitrequest) if err != nil { @@ -1908,6 +1916,7 @@ func (c *DatasetsLookupCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.lookuprequest) if err != nil { @@ -2043,6 +2052,7 @@ func (c *DatasetsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbackrequest) if err != nil { @@ -2178,6 +2188,7 @@ func (c *DatasetsRunQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runqueryrequest) if err != nil { diff --git a/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go b/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go index b73b29fa5..59ecbd53a 100644 --- a/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go +++ b/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} return rs @@ -1652,6 +1657,7 @@ func (c *ProjectsAllocateIdsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.allocateidsrequest) if err != nil { @@ -1787,6 +1793,7 @@ func (c *ProjectsBeginTransactionCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.begintransactionrequest) if err != nil { @@ -1924,6 +1931,7 @@ func (c *ProjectsCommitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitrequest) if err != nil { @@ -2059,6 +2067,7 @@ func (c *ProjectsLookupCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.lookuprequest) if err != nil { @@ -2194,6 +2203,7 @@ func (c *ProjectsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbackrequest) if err != nil { @@ 
-2329,6 +2339,7 @@ func (c *ProjectsRunQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runqueryrequest) if err != nil { diff --git a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json index 082129f64..60b9e4277 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json +++ b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/Dg3jkuWJLyfsWpZiK_LBaeIV21s\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/BZOTaOSd6CDPT3kBLAME2ENtrqQ\"", "discoveryVersion": "v1", "id": "deploymentmanager:v2", "name": "deploymentmanager", "canonicalName": "Deployment Manager", "version": "v2", - "revision": "20161221", + "revision": "20170126", "title": "Google Cloud Deployment Manager API", "description": "Declares, configures, and deploys complex solutions on Google Cloud Platform.", "ownerDomain": "google.com", @@ -89,18 +89,18 @@ "AuditConfig": { "id": "AuditConfig", "type": "object", - "description": "Provides the configuration for non-admin_activity logging for a service. Controls exemptions and specific log sub-types.", + "description": "Specifies the audit configuration for a service. It consists of which permission types are logged, and what identities, if any, are exempted from logging. An AuditConifg must have one or more AuditLogConfigs.", "properties": { "auditLogConfigs": { "type": "array", - "description": "The configuration for each type of logging", + "description": "The configuration for logging of each type of permission.", "items": { "$ref": "AuditLogConfig" } }, "exemptedMembers": { "type": "array", - "description": "Specifies the identities that are exempted from \"data access\" audit logging for the `service` specified above. Follows the same format of Binding.members.", + "description": "Specifies the identities that are exempted from \"data access\" audit logging for the `service` specified above. Follows the same format of Binding.members. This field is deprecated in favor of per-permission-type exemptions.", "items": { "type": "string" } @@ -114,11 +114,11 @@ "AuditLogConfig": { "id": "AuditLogConfig", "type": "object", - "description": "Provides the configuration for a sub-type of logging.", + "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting foo@gmail.com from DATA_READ logging.", "properties": { "exemptedMembers": { "type": "array", - "description": "Specifies the identities that are exempted from this type of logging Follows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. 
Follows the same format of [Binding.members][].", "items": { "type": "string" } @@ -265,6 +265,10 @@ "type": "object", "description": "", "properties": { + "description": { + "type": "string", + "description": "[Output Only] An optional user-provided description of the deployment after the current update has been applied." + }, "labels": { "type": "array", "description": "[Output Only] Map of labels; provided by the client when the resource is created or updated. Specifically: Label keys must be between 1 and 63 characters long and must conform to the following regular expression: [a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63 characters long and must conform to the regular expression ([a-z]([-a-z0-9]*[a-z0-9])?)?", @@ -445,7 +449,7 @@ }, "creationTimestamp": { "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." + "description": "[Deprecated] This field is deprecated." }, "description": { "type": "string", @@ -616,7 +620,7 @@ "properties": { "auditConfigs": { "type": "array", - "description": "Specifies audit logging configs for \"data access\". \"data access\": generally refers to data reads/writes and admin reads. \"admin activity\": generally refers to admin writes.\n\nNote: `AuditConfig` doesn't apply to \"admin activity\", which always enables audit logging.", + "description": "Specifies cloud audit logging configuration for this policy.", "items": { "$ref": "AuditConfig" } @@ -1199,11 +1203,10 @@ }, "maxResults": { "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -1552,11 +1555,10 @@ }, "maxResults": { "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -1642,11 +1644,10 @@ }, "maxResults": { "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -1746,11 +1747,10 @@ }, "maxResults": { "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { @@ -1802,11 +1802,10 @@ }, "maxResults": { "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "default": "500", "format": "uint32", "minimum": "0", - "maximum": "500", "location": "query" }, "orderBy": { diff --git a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go index 346345f92..c5535c1ac 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go +++ b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go @@ -76,9 +76,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Deployments *DeploymentsService @@ -98,6 +99,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDeploymentsService(s *Service) *DeploymentsService { rs := &DeploymentsService{s: s} return rs @@ -143,16 +148,19 @@ type TypesService struct { s *Service } -// AuditConfig: Provides the configuration for non-admin_activity -// logging for a service. Controls exemptions and specific log -// sub-types. +// AuditConfig: Specifies the audit configuration for a service. It +// consists of which permission types are logged, and what identities, +// if any, are exempted from logging. An AuditConifg must have one or +// more AuditLogConfigs. 
type AuditConfig struct { - // AuditLogConfigs: The configuration for each type of logging + // AuditLogConfigs: The configuration for logging of each type of + // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` // ExemptedMembers: Specifies the identities that are exempted from // "data access" audit logging for the `service` specified above. - // Follows the same format of Binding.members. + // Follows the same format of Binding.members. This field is deprecated + // in favor of per-permission-type exemptions. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // Service: Specifies a service that will be enabled for audit logging. @@ -184,10 +192,19 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditLogConfig: Provides the configuration for a sub-type of logging. +// AuditLogConfig: Provides the configuration for logging a type of +// permissions. Example: +// +// { "audit_log_configs": [ { "log_type": "DATA_READ", +// "exempted_members": [ "user:foo@gmail.com" ] }, { "log_type": +// "DATA_WRITE", } ] } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting +// foo@gmail.com from DATA_READ logging. type AuditLogConfig struct { - // ExemptedMembers: Specifies the identities that are exempted from this - // type of logging Follows the same format of Binding.members. + // ExemptedMembers: Specifies the identities that do not cause logging + // for this type of permission. Follows the same format of + // [Binding.members][]. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -462,6 +479,10 @@ func (s *DeploymentLabelEntry) MarshalJSON() ([]byte, error) { } type DeploymentUpdate struct { + // Description: [Output Only] An optional user-provided description of + // the deployment after the current update has been applied. + Description string `json:"description,omitempty"` + // Labels: [Output Only] Map of labels; provided by the client when the // resource is created or updated. Specifically: Label keys must be // between 1 and 63 characters long and must conform to the following @@ -474,7 +495,7 @@ type DeploymentUpdate struct { // configuration of this deployment. Manifest string `json:"manifest,omitempty"` - // ForceSendFields is a list of field names (e.g. "Labels") to + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -482,10 +503,10 @@ type DeploymentUpdate struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` @@ -826,8 +847,7 @@ type Operation struct { // ClientOperationId: [Output Only] Reserved for future use. ClientOperationId string `json:"clientOperationId,omitempty"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. + // CreationTimestamp: [Deprecated] This field is deprecated. CreationTimestamp string `json:"creationTimestamp,omitempty"` // Description: [Output Only] A textual description of the operation, @@ -1145,12 +1165,8 @@ func (s *OperationsListResponse) MarshalJSON() ([]byte, error) { // For a description of IAM and its features, see the [IAM developer's // guide](https://cloud.google.com/iam). type Policy struct { - // AuditConfigs: Specifies audit logging configs for "data access". - // "data access": generally refers to data reads/writes and admin reads. - // "admin activity": generally refers to admin writes. - // - // Note: `AuditConfig` doesn't apply to "admin activity", which always - // enables audit logging. + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Multiple @@ -1902,6 +1918,7 @@ func (c *DeploymentsCancelPreviewCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.deploymentscancelpreviewrequest) if err != nil { @@ -2058,6 +2075,7 @@ func (c *DeploymentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/deployments/{deployment}") @@ -2219,6 +2237,7 @@ func (c *DeploymentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2372,6 +2391,7 @@ func (c *DeploymentsGetIamPolicyCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2526,6 +2546,7 @@ func (c *DeploymentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.deployment) if err != nil { @@ -2670,7 +2691,8 @@ func (c *DeploymentsListCall) Filter(filter string) *DeploymentsListCall { // number of results per page that should be returned. If the number of // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in -// subsequent list requests. +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) func (c *DeploymentsListCall) MaxResults(maxResults int64) *DeploymentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -2742,6 +2764,7 @@ func (c *DeploymentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2809,10 +2832,9 @@ func (c *DeploymentsListCall) Do(opts ...googleapi.CallOption) (*DeploymentsList // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", // "format": "uint32", // "location": "query", - // "maximum": "500", // "minimum": "0", // "type": "integer" // }, @@ -2960,6 +2982,7 @@ func (c *DeploymentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.deployment2) if err != nil { @@ -3141,6 +3164,7 @@ func (c *DeploymentsSetIamPolicyCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { @@ -3289,6 +3313,7 @@ func (c *DeploymentsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.deploymentsstoprequest) if err != nil { @@ -3436,6 +3461,7 @@ func (c *DeploymentsTestIamPermissionsCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { @@ -3621,6 +3647,7 @@ func (c *DeploymentsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.deployment2) if err != nil { @@ -3812,6 +3839,7 @@ func (c *ManifestsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3968,7 +3996,8 @@ func (c *ManifestsListCall) Filter(filter string) *ManifestsListCall { // number of results per page that should be returned. 
If the number of // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in -// subsequent list requests. +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) func (c *ManifestsListCall) MaxResults(maxResults int64) *ManifestsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -4040,6 +4069,7 @@ func (c *ManifestsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4116,10 +4146,9 @@ func (c *ManifestsListCall) Do(opts ...googleapi.CallOption) (*ManifestsListResp // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", // "format": "uint32", // "location": "query", - // "maximum": "500", // "minimum": "0", // "type": "integer" // }, @@ -4237,6 +4266,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4381,7 +4411,8 @@ func (c *OperationsListCall) Filter(filter string) *OperationsListCall { // number of results per page that should be returned. If the number of // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in -// subsequent list requests. +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) func (c *OperationsListCall) MaxResults(maxResults int64) *OperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -4453,6 +4484,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4520,10 +4552,9 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*OperationsListRe // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", // "format": "uint32", // "location": "query", - // "maximum": "500", // "minimum": "0", // "type": "integer" // }, @@ -4643,6 +4674,7 @@ func (c *ResourcesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4798,7 +4830,8 @@ func (c *ResourcesListCall) Filter(filter string) *ResourcesListCall { // number of results per page that should be returned. If the number of // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in -// subsequent list requests. +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) func (c *ResourcesListCall) MaxResults(maxResults int64) *ResourcesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -4870,6 +4903,7 @@ func (c *ResourcesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4946,10 +4980,9 @@ func (c *ResourcesListCall) Do(opts ...googleapi.CallOption) (*ResourcesListResp // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", // "format": "uint32", // "location": "query", - // "maximum": "500", // "minimum": "0", // "type": "integer" // }, @@ -5060,7 +5093,8 @@ func (c *TypesListCall) Filter(filter string) *TypesListCall { // number of results per page that should be returned. If the number of // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in -// subsequent list requests. +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) func (c *TypesListCall) MaxResults(maxResults int64) *TypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -5132,6 +5166,7 @@ func (c *TypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5199,10 +5234,9 @@ func (c *TypesListCall) Do(opts ...googleapi.CallOption) (*TypesListResponse, er // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", // "format": "uint32", // "location": "query", - // "maximum": "500", // "minimum": "0", // "type": "integer" // }, diff --git a/vendor/google.golang.org/api/dfareporting/v2.5/dfareporting-gen.go b/vendor/google.golang.org/api/dfareporting/v2.5/dfareporting-gen.go index c42f80a77..3efe0dadd 100644 --- a/vendor/google.golang.org/api/dfareporting/v2.5/dfareporting-gen.go +++ b/vendor/google.golang.org/api/dfareporting/v2.5/dfareporting-gen.go @@ -124,9 +124,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only AccountActiveAdSummaries *AccountActiveAdSummariesService @@ -250,6 +251,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountActiveAdSummariesService(s *Service) *AccountActiveAdSummariesService { rs := &AccountActiveAdSummariesService{s: s} return rs @@ -13014,6 +13019,7 @@ func (c *AccountActiveAdSummariesGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13163,6 +13169,7 @@ func (c *AccountPermissionGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13310,6 +13317,7 @@ func (c *AccountPermissionGroupsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13451,6 +13459,7 @@ func (c *AccountPermissionsGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13598,6 +13607,7 @@ func (c *AccountPermissionsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13738,6 +13748,7 @@ func (c *AccountUserProfilesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13876,6 +13887,7 @@ func (c *AccountUserProfilesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14102,6 +14114,7 @@ func (c *AccountUserProfilesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14320,6 +14333,7 @@ func (c *AccountUserProfilesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14462,6 +14476,7 @@ func (c *AccountUserProfilesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14607,6 +14622,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14823,6 +14839,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15029,6 +15046,7 @@ func (c *AccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -15171,6 +15189,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -15316,6 +15335,7 @@ func (c *AdsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15454,6 +15474,7 @@ func (c *AdsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -15864,6 +15885,7 @@ func (c *AdsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16251,6 +16273,7 @@ func (c *AdsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var 
body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16393,6 +16416,7 @@ func (c *AdsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16527,6 +16551,7 @@ func (c *AdvertiserGroupsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/advertiserGroups/{id}") @@ -16645,6 +16670,7 @@ func (c *AdvertiserGroupsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16783,6 +16809,7 @@ func (c *AdvertiserGroupsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -16988,6 +17015,7 @@ func (c *AdvertiserGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17189,6 +17217,7 @@ func (c *AdvertiserGroupsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17331,6 +17360,7 @@ func (c *AdvertiserGroupsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17476,6 +17506,7 @@ func (c *AdvertisersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17614,6 +17645,7 @@ func (c *AdvertisersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -17874,6 +17906,7 @@ func (c *AdvertisersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18118,6 +18151,7 @@ func (c *AdvertisersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18260,6 +18294,7 @@ func (c *AdvertisersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18403,6 +18438,7 @@ func (c *BrowsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18536,6 +18572,7 @@ func (c *CampaignCreativeAssociationsInsertCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaigncreativeassociation) if err != nil { @@ -18716,6 +18753,7 @@ func (c *CampaignCreativeAssociationsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18912,6 +18950,7 @@ func (c *CampaignsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19052,6 +19091,7 @@ func (c *CampaignsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19335,6 +19375,7 @@ func (c *CampaignsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19579,6 +19620,7 @@ func (c *CampaignsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19721,6 +19763,7 @@ func (c *CampaignsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19866,6 +19909,7 @@ func (c *ChangeLogsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20168,6 +20212,7 @@ func (c *ChangeLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20544,6 +20589,7 @@ func (c *CitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20710,6 +20756,7 @@ func (c *ConnectionTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20857,6 +20904,7 @@ func (c *ConnectionTypesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20986,6 +21034,7 @@ func (c *ContentCategoriesDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/contentCategories/{id}") @@ -21104,6 +21153,7 @@ func (c *ContentCategoriesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21242,6 +21292,7 @@ func (c *ContentCategoriesInsertCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -21447,6 +21498,7 @@ func (c *ContentCategoriesListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21648,6 +21700,7 @@ func (c *ContentCategoriesPatchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -21790,6 +21843,7 @@ func (c *ContentCategoriesUpdateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -21924,6 +21978,7 @@ func (c *ConversionsBatchinsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.conversionsbatchinsertrequest) if err != nil { @@ -22069,6 +22124,7 @@ func (c *CountriesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22216,6 +22272,7 @@ func (c *CountriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22400,6 +22457,7 @@ func (c *CreativeAssetsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativeassetmetadata) if err != nil { @@ -22610,6 +22668,7 @@ func (c *CreativeFieldValuesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{creativeFieldId}/creativeFieldValues/{id}") @@ -22739,6 +22798,7 @@ func (c *CreativeFieldValuesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22888,6 +22948,7 @@ func (c *CreativeFieldValuesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23098,6 +23159,7 @@ func (c *CreativeFieldValuesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23310,6 +23372,7 @@ func (c *CreativeFieldValuesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23463,6 +23526,7 @@ func (c *CreativeFieldValuesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23606,6 +23670,7 @@ func (c *CreativeFieldsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{id}") @@ -23724,6 +23789,7 @@ func (c *CreativeFieldsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", 
c.ifNoneMatch_) } @@ -23862,6 +23928,7 @@ func (c *CreativeFieldsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24078,6 +24145,7 @@ func (c *CreativeFieldsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24286,6 +24354,7 @@ func (c *CreativeFieldsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24428,6 +24497,7 @@ func (c *CreativeFieldsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24573,6 +24643,7 @@ func (c *CreativeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24711,6 +24782,7 @@ func (c *CreativeGroupsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -24934,6 +25006,7 @@ func (c *CreativeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25148,6 +25221,7 @@ func (c *CreativeGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25290,6 +25364,7 @@ func (c *CreativeGroupsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25435,6 +25510,7 @@ func (c *CreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25573,6 +25649,7 @@ func (c *CreativesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -25890,6 +25967,7 @@ func (c *CreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -26205,6 +26283,7 @@ func (c *CreativesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26347,6 +26426,7 @@ func (c *CreativesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26496,6 +26576,7 @@ func (c *DimensionValuesQueryCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dimensionvaluerequest) if err != nil { @@ -26675,6 +26756,7 @@ func (c *DirectorySiteContactsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -26897,6 +26979,7 @@ func (c *DirectorySiteContactsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27115,6 +27198,7 @@ func (c *DirectorySitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27253,6 +27337,7 @@ func (c *DirectorySitesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.directorysite) if err != nil { @@ -27511,6 +27596,7 @@ func (c *DirectorySitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27749,6 +27835,7 @@ func (c *DynamicTargetingKeysDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/dynamicTargetingKeys/{objectId}") @@ -27886,6 +27973,7 @@ func (c *DynamicTargetingKeysInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dynamictargetingkey) if err != nil { @@ -28063,6 +28151,7 @@ func (c *DynamicTargetingKeysListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28227,6 +28316,7 @@ func (c *EventTagsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/eventTags/{id}") @@ -28345,6 +28435,7 @@ func (c *EventTagsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28483,6 +28574,7 @@ func (c *EventTagsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -28734,6 +28826,7 @@ func (c *EventTagsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28947,6 +29040,7 @@ func (c *EventTagsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29089,6 +29183,7 @@ func (c *EventTagsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29234,6 +29329,7 @@ func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29446,6 +29542,7 @@ func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29653,6 +29750,7 @@ func (c *FloodlightActivitiesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/{id}") @@ -29766,6 +29864,7 @@ func (c *FloodlightActivitiesGeneratetagCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/generatetag") @@ -29910,6 +30009,7 @@ func (c *FloodlightActivitiesGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30048,6 +30148,7 @@ func (c *FloodlightActivitiesInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30319,6 +30420,7 @@ func (c *FloodlightActivitiesListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30567,6 +30669,7 @@ func (c *FloodlightActivitiesPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30709,6 +30812,7 @@ func (c *FloodlightActivitiesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30854,6 +30958,7 @@ func (c *FloodlightActivityGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30992,6 +31097,7 @@ func (c *FloodlightActivityGroupsInsertCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31228,6 +31334,7 @@ func (c *FloodlightActivityGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31455,6 +31562,7 @@ func (c *FloodlightActivityGroupsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31597,6 +31705,7 @@ func (c *FloodlightActivityGroupsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31742,6 +31851,7 @@ func (c 
*FloodlightConfigurationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31902,6 +32012,7 @@ func (c *FloodlightConfigurationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32041,6 +32152,7 @@ func (c *FloodlightConfigurationsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32183,6 +32295,7 @@ func (c *FloodlightConfigurationsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32330,6 +32443,7 @@ func (c *InventoryItemsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32576,6 +32690,7 @@ func (c *InventoryItemsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32813,6 +32928,7 @@ func (c *LandingPagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/campaigns/{campaignId}/landingPages/{id}") @@ -32942,6 +33058,7 @@ func (c *LandingPagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33091,6 +33208,7 @@ func (c *LandingPagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33245,6 +33363,7 @@ func (c *LandingPagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33387,6 +33506,7 @@ func (c *LandingPagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if 
err != nil { @@ -33540,6 +33660,7 @@ func (c *LandingPagesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33692,6 +33813,7 @@ func (c *MetrosListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33832,6 +33954,7 @@ func (c *MobileCarriersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33979,6 +34102,7 @@ func (c *MobileCarriersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34119,6 +34243,7 @@ func (c *OperatingSystemVersionsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34266,6 +34391,7 @@ func (c *OperatingSystemVersionsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34407,6 +34533,7 @@ func (c *OperatingSystemsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34554,6 +34681,7 @@ func (c *OperatingSystemsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34696,6 +34824,7 @@ func (c *OrderDocumentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34945,6 +35074,7 @@ func (c *OrderDocumentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35185,6 +35315,7 @@ func (c *OrdersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35414,6 +35545,7 @@ func (c *OrdersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35640,6 +35772,7 @@ func (c *PlacementGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35778,6 +35911,7 @@ func (c *PlacementGroupsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36123,6 +36257,7 @@ func (c *PlacementGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36426,6 +36561,7 @@ func (c *PlacementGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36568,6 +36704,7 @@ func (c *PlacementGroupsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36702,6 +36839,7 @@ func (c *PlacementStrategiesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placementStrategies/{id}") @@ -36820,6 +36958,7 @@ func (c *PlacementStrategiesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36958,6 +37097,7 @@ func (c *PlacementStrategiesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37163,6 +37303,7 @@ func (c *PlacementStrategiesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -37364,6 +37505,7 @@ func (c *PlacementStrategiesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37506,6 +37648,7 @@ func (c *PlacementStrategiesUpdateCall) doRequest(alt string) (*http.Response, e 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37683,6 +37826,7 @@ func (c *PlacementsGeneratetagsCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placements/generatetags") @@ -37879,6 +38023,7 @@ func (c *PlacementsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -38017,6 +38162,7 @@ func (c *PlacementsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -38397,6 +38543,7 @@ func (c *PlacementsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -38734,6 +38881,7 @@ func (c *PlacementsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -38876,6 +39024,7 @@ func (c *PlacementsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -39021,6 +39170,7 @@ func (c *PlatformTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39168,6 +39318,7 @@ func (c *PlatformTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39308,6 +39459,7 @@ func (c *PostalCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39454,6 +39606,7 @@ func (c *PostalCodesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39594,6 +39747,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39813,6 +39967,7 @@ func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40028,6 +40183,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40168,6 +40324,7 @@ func (c *RemarketingListSharesGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40308,6 +40465,7 @@ func (c *RemarketingListSharesPatchCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -40450,6 +40608,7 @@ func (c *RemarketingListSharesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -40595,6 +40754,7 @@ func (c *RemarketingListsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40733,6 +40893,7 @@ func (c *RemarketingListsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -40942,6 +41103,7 @@ func (c *RemarketingListsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41155,6 +41317,7 @@ func (c *RemarketingListsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41297,6 +41460,7 @@ func (c *RemarketingListsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41431,6 +41595,7 @@ func (c *ReportsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}") @@ -41549,6 +41714,7 @@ func (c *ReportsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41687,6 +41853,7 @@ func (c *ReportsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -41878,6 +42045,7 @@ func (c *ReportsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42087,6 +42255,7 @@ func (c *ReportsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42237,6 +42406,7 @@ func (c *ReportsRunCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}/run") @@ -42379,6 +42549,7 @@ func (c *ReportsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42524,6 +42695,7 @@ func (c *ReportsCompatibleFieldsQueryCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42671,6 +42843,7 @@ func (c *ReportsFilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42882,6 +43055,7 @@ func (c *ReportsFilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43093,6 +43267,7 @@ func (c *SitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43231,6 +43406,7 @@ func (c *SitesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v 
} reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -43508,6 +43684,7 @@ func (c *SitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43759,6 +43936,7 @@ func (c *SitesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -43901,6 +44079,7 @@ func (c *SitesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44046,6 +44225,7 @@ func (c *SizesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44184,6 +44364,7 @@ func (c *SizesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.size) if err != nil { @@ -44359,6 +44540,7 @@ func (c *SizesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44523,6 +44705,7 @@ func (c *SubaccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44661,6 +44844,7 @@ func (c *SubaccountsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -44865,6 +45049,7 @@ func (c *SubaccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45066,6 +45251,7 @@ func (c *SubaccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45208,6 +45394,7 @@ func (c *SubaccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45353,6 +45540,7 @@ func (c *TargetableRemarketingListsGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45558,6 +45746,7 @@ func (c *TargetableRemarketingListsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45773,6 +45962,7 @@ func (c *UserProfilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45910,6 +46100,7 @@ func (c *UserProfilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46036,6 +46227,7 @@ func (c *UserRolePermissionGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46183,6 +46375,7 @@ func (c *UserRolePermissionGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46324,6 +46517,7 @@ func (c *UserRolePermissionsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46482,6 +46676,7 @@ func (c *UserRolePermissionsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46618,6 +46813,7 @@ func (c *UserRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/userRoles/{id}") @@ -46736,6 +46932,7 @@ func (c *UserRolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46874,6 +47071,7 @@ func (c *UserRolesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole) if 
err != nil {
@@ -47093,6 +47291,7 @@ func (c *UserRolesListCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -47305,6 +47504,7 @@ func (c *UserRolesPatchCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	var body io.Reader = nil
 	body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole)
 	if err != nil {
@@ -47447,6 +47647,7 @@ func (c *UserRolesUpdateCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	var body io.Reader = nil
 	body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole)
 	if err != nil {
diff --git a/vendor/google.golang.org/api/dfareporting/v2.5beta1/dfareporting-gen.go b/vendor/google.golang.org/api/dfareporting/v2.5beta1/dfareporting-gen.go
index dca0f142b..b0425a330 100644
--- a/vendor/google.golang.org/api/dfareporting/v2.5beta1/dfareporting-gen.go
+++ b/vendor/google.golang.org/api/dfareporting/v2.5beta1/dfareporting-gen.go
@@ -124,9 +124,10 @@ func New(client *http.Client) (*Service, error) {
 }
 
 type Service struct {
-	client    *http.Client
-	BasePath  string // API endpoint base URL
-	UserAgent string // optional additional User-Agent fragment
+	client                    *http.Client
+	BasePath                  string // API endpoint base URL
+	UserAgent                 string // optional additional User-Agent fragment
+	GoogleClientHeaderElement string // client header fragment, for Google use only
 
 	AccountActiveAdSummaries *AccountActiveAdSummariesService
 
@@ -250,6 +251,10 @@ func (s *Service) userAgent() string {
 	return googleapi.UserAgent + " " + s.UserAgent
 }
 
+func (s *Service) clientHeader() string {
+	return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
+}
+
 func NewAccountActiveAdSummariesService(s *Service) *AccountActiveAdSummariesService {
 	rs := &AccountActiveAdSummariesService{s: s}
 	return rs
@@ -13014,6 +13019,7 @@ func (c *AccountActiveAdSummariesGetCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -13163,6 +13169,7 @@ func (c *AccountPermissionGroupsGetCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -13310,6 +13317,7 @@ func (c *AccountPermissionGroupsListCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -13451,6 +13459,7 @@ func (c *AccountPermissionsGetCall) doRequest(alt string) (*http.Response, error
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -13598,6 +13607,7 @@ func (c *AccountPermissionsListCall) doRequest(alt string) (*http.Response, erro
 		reqHeaders[k] = v
 	}
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13738,6 +13748,7 @@ func (c *AccountUserProfilesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13876,6 +13887,7 @@ func (c *AccountUserProfilesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14102,6 +14114,7 @@ func (c *AccountUserProfilesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14320,6 +14333,7 @@ func (c *AccountUserProfilesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14462,6 +14476,7 @@ func (c *AccountUserProfilesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14607,6 +14622,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14823,6 +14839,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15029,6 +15046,7 @@ func (c *AccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -15171,6 +15189,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -15316,6 +15335,7 @@ func (c *AdsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15454,6 +15474,7 @@ func (c *AdsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -15864,6 +15885,7 @@ func (c *AdsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16251,6 +16273,7 @@ func (c *AdsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16393,6 +16416,7 @@ func (c *AdsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16527,6 +16551,7 @@ func (c *AdvertiserGroupsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/advertiserGroups/{id}") @@ -16645,6 +16670,7 @@ func (c *AdvertiserGroupsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16783,6 +16809,7 @@ func (c *AdvertiserGroupsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -16988,6 +17015,7 @@ func (c *AdvertiserGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17189,6 +17217,7 @@ func (c *AdvertiserGroupsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17331,6 +17360,7 @@ func (c *AdvertiserGroupsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17476,6 +17506,7 @@ func (c *AdvertisersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17614,6 +17645,7 @@ func (c *AdvertisersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -17874,6 +17906,7 @@ func (c *AdvertisersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18118,6 +18151,7 @@ func (c *AdvertisersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18260,6 +18294,7 @@ func (c *AdvertisersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18403,6 +18438,7 @@ func (c *BrowsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18536,6 +18572,7 @@ func (c *CampaignCreativeAssociationsInsertCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaigncreativeassociation) if err != nil { @@ -18716,6 +18753,7 @@ func (c *CampaignCreativeAssociationsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18912,6 +18950,7 @@ func (c *CampaignsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19052,6 +19091,7 @@ func (c *CampaignsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19335,6 +19375,7 @@ func (c *CampaignsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19579,6 +19620,7 @@ func (c *CampaignsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19721,6 +19763,7 @@ func (c *CampaignsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19866,6 +19909,7 @@ func (c *ChangeLogsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20168,6 +20212,7 @@ func (c *ChangeLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20544,6 +20589,7 @@ func (c *CitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20710,6 +20756,7 @@ func (c *ConnectionTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20857,6 +20904,7 @@ func (c *ConnectionTypesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20986,6 +21034,7 @@ func (c *ContentCategoriesDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/contentCategories/{id}") @@ -21104,6 +21153,7 @@ func (c *ContentCategoriesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21242,6 +21292,7 @@ func (c *ContentCategoriesInsertCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -21447,6 +21498,7 @@ func (c *ContentCategoriesListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21648,6 +21700,7 @@ func (c *ContentCategoriesPatchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -21790,6 +21843,7 @@ func (c *ContentCategoriesUpdateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -21924,6 +21978,7 @@ func (c *ConversionsBatchinsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.conversionsbatchinsertrequest) if err != nil { @@ -22069,6 +22124,7 @@ func (c *CountriesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22216,6 +22272,7 @@ func (c *CountriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22400,6 +22457,7 @@ func (c *CreativeAssetsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativeassetmetadata) if err != nil { @@ -22610,6 +22668,7 @@ func (c *CreativeFieldValuesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{creativeFieldId}/creativeFieldValues/{id}") @@ -22739,6 +22798,7 @@ func (c *CreativeFieldValuesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22888,6 +22948,7 @@ func (c *CreativeFieldValuesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23098,6 +23159,7 @@ func (c *CreativeFieldValuesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23310,6 +23372,7 @@ func (c *CreativeFieldValuesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23463,6 +23526,7 @@ func (c *CreativeFieldValuesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { 
@@ -23606,6 +23670,7 @@ func (c *CreativeFieldsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{id}") @@ -23724,6 +23789,7 @@ func (c *CreativeFieldsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23862,6 +23928,7 @@ func (c *CreativeFieldsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24078,6 +24145,7 @@ func (c *CreativeFieldsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24286,6 +24354,7 @@ func (c *CreativeFieldsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24428,6 +24497,7 @@ func (c *CreativeFieldsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24573,6 +24643,7 @@ func (c *CreativeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24711,6 +24782,7 @@ func (c *CreativeGroupsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -24934,6 +25006,7 @@ func (c *CreativeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25148,6 +25221,7 @@ func (c *CreativeGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25290,6 +25364,7 @@ func (c *CreativeGroupsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, 
err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25435,6 +25510,7 @@ func (c *CreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25573,6 +25649,7 @@ func (c *CreativesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -25890,6 +25967,7 @@ func (c *CreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -26205,6 +26283,7 @@ func (c *CreativesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26347,6 +26426,7 @@ func (c *CreativesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26496,6 +26576,7 @@ func (c *DimensionValuesQueryCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dimensionvaluerequest) if err != nil { @@ -26675,6 +26756,7 @@ func (c *DirectorySiteContactsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -26897,6 +26979,7 @@ func (c *DirectorySiteContactsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27115,6 +27198,7 @@ func (c *DirectorySitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27253,6 +27337,7 @@ func (c *DirectorySitesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.directorysite) if err != nil { @@ -27511,6 +27596,7 @@ func (c *DirectorySitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27749,6 +27835,7 @@ func (c *DynamicTargetingKeysDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/dynamicTargetingKeys/{objectId}") @@ -27886,6 +27973,7 @@ func (c *DynamicTargetingKeysInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dynamictargetingkey) if err != nil { @@ -28063,6 +28151,7 @@ func (c *DynamicTargetingKeysListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28227,6 +28316,7 @@ func (c *EventTagsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/eventTags/{id}") @@ -28345,6 +28435,7 @@ func (c *EventTagsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28483,6 +28574,7 @@ func (c *EventTagsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -28734,6 +28826,7 @@ func (c *EventTagsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28947,6 +29040,7 @@ func (c *EventTagsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29089,6 +29183,7 @@ func (c *EventTagsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29234,6 +29329,7 @@ func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29446,6 +29542,7 @@ func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29653,6 +29750,7 @@ func (c *FloodlightActivitiesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/{id}") @@ -29766,6 +29864,7 @@ func (c *FloodlightActivitiesGeneratetagCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/generatetag") @@ -29910,6 +30009,7 @@ func (c *FloodlightActivitiesGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30048,6 +30148,7 @@ func (c *FloodlightActivitiesInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30319,6 +30420,7 @@ func (c *FloodlightActivitiesListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30567,6 +30669,7 @@ func (c *FloodlightActivitiesPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30709,6 +30812,7 @@ func (c *FloodlightActivitiesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30854,6 +30958,7 @@ func (c *FloodlightActivityGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30992,6 +31097,7 @@ func (c *FloodlightActivityGroupsInsertCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31228,6 +31334,7 @@ func (c *FloodlightActivityGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31455,6 +31562,7 @@ func (c *FloodlightActivityGroupsPatchCall) 
doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31597,6 +31705,7 @@ func (c *FloodlightActivityGroupsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31742,6 +31851,7 @@ func (c *FloodlightConfigurationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31902,6 +32012,7 @@ func (c *FloodlightConfigurationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32041,6 +32152,7 @@ func (c *FloodlightConfigurationsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32183,6 +32295,7 @@ func (c *FloodlightConfigurationsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32330,6 +32443,7 @@ func (c *InventoryItemsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32576,6 +32690,7 @@ func (c *InventoryItemsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32813,6 +32928,7 @@ func (c *LandingPagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/campaigns/{campaignId}/landingPages/{id}") @@ -32942,6 +33058,7 @@ func (c *LandingPagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33091,6 +33208,7 @@ func (c *LandingPagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33245,6 +33363,7 @@ func (c *LandingPagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33387,6 +33506,7 @@ func (c *LandingPagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33540,6 +33660,7 @@ func (c *LandingPagesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33692,6 +33813,7 @@ func (c *MetrosListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33832,6 +33954,7 @@ func (c *MobileCarriersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33979,6 +34102,7 @@ func (c *MobileCarriersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34119,6 +34243,7 @@ func (c *OperatingSystemVersionsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34266,6 +34391,7 @@ func (c *OperatingSystemVersionsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34407,6 +34533,7 @@ func (c *OperatingSystemsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34554,6 +34681,7 @@ func (c *OperatingSystemsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34696,6 +34824,7 @@ func (c *OrderDocumentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34945,6 +35074,7 @@ func (c *OrderDocumentsListCall) doRequest(alt 
string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35185,6 +35315,7 @@ func (c *OrdersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35414,6 +35545,7 @@ func (c *OrdersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35640,6 +35772,7 @@ func (c *PlacementGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35778,6 +35911,7 @@ func (c *PlacementGroupsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36123,6 +36257,7 @@ func (c *PlacementGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36426,6 +36561,7 @@ func (c *PlacementGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36568,6 +36704,7 @@ func (c *PlacementGroupsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36702,6 +36839,7 @@ func (c *PlacementStrategiesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placementStrategies/{id}") @@ -36820,6 +36958,7 @@ func (c *PlacementStrategiesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36958,6 +37097,7 @@ func (c *PlacementStrategiesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37163,6 +37303,7 @@ func (c *PlacementStrategiesListCall) 
doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -37364,6 +37505,7 @@ func (c *PlacementStrategiesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37506,6 +37648,7 @@ func (c *PlacementStrategiesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37683,6 +37826,7 @@ func (c *PlacementsGeneratetagsCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placements/generatetags") @@ -37879,6 +38023,7 @@ func (c *PlacementsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -38017,6 +38162,7 @@ func (c *PlacementsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -38397,6 +38543,7 @@ func (c *PlacementsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -38734,6 +38881,7 @@ func (c *PlacementsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -38876,6 +39024,7 @@ func (c *PlacementsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -39021,6 +39170,7 @@ func (c *PlatformTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39168,6 +39318,7 @@ func (c *PlatformTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39308,6 +39459,7 @@ func (c 
*PostalCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39454,6 +39606,7 @@ func (c *PostalCodesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39594,6 +39747,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39813,6 +39967,7 @@ func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40028,6 +40183,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40168,6 +40324,7 @@ func (c *RemarketingListSharesGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40308,6 +40465,7 @@ func (c *RemarketingListSharesPatchCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -40450,6 +40608,7 @@ func (c *RemarketingListSharesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -40595,6 +40754,7 @@ func (c *RemarketingListsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40733,6 +40893,7 @@ func (c *RemarketingListsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -40942,6 +41103,7 @@ func (c *RemarketingListsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41155,6 +41317,7 @@ func (c *RemarketingListsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41297,6 +41460,7 @@ func (c *RemarketingListsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41431,6 +41595,7 @@ func (c *ReportsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}") @@ -41549,6 +41714,7 @@ func (c *ReportsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41687,6 +41853,7 @@ func (c *ReportsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -41878,6 +42045,7 @@ func (c *ReportsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42087,6 +42255,7 @@ func (c *ReportsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42237,6 +42406,7 @@ func (c *ReportsRunCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}/run") @@ -42379,6 +42549,7 @@ func (c *ReportsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42524,6 +42695,7 @@ func (c *ReportsCompatibleFieldsQueryCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42671,6 +42843,7 @@ func (c *ReportsFilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42882,6 +43055,7 @@ func (c 
*ReportsFilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43093,6 +43267,7 @@ func (c *SitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43231,6 +43406,7 @@ func (c *SitesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -43508,6 +43684,7 @@ func (c *SitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43759,6 +43936,7 @@ func (c *SitesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -43901,6 +44079,7 @@ func (c *SitesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44046,6 +44225,7 @@ func (c *SizesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44184,6 +44364,7 @@ func (c *SizesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.size) if err != nil { @@ -44359,6 +44540,7 @@ func (c *SizesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44523,6 +44705,7 @@ func (c *SubaccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44661,6 +44844,7 @@ func (c *SubaccountsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -44865,6 +45049,7 @@ func (c *SubaccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45066,6 +45251,7 @@ func (c *SubaccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45208,6 +45394,7 @@ func (c *SubaccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45353,6 +45540,7 @@ func (c *TargetableRemarketingListsGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45558,6 +45746,7 @@ func (c *TargetableRemarketingListsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45773,6 +45962,7 @@ func (c *UserProfilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45910,6 +46100,7 @@ func (c *UserProfilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46036,6 +46227,7 @@ func (c *UserRolePermissionGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46183,6 +46375,7 @@ func (c *UserRolePermissionGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46324,6 +46517,7 @@ func (c *UserRolePermissionsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46482,6 +46676,7 @@ func (c *UserRolePermissionsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46618,6 +46813,7 @@ func (c *UserRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := 
googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/userRoles/{id}")
@@ -46736,6 +46932,7 @@ func (c *UserRolesGetCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -46874,6 +47071,7 @@ func (c *UserRolesInsertCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	var body io.Reader = nil
 	body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole)
 	if err != nil {
@@ -47093,6 +47291,7 @@ func (c *UserRolesListCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -47305,6 +47504,7 @@ func (c *UserRolesPatchCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	var body io.Reader = nil
 	body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole)
 	if err != nil {
@@ -47447,6 +47647,7 @@ func (c *UserRolesUpdateCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	var body io.Reader = nil
 	body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole)
 	if err != nil {
diff --git a/vendor/google.golang.org/api/dfareporting/v2.6/dfareporting-gen.go b/vendor/google.golang.org/api/dfareporting/v2.6/dfareporting-gen.go
index a3b5a83bc..b90a99ce0 100644
--- a/vendor/google.golang.org/api/dfareporting/v2.6/dfareporting-gen.go
+++ b/vendor/google.golang.org/api/dfareporting/v2.6/dfareporting-gen.go
@@ -126,9 +126,10 @@ func New(client *http.Client) (*Service, error) {
 }
 
 type Service struct {
-	client    *http.Client
-	BasePath  string // API endpoint base URL
-	UserAgent string // optional additional User-Agent fragment
+	client                    *http.Client
+	BasePath                  string // API endpoint base URL
+	UserAgent                 string // optional additional User-Agent fragment
+	GoogleClientHeaderElement string // client header fragment, for Google use only
 
 	AccountActiveAdSummaries *AccountActiveAdSummariesService
@@ -256,6 +257,10 @@ func (s *Service) userAgent() string {
 	return googleapi.UserAgent + " " + s.UserAgent
 }
 
+func (s *Service) clientHeader() string {
+	return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
+}
+
 func NewAccountActiveAdSummariesService(s *Service) *AccountActiveAdSummariesService {
 	rs := &AccountActiveAdSummariesService{s: s}
 	return rs
@@ -13404,6 +13409,7 @@ func (c *AccountActiveAdSummariesGetCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -13553,6 +13559,7 @@ func (c *AccountPermissionGroupsGetCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -13700,6 +13707,7 @@ func (c *AccountPermissionGroupsListCall)
doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13841,6 +13849,7 @@ func (c *AccountPermissionsGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13988,6 +13997,7 @@ func (c *AccountPermissionsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14128,6 +14138,7 @@ func (c *AccountUserProfilesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14266,6 +14277,7 @@ func (c *AccountUserProfilesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14492,6 +14504,7 @@ func (c *AccountUserProfilesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14710,6 +14723,7 @@ func (c *AccountUserProfilesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14852,6 +14866,7 @@ func (c *AccountUserProfilesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14997,6 +15012,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15213,6 +15229,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15419,6 +15436,7 @@ func (c *AccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -15561,6 +15579,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -15706,6 +15725,7 @@ func (c *AdsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15844,6 +15864,7 @@ func (c *AdsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16254,6 +16275,7 @@ func (c *AdsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16641,6 +16663,7 @@ func (c *AdsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16783,6 +16806,7 @@ func (c *AdsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16917,6 +16941,7 @@ func (c *AdvertiserGroupsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/advertiserGroups/{id}") @@ -17035,6 +17060,7 @@ func (c *AdvertiserGroupsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17173,6 +17199,7 @@ func (c *AdvertiserGroupsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17378,6 +17405,7 @@ func (c *AdvertiserGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17579,6 +17607,7 @@ func (c *AdvertiserGroupsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17721,6 +17750,7 @@ func (c *AdvertiserGroupsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17866,6 +17896,7 @@ func (c *AdvertisersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18004,6 +18035,7 @@ func (c *AdvertisersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18264,6 +18296,7 @@ func (c *AdvertisersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18508,6 +18541,7 @@ func (c *AdvertisersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18650,6 +18684,7 @@ func (c *AdvertisersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18793,6 +18828,7 @@ func (c *BrowsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18926,6 +18962,7 @@ func (c *CampaignCreativeAssociationsInsertCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaigncreativeassociation) if err != nil { @@ -19106,6 +19143,7 @@ func (c *CampaignCreativeAssociationsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19302,6 +19340,7 @@ func (c *CampaignsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19442,6 +19481,7 @@ func (c *CampaignsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19725,6 +19765,7 @@ func (c *CampaignsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19969,6 +20010,7 @@ func (c *CampaignsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -20111,6 +20153,7 @@ func (c *CampaignsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -20256,6 +20299,7 @@ func (c *ChangeLogsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20558,6 +20602,7 @@ func (c *ChangeLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20934,6 +20979,7 @@ func (c *CitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21100,6 +21146,7 @@ func (c *ConnectionTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21247,6 +21294,7 @@ func (c *ConnectionTypesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21376,6 +21424,7 @@ func (c *ContentCategoriesDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/contentCategories/{id}") @@ -21494,6 +21543,7 @@ func (c *ContentCategoriesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21632,6 +21682,7 @@ func (c *ContentCategoriesInsertCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -21837,6 +21888,7 @@ func (c *ContentCategoriesListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22038,6 +22090,7 @@ func (c *ContentCategoriesPatchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -22180,6 +22233,7 @@ func (c *ContentCategoriesUpdateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -22314,6 +22368,7 @@ func (c *ConversionsBatchinsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.conversionsbatchinsertrequest) if err != nil { @@ -22459,6 +22514,7 @@ func (c *CountriesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22606,6 +22662,7 @@ func (c *CountriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22790,6 +22847,7 @@ func (c *CreativeAssetsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativeassetmetadata) if err != nil { @@ -23000,6 +23058,7 @@ func (c *CreativeFieldValuesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{creativeFieldId}/creativeFieldValues/{id}") @@ -23129,6 +23188,7 @@ func (c *CreativeFieldValuesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23278,6 +23338,7 @@ func (c *CreativeFieldValuesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23488,6 +23549,7 @@ func (c *CreativeFieldValuesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23700,6 +23762,7 @@ func (c *CreativeFieldValuesPatchCall) 
doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23853,6 +23916,7 @@ func (c *CreativeFieldValuesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23996,6 +24060,7 @@ func (c *CreativeFieldsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{id}") @@ -24114,6 +24179,7 @@ func (c *CreativeFieldsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24252,6 +24318,7 @@ func (c *CreativeFieldsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24468,6 +24535,7 @@ func (c *CreativeFieldsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24676,6 +24744,7 @@ func (c *CreativeFieldsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24818,6 +24887,7 @@ func (c *CreativeFieldsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24963,6 +25033,7 @@ func (c *CreativeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25101,6 +25172,7 @@ func (c *CreativeGroupsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25324,6 +25396,7 @@ func (c *CreativeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25538,6 +25611,7 @@ func (c *CreativeGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25680,6 +25754,7 @@ func (c *CreativeGroupsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25825,6 +25900,7 @@ func (c *CreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25963,6 +26039,7 @@ func (c *CreativesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26280,6 +26357,7 @@ func (c *CreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -26595,6 +26673,7 @@ func (c *CreativesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26737,6 +26816,7 @@ func (c *CreativesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26886,6 +26966,7 @@ func (c *DimensionValuesQueryCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dimensionvaluerequest) if err != nil { @@ -27065,6 +27146,7 @@ func (c *DirectorySiteContactsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27287,6 +27369,7 @@ func (c *DirectorySiteContactsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27505,6 +27588,7 @@ func (c *DirectorySitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27643,6 +27727,7 @@ func (c *DirectorySitesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.directorysite) if err != nil { @@ -27901,6 +27986,7 @@ func (c *DirectorySitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28139,6 +28225,7 @@ func (c *DynamicTargetingKeysDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/dynamicTargetingKeys/{objectId}") @@ -28276,6 +28363,7 @@ func (c *DynamicTargetingKeysInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dynamictargetingkey) if err != nil { @@ -28453,6 +28541,7 @@ func (c *DynamicTargetingKeysListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28617,6 +28706,7 @@ func (c *EventTagsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/eventTags/{id}") @@ -28735,6 +28825,7 @@ func (c *EventTagsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28873,6 +28964,7 @@ func (c *EventTagsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29124,6 +29216,7 @@ func (c *EventTagsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29337,6 +29430,7 @@ func (c *EventTagsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29479,6 +29573,7 @@ func (c *EventTagsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29624,6 +29719,7 @@ func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29836,6 +29932,7 @@ func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30043,6 +30140,7 @@ func (c *FloodlightActivitiesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/{id}") @@ -30156,6 +30254,7 @@ func (c *FloodlightActivitiesGeneratetagCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/generatetag") @@ -30300,6 +30399,7 @@ func (c *FloodlightActivitiesGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30438,6 +30538,7 @@ func (c *FloodlightActivitiesInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30709,6 +30810,7 @@ func (c *FloodlightActivitiesListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30957,6 +31059,7 @@ func (c *FloodlightActivitiesPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -31099,6 +31202,7 @@ func (c *FloodlightActivitiesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -31244,6 +31348,7 @@ func (c *FloodlightActivityGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31382,6 +31487,7 @@ func (c *FloodlightActivityGroupsInsertCall) 
doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31618,6 +31724,7 @@ func (c *FloodlightActivityGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31845,6 +31952,7 @@ func (c *FloodlightActivityGroupsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31987,6 +32095,7 @@ func (c *FloodlightActivityGroupsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -32132,6 +32241,7 @@ func (c *FloodlightConfigurationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32292,6 +32402,7 @@ func (c *FloodlightConfigurationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32431,6 +32542,7 @@ func (c *FloodlightConfigurationsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32573,6 +32685,7 @@ func (c *FloodlightConfigurationsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32720,6 +32833,7 @@ func (c *InventoryItemsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32966,6 +33080,7 @@ func (c *InventoryItemsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33203,6 +33318,7 @@ func (c *LandingPagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, 
"userprofiles/{profileId}/campaigns/{campaignId}/landingPages/{id}") @@ -33332,6 +33448,7 @@ func (c *LandingPagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33481,6 +33598,7 @@ func (c *LandingPagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33635,6 +33753,7 @@ func (c *LandingPagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33777,6 +33896,7 @@ func (c *LandingPagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33930,6 +34050,7 @@ func (c *LandingPagesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -34082,6 +34203,7 @@ func (c *LanguagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34220,6 +34342,7 @@ func (c *MetrosListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34360,6 +34483,7 @@ func (c *MobileCarriersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34507,6 +34631,7 @@ func (c *MobileCarriersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34647,6 +34772,7 @@ func (c *OperatingSystemVersionsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34794,6 +34920,7 @@ func (c *OperatingSystemVersionsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34935,6 +35062,7 @@ func (c 
*OperatingSystemsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35082,6 +35210,7 @@ func (c *OperatingSystemsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35224,6 +35353,7 @@ func (c *OrderDocumentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35473,6 +35603,7 @@ func (c *OrderDocumentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35713,6 +35844,7 @@ func (c *OrdersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35942,6 +36074,7 @@ func (c *OrdersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36168,6 +36301,7 @@ func (c *PlacementGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36306,6 +36440,7 @@ func (c *PlacementGroupsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36651,6 +36786,7 @@ func (c *PlacementGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36954,6 +37090,7 @@ func (c *PlacementGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -37096,6 +37233,7 @@ func (c *PlacementGroupsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -37230,6 +37368,7 @@ func (c *PlacementStrategiesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placementStrategies/{id}") @@ -37348,6 +37487,7 @@ func (c *PlacementStrategiesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -37486,6 +37626,7 @@ func (c *PlacementStrategiesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37691,6 +37832,7 @@ func (c *PlacementStrategiesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -37892,6 +38034,7 @@ func (c *PlacementStrategiesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -38034,6 +38177,7 @@ func (c *PlacementStrategiesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -38211,6 +38355,7 @@ func (c *PlacementsGeneratetagsCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placements/generatetags") @@ -38407,6 +38552,7 @@ func (c *PlacementsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -38545,6 +38691,7 @@ func (c *PlacementsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -38925,6 +39072,7 @@ func (c *PlacementsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39262,6 +39410,7 @@ func (c *PlacementsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ 
-39404,6 +39553,7 @@ func (c *PlacementsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -39549,6 +39699,7 @@ func (c *PlatformTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39696,6 +39847,7 @@ func (c *PlatformTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39836,6 +39988,7 @@ func (c *PostalCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39982,6 +40135,7 @@ func (c *PostalCodesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40122,6 +40276,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40341,6 +40496,7 @@ func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40556,6 +40712,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40696,6 +40853,7 @@ func (c *RemarketingListSharesGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40836,6 +40994,7 @@ func (c *RemarketingListSharesPatchCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -40978,6 +41137,7 @@ func (c *RemarketingListSharesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -41123,6 +41283,7 @@ func (c *RemarketingListsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = 
v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41261,6 +41422,7 @@ func (c *RemarketingListsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41470,6 +41632,7 @@ func (c *RemarketingListsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41683,6 +41846,7 @@ func (c *RemarketingListsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41825,6 +41989,7 @@ func (c *RemarketingListsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41959,6 +42124,7 @@ func (c *ReportsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}") @@ -42077,6 +42243,7 @@ func (c *ReportsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42215,6 +42382,7 @@ func (c *ReportsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42406,6 +42574,7 @@ func (c *ReportsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42615,6 +42784,7 @@ func (c *ReportsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42765,6 +42935,7 @@ func (c *ReportsRunCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}/run") @@ -42907,6 +43078,7 @@ func (c 
*ReportsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -43052,6 +43224,7 @@ func (c *ReportsCompatibleFieldsQueryCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -43199,6 +43372,7 @@ func (c *ReportsFilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43410,6 +43584,7 @@ func (c *ReportsFilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43621,6 +43796,7 @@ func (c *SitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43759,6 +43935,7 @@ func (c *SitesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44036,6 +44213,7 @@ func (c *SitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44287,6 +44465,7 @@ func (c *SitesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44429,6 +44608,7 @@ func (c *SitesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44574,6 +44754,7 @@ func (c *SizesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44712,6 +44893,7 @@ func (c *SizesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.size) if err != nil { @@ -44887,6 +45069,7 @@ func (c *SizesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45051,6 +45234,7 @@ func (c *SubaccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45189,6 +45373,7 @@ func (c *SubaccountsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45393,6 +45578,7 @@ func (c *SubaccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45594,6 +45780,7 @@ func (c *SubaccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45736,6 +45923,7 @@ func (c *SubaccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45881,6 +46069,7 @@ func (c *TargetableRemarketingListsGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46086,6 +46275,7 @@ func (c *TargetableRemarketingListsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46303,6 +46493,7 @@ func (c *TargetingTemplatesGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46441,6 +46632,7 @@ func (c *TargetingTemplatesInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetingtemplate) if err != nil { @@ -46652,6 +46844,7 @@ func (c *TargetingTemplatesListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46859,6 +47052,7 @@ func (c *TargetingTemplatesPatchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetingtemplate) if err != nil { @@ -47001,6 +47195,7 @@ func (c *TargetingTemplatesUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetingtemplate) if err != nil { @@ -47144,6 +47339,7 @@ func (c *UserProfilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47281,6 +47477,7 @@ func (c *UserProfilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47407,6 +47604,7 @@ func (c *UserRolePermissionGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47554,6 +47752,7 @@ func (c *UserRolePermissionGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47695,6 +47894,7 @@ func (c *UserRolePermissionsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47853,6 +48053,7 @@ func (c *UserRolePermissionsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47989,6 +48190,7 @@ func (c *UserRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/userRoles/{id}") @@ -48107,6 +48309,7 @@ func (c *UserRolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -48245,6 +48448,7 @@ func (c *UserRolesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole) if err != nil { @@ -48464,6 +48668,7 @@ func (c *UserRolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -48676,6 +48881,7 @@ func (c *UserRolesPatchCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	var body io.Reader = nil
 	body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole)
 	if err != nil {
@@ -48818,6 +49024,7 @@ func (c *UserRolesUpdateCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	var body io.Reader = nil
 	body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole)
 	if err != nil {
diff --git a/vendor/google.golang.org/api/dfareporting/v2.7/dfareporting-gen.go b/vendor/google.golang.org/api/dfareporting/v2.7/dfareporting-gen.go
index 089b16bcd..98d495d67 100644
--- a/vendor/google.golang.org/api/dfareporting/v2.7/dfareporting-gen.go
+++ b/vendor/google.golang.org/api/dfareporting/v2.7/dfareporting-gen.go
@@ -127,9 +127,10 @@ func New(client *http.Client) (*Service, error) {
 }
 
 type Service struct {
-	client    *http.Client
-	BasePath  string // API endpoint base URL
-	UserAgent string // optional additional User-Agent fragment
+	client                    *http.Client
+	BasePath                  string // API endpoint base URL
+	UserAgent                 string // optional additional User-Agent fragment
+	GoogleClientHeaderElement string // client header fragment, for Google use only
 
 	AccountActiveAdSummaries *AccountActiveAdSummariesService
@@ -259,6 +260,10 @@ func (s *Service) userAgent() string {
 	return googleapi.UserAgent + " " + s.UserAgent
 }
 
+func (s *Service) clientHeader() string {
+	return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
+}
+
 func NewAccountActiveAdSummariesService(s *Service) *AccountActiveAdSummariesService {
 	rs := &AccountActiveAdSummariesService{s: s}
 	return rs
@@ -13749,6 +13754,7 @@ func (c *AccountActiveAdSummariesGetCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -13898,6 +13904,7 @@ func (c *AccountPermissionGroupsGetCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -14045,6 +14052,7 @@ func (c *AccountPermissionGroupsListCall) doRequest(alt string) (*http.Response,
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -14186,6 +14194,7 @@ func (c *AccountPermissionsGetCall) doRequest(alt string) (*http.Response, error
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -14333,6 +14342,7 @@ func (c *AccountPermissionsListCall) doRequest(alt string) (*http.Response, erro
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -14473,6 +14483,7 @@ func (c *AccountUserProfilesGetCall) doRequest(alt
string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14611,6 +14622,7 @@ func (c *AccountUserProfilesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -14837,6 +14849,7 @@ func (c *AccountUserProfilesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15055,6 +15068,7 @@ func (c *AccountUserProfilesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -15197,6 +15211,7 @@ func (c *AccountUserProfilesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accountuserprofile) if err != nil { @@ -15342,6 +15357,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15558,6 +15574,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15764,6 +15781,7 @@ func (c *AccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -15906,6 +15924,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -16051,6 +16070,7 @@ func (c *AdsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16189,6 +16209,7 @@ func (c *AdsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -16566,6 +16587,7 @@ func (c *AdsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16896,6 +16918,7 @@ func (c *AdsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -17038,6 +17061,7 @@ func (c *AdsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ad) if err != nil { @@ -17172,6 +17196,7 @@ func (c *AdvertiserGroupsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/advertiserGroups/{id}") @@ -17290,6 +17315,7 @@ func (c *AdvertiserGroupsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17428,6 +17454,7 @@ func (c *AdvertiserGroupsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17633,6 +17660,7 @@ func (c *AdvertiserGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17834,6 +17862,7 @@ func (c *AdvertiserGroupsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -17976,6 +18005,7 @@ func (c *AdvertiserGroupsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertisergroup) if err != nil { @@ -18121,6 +18151,7 @@ func (c *AdvertisersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18259,6 +18290,7 @@ func (c *AdvertisersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18519,6 +18551,7 @@ func (c *AdvertisersListCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18763,6 +18796,7 @@ func (c *AdvertisersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -18905,6 +18939,7 @@ func (c *AdvertisersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.advertiser) if err != nil { @@ -19048,6 +19083,7 @@ func (c *BrowsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19181,6 +19217,7 @@ func (c *CampaignCreativeAssociationsInsertCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaigncreativeassociation) if err != nil { @@ -19361,6 +19398,7 @@ func (c *CampaignCreativeAssociationsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19557,6 +19595,7 @@ func (c *CampaignsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19697,6 +19736,7 @@ func (c *CampaignsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -19980,6 +20020,7 @@ func (c *CampaignsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20224,6 +20265,7 @@ func (c *CampaignsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -20366,6 +20408,7 @@ func (c *CampaignsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.campaign) if err != nil { @@ -20511,6 +20554,7 @@ func (c *ChangeLogsGetCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20813,6 +20857,7 @@ func (c *ChangeLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21189,6 +21234,7 @@ func (c *CitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21355,6 +21401,7 @@ func (c *ConnectionTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21502,6 +21549,7 @@ func (c *ConnectionTypesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21631,6 +21679,7 @@ func (c *ContentCategoriesDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/contentCategories/{id}") @@ -21749,6 +21798,7 @@ func (c *ContentCategoriesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21887,6 +21937,7 @@ func (c *ContentCategoriesInsertCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -22092,6 +22143,7 @@ func (c *ContentCategoriesListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22293,6 +22345,7 @@ func (c *ContentCategoriesPatchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -22435,6 +22488,7 @@ func (c *ContentCategoriesUpdateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contentcategory) if err != nil { @@ -22569,6 +22623,7 @@ func (c *ConversionsBatchinsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.conversionsbatchinsertrequest) if err != nil { @@ -22714,6 +22769,7 @@ func (c *CountriesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22861,6 +22917,7 @@ func (c *CountriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23045,6 +23102,7 @@ func (c *CreativeAssetsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativeassetmetadata) if err != nil { @@ -23255,6 +23313,7 @@ func (c *CreativeFieldValuesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{creativeFieldId}/creativeFieldValues/{id}") @@ -23384,6 +23443,7 @@ func (c *CreativeFieldValuesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23533,6 +23593,7 @@ func (c *CreativeFieldValuesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -23743,6 +23804,7 @@ func (c *CreativeFieldValuesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -23955,6 +24017,7 @@ func (c *CreativeFieldValuesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -24108,6 +24171,7 @@ func (c *CreativeFieldValuesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefieldvalue) if err != nil { @@ -24251,6 +24315,7 @@ func (c *CreativeFieldsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := 
googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/creativeFields/{id}") @@ -24369,6 +24434,7 @@ func (c *CreativeFieldsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24507,6 +24573,7 @@ func (c *CreativeFieldsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -24723,6 +24790,7 @@ func (c *CreativeFieldsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -24931,6 +24999,7 @@ func (c *CreativeFieldsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -25073,6 +25142,7 @@ func (c *CreativeFieldsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativefield) if err != nil { @@ -25218,6 +25288,7 @@ func (c *CreativeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25356,6 +25427,7 @@ func (c *CreativeGroupsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25579,6 +25651,7 @@ func (c *CreativeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -25793,6 +25866,7 @@ func (c *CreativeGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -25935,6 +26009,7 @@ func (c *CreativeGroupsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creativegroup) if err != nil { @@ -26080,6 +26155,7 @@ func (c *CreativesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -26218,6 +26294,7 @@ func (c *CreativesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26535,6 +26612,7 @@ func (c *CreativesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -26850,6 +26928,7 @@ func (c *CreativesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -26992,6 +27071,7 @@ func (c *CreativesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.creative) if err != nil { @@ -27141,6 +27221,7 @@ func (c *DimensionValuesQueryCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dimensionvaluerequest) if err != nil { @@ -27320,6 +27401,7 @@ func (c *DirectorySiteContactsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27542,6 +27624,7 @@ func (c *DirectorySiteContactsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27760,6 +27843,7 @@ func (c *DirectorySitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -27898,6 +27982,7 @@ func (c *DirectorySitesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.directorysite) if err != nil { @@ -28156,6 +28241,7 @@ func (c *DirectorySitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28394,6 +28480,7 @@ func (c *DynamicTargetingKeysDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil 
c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/dynamicTargetingKeys/{objectId}") @@ -28531,6 +28618,7 @@ func (c *DynamicTargetingKeysInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dynamictargetingkey) if err != nil { @@ -28708,6 +28796,7 @@ func (c *DynamicTargetingKeysListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -28872,6 +28961,7 @@ func (c *EventTagsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/eventTags/{id}") @@ -28990,6 +29080,7 @@ func (c *EventTagsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29128,6 +29219,7 @@ func (c *EventTagsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29379,6 +29471,7 @@ func (c *EventTagsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -29592,6 +29685,7 @@ func (c *EventTagsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29734,6 +29828,7 @@ func (c *EventTagsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventtag) if err != nil { @@ -29879,6 +29974,7 @@ func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30091,6 +30187,7 @@ func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30298,6 +30395,7 @@ func (c *FloodlightActivitiesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/{id}") @@ -30411,6 +30509,7 @@ func (c *FloodlightActivitiesGeneratetagCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/floodlightActivities/generatetag") @@ -30555,6 +30654,7 @@ func (c *FloodlightActivitiesGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -30693,6 +30793,7 @@ func (c *FloodlightActivitiesInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -30964,6 +31065,7 @@ func (c *FloodlightActivitiesListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31212,6 +31314,7 @@ func (c *FloodlightActivitiesPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -31354,6 +31457,7 @@ func (c *FloodlightActivitiesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivity) if err != nil { @@ -31499,6 +31603,7 @@ func (c *FloodlightActivityGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -31637,6 +31742,7 @@ func (c *FloodlightActivityGroupsInsertCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -31873,6 +31979,7 @@ func (c *FloodlightActivityGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32100,6 +32207,7 @@ func (c *FloodlightActivityGroupsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -32242,6 +32350,7 @@ func (c 
*FloodlightActivityGroupsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightactivitygroup) if err != nil { @@ -32387,6 +32496,7 @@ func (c *FloodlightConfigurationsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32547,6 +32657,7 @@ func (c *FloodlightConfigurationsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -32686,6 +32797,7 @@ func (c *FloodlightConfigurationsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32828,6 +32940,7 @@ func (c *FloodlightConfigurationsUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.floodlightconfiguration) if err != nil { @@ -32975,6 +33088,7 @@ func (c *InventoryItemsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33221,6 +33335,7 @@ func (c *InventoryItemsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33458,6 +33573,7 @@ func (c *LandingPagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/campaigns/{campaignId}/landingPages/{id}") @@ -33587,6 +33703,7 @@ func (c *LandingPagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -33736,6 +33853,7 @@ func (c *LandingPagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -33890,6 +34008,7 @@ func (c *LandingPagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34032,6 +34151,7 @@ func (c *LandingPagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -34185,6 +34305,7 @@ func (c *LandingPagesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.landingpage) if err != nil { @@ -34337,6 +34458,7 @@ func (c *LanguagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34475,6 +34597,7 @@ func (c *MetrosListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34615,6 +34738,7 @@ func (c *MobileCarriersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34762,6 +34886,7 @@ func (c *MobileCarriersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -34902,6 +35027,7 @@ func (c *OperatingSystemVersionsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35049,6 +35175,7 @@ func (c *OperatingSystemVersionsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35190,6 +35317,7 @@ func (c *OperatingSystemsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35337,6 +35465,7 @@ func (c *OperatingSystemsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35479,6 +35608,7 @@ func (c *OrderDocumentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35728,6 +35858,7 @@ func (c *OrderDocumentsListCall) doRequest(alt string) (*http.Response, 
error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -35968,6 +36099,7 @@ func (c *OrdersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36197,6 +36329,7 @@ func (c *OrdersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36423,6 +36556,7 @@ func (c *PlacementGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -36561,6 +36695,7 @@ func (c *PlacementGroupsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -36906,6 +37041,7 @@ func (c *PlacementGroupsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -37209,6 +37345,7 @@ func (c *PlacementGroupsPatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -37351,6 +37488,7 @@ func (c *PlacementGroupsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementgroup) if err != nil { @@ -37485,6 +37623,7 @@ func (c *PlacementStrategiesDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placementStrategies/{id}") @@ -37603,6 +37742,7 @@ func (c *PlacementStrategiesGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -37741,6 +37881,7 @@ func (c *PlacementStrategiesInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -37946,6 +38087,7 @@ func (c *PlacementStrategiesListCall) doRequest(alt string) 
(*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -38147,6 +38289,7 @@ func (c *PlacementStrategiesPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -38289,6 +38432,7 @@ func (c *PlacementStrategiesUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placementstrategy) if err != nil { @@ -38466,6 +38610,7 @@ func (c *PlacementsGeneratetagsCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/placements/generatetags") @@ -38662,6 +38807,7 @@ func (c *PlacementsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -38800,6 +38946,7 @@ func (c *PlacementsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -39180,6 +39327,7 @@ func (c *PlacementsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39517,6 +39665,7 @@ func (c *PlacementsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -39659,6 +39808,7 @@ func (c *PlacementsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.placement) if err != nil { @@ -39804,6 +39954,7 @@ func (c *PlatformTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -39951,6 +40102,7 @@ func (c *PlatformTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40091,6 +40243,7 @@ func (c *PostalCodesGetCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40237,6 +40390,7 @@ func (c *PostalCodesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40377,6 +40531,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40596,6 +40751,7 @@ func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40811,6 +40967,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -40951,6 +41108,7 @@ func (c *RemarketingListSharesGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41091,6 +41249,7 @@ func (c *RemarketingListSharesPatchCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -41233,6 +41392,7 @@ func (c *RemarketingListSharesUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglistshare) if err != nil { @@ -41378,6 +41538,7 @@ func (c *RemarketingListsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41516,6 +41677,7 @@ func (c *RemarketingListsInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -41725,6 +41887,7 @@ func (c *RemarketingListsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -41938,6 +42101,7 @@ func (c *RemarketingListsPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -42080,6 +42244,7 @@ func (c *RemarketingListsUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.remarketinglist) if err != nil { @@ -42214,6 +42379,7 @@ func (c *ReportsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}") @@ -42332,6 +42498,7 @@ func (c *ReportsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42470,6 +42637,7 @@ func (c *ReportsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -42661,6 +42829,7 @@ func (c *ReportsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -42870,6 +43039,7 @@ func (c *ReportsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -43020,6 +43190,7 @@ func (c *ReportsRunCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/reports/{reportId}/run") @@ -43162,6 +43333,7 @@ func (c *ReportsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -43307,6 +43479,7 @@ func (c *ReportsCompatibleFieldsQueryCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.report) if err != nil { @@ -43454,6 +43627,7 @@ func (c *ReportsFilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43665,6 +43839,7 @@ func (c *ReportsFilesListCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -43876,6 +44051,7 @@ func (c *SitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44014,6 +44190,7 @@ func (c *SitesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44291,6 +44468,7 @@ func (c *SitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44542,6 +44720,7 @@ func (c *SitesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44684,6 +44863,7 @@ func (c *SitesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.site) if err != nil { @@ -44829,6 +45009,7 @@ func (c *SizesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -44967,6 +45148,7 @@ func (c *SizesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.size) if err != nil { @@ -45142,6 +45324,7 @@ func (c *SizesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45306,6 +45489,7 @@ func (c *SubaccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45444,6 +45628,7 @@ func (c *SubaccountsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45648,6 +45833,7 @@ func (c *SubaccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -45849,6 +46035,7 @@ func (c *SubaccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -45991,6 +46178,7 @@ func (c *SubaccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subaccount) if err != nil { @@ -46136,6 +46324,7 @@ func (c *TargetableRemarketingListsGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46341,6 +46530,7 @@ func (c *TargetableRemarketingListsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46558,6 +46748,7 @@ func (c *TargetingTemplatesGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -46696,6 +46887,7 @@ func (c *TargetingTemplatesInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetingtemplate) if err != nil { @@ -46907,6 +47099,7 @@ func (c *TargetingTemplatesListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47114,6 +47307,7 @@ func (c *TargetingTemplatesPatchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetingtemplate) if err != nil { @@ -47256,6 +47450,7 @@ func (c *TargetingTemplatesUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetingtemplate) if err != nil { @@ -47399,6 +47594,7 @@ func (c *UserProfilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47536,6 +47732,7 @@ func (c *UserProfilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47662,6 +47859,7 @@ func (c *UserRolePermissionGroupsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47809,6 +48007,7 @@ func (c *UserRolePermissionGroupsListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -47950,6 +48149,7 @@ func (c *UserRolePermissionsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -48108,6 +48308,7 @@ func (c *UserRolePermissionsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -48244,6 +48445,7 @@ func (c *UserRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "userprofiles/{profileId}/userRoles/{id}") @@ -48362,6 +48564,7 @@ func (c *UserRolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -48500,6 +48703,7 @@ func (c *UserRolesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole) if err != nil { @@ -48719,6 +48923,7 @@ func (c *UserRolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -48931,6 +49136,7 @@ func (c *UserRolesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole) if err != nil { @@ -49073,6 +49279,7 @@ func (c *UserRolesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.userrole) if err != nil { @@ -49218,6 +49425,7 @@ func (c *VideoFormatsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if 
c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) }
@@ -49365,6 +49573,7 @@ func (c *VideoFormatsListCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
diff --git a/vendor/google.golang.org/api/discovery/v1/discovery-gen.go b/vendor/google.golang.org/api/discovery/v1/discovery-gen.go
index 9192e275c..ae63affce 100644
--- a/vendor/google.golang.org/api/discovery/v1/discovery-gen.go
+++ b/vendor/google.golang.org/api/discovery/v1/discovery-gen.go
@@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) {
 }
 
 type Service struct {
-	client    *http.Client
-	BasePath  string // API endpoint base URL
-	UserAgent string // optional additional User-Agent fragment
+	client                    *http.Client
+	BasePath                  string // API endpoint base URL
+	UserAgent                 string // optional additional User-Agent fragment
+	GoogleClientHeaderElement string // client header fragment, for Google use only
 
 	Apis *ApisService
 }
@@ -69,6 +70,10 @@ func (s *Service) userAgent() string {
 	return googleapi.UserAgent + " " + s.UserAgent
 }
 
+func (s *Service) clientHeader() string {
+	return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
+}
+
 func NewApisService(s *Service) *ApisService {
 	rs := &ApisService{s: s}
 	return rs
@@ -1005,6 +1010,7 @@ func (c *ApisGetRestCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
@@ -1159,6 +1165,7 @@ func (c *ApisListCall) doRequest(alt string) (*http.Response, error) {
 		reqHeaders[k] = v
 	}
 	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go
index fa66410d6..675111a20 100644
--- a/vendor/google.golang.org/api/dns/v1/dns-gen.go
+++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go
@@ -73,9 +73,10 @@ func New(client *http.Client) (*Service, error) {
 }
 
 type Service struct {
-	client    *http.Client
-	BasePath  string // API endpoint base URL
-	UserAgent string // optional additional User-Agent fragment
+	client                    *http.Client
+	BasePath                  string // API endpoint base URL
+	UserAgent                 string // optional additional User-Agent fragment
+	GoogleClientHeaderElement string // client header fragment, for Google use only
 
 	Changes *ChangesService
 
@@ -93,6 +94,10 @@ func (s *Service) userAgent() string {
 	return googleapi.UserAgent + " " + s.UserAgent
 }
 
+func (s *Service) clientHeader() string {
+	return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
+}
+
 func NewChangesService(s *Service) *ChangesService {
 	rs := &ChangesService{s: s}
 	return rs
@@ -588,6 +593,7 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -934,6 +941,7 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1122,6 +1130,7 @@ func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) if err != nil { @@ -1256,6 +1265,7 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") @@ -1373,6 +1383,7 @@ func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1545,6 +1556,7 @@ func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1722,6 +1734,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1896,6 +1909,7 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go b/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go index 303d5c497..14438550a 100644 --- a/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go @@ -75,9 +75,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Changes *ChangesService @@ -99,6 +100,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewChangesService(s *Service) *ChangesService { rs := &ChangesService{s: s} return rs @@ -1176,6 +1181,7 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.change) if err != nil { @@ -1345,6 +1351,7 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1541,6 +1548,7 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1760,6 +1768,7 @@ func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1953,6 +1962,7 @@ func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2151,6 +2161,7 @@ func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2341,6 +2352,7 @@ func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2536,6 +2548,7 @@ func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) if err != nil { @@ -2684,6 +2697,7 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") @@ -2843,6 +2857,7 @@ func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3020,6 +3035,7 @@ func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3200,6 +3216,7 @@ func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.managedzone) if err != nil { @@ -3358,6 +3375,7 @@ func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) if err != nil { @@ -3523,6 +3541,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3702,6 +3721,7 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/doubleclickbidmanager/v1/doubleclickbidmanager-gen.go b/vendor/google.golang.org/api/doubleclickbidmanager/v1/doubleclickbidmanager-gen.go index 2d49fa797..8585004e6 100644 --- a/vendor/google.golang.org/api/doubleclickbidmanager/v1/doubleclickbidmanager-gen.go +++ b/vendor/google.golang.org/api/doubleclickbidmanager/v1/doubleclickbidmanager-gen.go @@ -58,9 +58,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Lineitems *LineitemsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewLineitemsService(s *Service) *LineitemsService { rs := &LineitemsService{s: s} return rs @@ -1659,6 +1664,7 @@ func (c *LineitemsDownloadlineitemsCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.downloadlineitemsrequest) if err != nil { @@ -1773,6 +1779,7 @@ func (c *LineitemsUploadlineitemsCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.uploadlineitemsrequest) if err != nil { @@ -1887,6 +1894,7 @@ func (c *QueriesCreatequeryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.query) if err != nil { @@ -2002,6 +2010,7 @@ func (c *QueriesDeletequeryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, 
"query/{queryId}") @@ -2106,6 +2115,7 @@ func (c *QueriesGetqueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2239,6 +2249,7 @@ func (c *QueriesListqueriesCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2350,6 +2361,7 @@ func (c *QueriesRunqueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runqueryrequest) if err != nil { @@ -2462,6 +2474,7 @@ func (c *ReportsListreportsCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2586,6 +2599,7 @@ func (c *SdfDownloadCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.downloadrequest) if err != nil { diff --git a/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-api.json b/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-api.json index d8021102b..28265804f 100644 --- a/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-api.json +++ b/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/HRuJCCIlMtHocq2WCX0lbnhTmso\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/moKwEsFuKXY57BghNYmOvjYDG_c\"", "discoveryVersion": "v1", "id": "doubleclicksearch:v2", "name": "doubleclicksearch", "version": "v2", - "revision": "20161108", + "revision": "20170207", "title": "DoubleClick Search API", "description": "Reports and modifies your advertising data in DoubleClick Search (for example, campaigns, ad groups, keywords, and conversions).", "ownerDomain": "google.com", @@ -175,7 +175,7 @@ }, "conversionId": { "type": "string", - "description": "For offline conversions, this is an ID that advertisers are required to provide. Advertisers can specify any ID that is meaningful to them. For online conversions, DS copies the dsConversionId or floodlightOrderId into this property depending on the advertiser's Floodlight instructions.", + "description": "For offline conversions, advertisers provide this ID. Advertisers can specify any ID that is meaningful to them. 
Each conversion in a request must specify a unique ID, and the combination of ID and timestamp must be unique amongst all conversions within the advertiser.\nFor online conversions, DS copies the dsConversionId or floodlightOrderId into this property depending on the advertiser's Floodlight instructions.", "annotations": { "required": [ "doubleclicksearch.conversion.insert" diff --git a/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-gen.go b/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-gen.go index c98027125..16e571f53 100644 --- a/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-gen.go +++ b/vendor/google.golang.org/api/doubleclicksearch/v2/doubleclicksearch-gen.go @@ -63,9 +63,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Conversion *ConversionService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewConversionService(s *Service) *ConversionService { rs := &ConversionService{s: s} return rs @@ -186,11 +191,14 @@ type Conversion struct { // ClickId: DS click ID for the conversion. ClickId string `json:"clickId,omitempty"` - // ConversionId: For offline conversions, this is an ID that advertisers - // are required to provide. Advertisers can specify any ID that is - // meaningful to them. For online conversions, DS copies the - // dsConversionId or floodlightOrderId into this property depending on - // the advertiser's Floodlight instructions. + // ConversionId: For offline conversions, advertisers provide this ID. + // Advertisers can specify any ID that is meaningful to them. Each + // conversion in a request must specify a unique ID, and the combination + // of ID and timestamp must be unique amongst all conversions within the + // advertiser. + // For online conversions, DS copies the dsConversionId or + // floodlightOrderId into this property depending on the advertiser's + // Floodlight instructions. 
ConversionId string `json:"conversionId,omitempty"` // ConversionModifiedTimestamp: The time at which the conversion was @@ -1096,6 +1104,7 @@ func (c *ConversionGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1303,6 +1312,7 @@ func (c *ConversionInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.conversionlist) if err != nil { @@ -1428,6 +1438,7 @@ func (c *ConversionPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.conversionlist) if err != nil { @@ -1611,6 +1622,7 @@ func (c *ConversionUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.conversionlist) if err != nil { @@ -1729,6 +1741,7 @@ func (c *ConversionUpdateAvailabilityCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateavailabilityrequest) if err != nil { @@ -1847,6 +1860,7 @@ func (c *ReportsGenerateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reportrequest) if err != nil { @@ -1976,6 +1990,7 @@ func (c *ReportsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2115,6 +2130,7 @@ func (c *ReportsGetFileCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2241,6 +2257,7 @@ func (c *ReportsRequestCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reportrequest) if err != nil { @@ -2372,6 +2389,7 @@ func (c *SavedColumnsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/drive/v2/drive-api.json b/vendor/google.golang.org/api/drive/v2/drive-api.json index 15dbba61f..70aeb9312 100644 --- 
a/vendor/google.golang.org/api/drive/v2/drive-api.json +++ b/vendor/google.golang.org/api/drive/v2/drive-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/aTsDjhJX4jyEgcQh1bdOjEtV2EI\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/5vvnUnaYZD7vqEjmjYVCOzuEk5s\"", "discoveryVersion": "v1", "id": "drive:v2", "name": "drive", "version": "v2", - "revision": "20170113", + "revision": "20170207", "title": "Drive API", "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", "ownerDomain": "google.com", @@ -1755,6 +1755,21 @@ } } }, + "StartPageToken": { + "id": "StartPageToken", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#startPageToken\".", + "default": "drive#startPageToken" + }, + "startPageToken": { + "type": "string", + "description": "The starting page token for listing changes." + } + } + }, "User": { "id": "User", "type": "object", @@ -1934,6 +1949,25 @@ "https://www.googleapis.com/auth/drive.readonly" ] }, + "getStartPageToken": { + "id": "drive.changes.getStartPageToken", + "path": "changes/startPageToken", + "httpMethod": "GET", + "description": "Gets the starting pageToken for listing future changes.", + "response": { + "$ref": "StartPageToken" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.apps.readonly", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, "list": { "id": "drive.changes.list", "path": "changes", @@ -1962,7 +1996,7 @@ }, "pageToken": { "type": "string", - "description": "Page token for changes.", + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", "location": "query" }, "spaces": { @@ -2020,7 +2054,7 @@ }, "pageToken": { "type": "string", - "description": "Page token for changes.", + "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", "location": "query" }, "spaces": { diff --git a/vendor/google.golang.org/api/drive/v2/drive-gen.go b/vendor/google.golang.org/api/drive/v2/drive-gen.go index 32cadd6bc..22d7baddb 100644 --- a/vendor/google.golang.org/api/drive/v2/drive-gen.go +++ b/vendor/google.golang.org/api/drive/v2/drive-gen.go @@ -98,9 +98,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only About *AboutService @@ -136,6 +137,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAboutService(s *Service) *AboutService { rs := &AboutService{s: s} return rs @@ -2518,6 +2523,41 @@ func (s *RevisionList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type StartPageToken struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#startPageToken". + Kind string `json:"kind,omitempty"` + + // StartPageToken: The starting page token for listing changes. + StartPageToken string `json:"startPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StartPageToken) MarshalJSON() ([]byte, error) { + type noMethod StartPageToken + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // User: Information about a Drive user. type User struct { // DisplayName: A plain text displayable name for this user. 
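// --- Editor's note (illustrative sketch, not part of the vendored diff) ---
// The hunks above add a StartPageToken type and a drive.changes.getStartPageToken
// method to the generated drive/v2 client, and the updated pageToken documentation
// says to seed Changes.List with that token. A consumer of this vendored client
// might use the new call roughly as below; the helper name and the FileId printing
// are illustrative only, and error handling is reduced to returning the error.
package example

import (
	"fmt"

	drive "google.golang.org/api/drive/v2"
)

// listChangesFromNow fetches the current start page token and then pages through
// every change reported after that point, following nextPageToken.
func listChangesFromNow(svc *drive.Service) error {
	start, err := svc.Changes.GetStartPageToken().Do()
	if err != nil {
		return err
	}
	pageToken := start.StartPageToken
	for pageToken != "" {
		page, err := svc.Changes.List().PageToken(pageToken).Do()
		if err != nil {
			return err
		}
		for _, change := range page.Items {
			fmt.Println("changed file:", change.FileId)
		}
		pageToken = page.NextPageToken
	}
	return nil
}
// --- end editor's note ---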
@@ -2674,6 +2714,7 @@ func (c *AboutGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2824,6 +2865,7 @@ func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2995,6 +3037,7 @@ func (c *AppsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3137,6 +3180,7 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3222,6 +3266,136 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { } +// method id "drive.changes.getStartPageToken": + +type ChangesGetStartPageTokenCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetStartPageToken: Gets the starting pageToken for listing future +// changes. +func (r *ChangesService) GetStartPageToken() *ChangesGetStartPageTokenCall { + c := &ChangesGetStartPageTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesGetStartPageTokenCall) Fields(s ...googleapi.Field) *ChangesGetStartPageTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesGetStartPageTokenCall) IfNoneMatch(entityTag string) *ChangesGetStartPageTokenCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesGetStartPageTokenCall) Context(ctx context.Context) *ChangesGetStartPageTokenCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ChangesGetStartPageTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChangesGetStartPageTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "changes/startPageToken") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "drive.changes.getStartPageToken" call. +// Exactly one of *StartPageToken or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *StartPageToken.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ChangesGetStartPageTokenCall) Do(opts ...googleapi.CallOption) (*StartPageToken, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &StartPageToken{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the starting pageToken for listing future changes.", + // "httpMethod": "GET", + // "id": "drive.changes.getStartPageToken", + // "path": "changes/startPageToken", + // "response": { + // "$ref": "StartPageToken" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.apps.readonly", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + // method id "drive.changes.list": type ChangesListCall struct { @@ -3261,8 +3435,10 @@ func (c *ChangesListCall) MaxResults(maxResults int64) *ChangesListCall { return c } -// PageToken sets the optional parameter "pageToken": Page token for -// changes. +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response or to +// the response from the getStartPageToken method. 
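// --- Editor's note (illustrative sketch, not part of the vendored diff) ---
// The recurring change throughout these vendored files is that every generated
// doRequest now sets an "x-goog-api-client" header via the new Service.clientHeader()
// method, which calls gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement).
// The snippet below just prints the value that helper produces when the
// Google-internal GoogleClientHeaderElement field is left empty (the normal case);
// the exact string depends on the running Go version, likely something of the form
// "gl-go/<goversion> gdcl/20170210".
package example

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

func printGeneratedClientHeader() {
	// Same call the generated clients make, with an empty client element.
	fmt.Println(gensupport.GoogleClientHeader("20170210", ""))
}
// --- end editor's note ---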
func (c *ChangesListCall) PageToken(pageToken string) *ChangesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -3324,6 +3500,7 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3399,7 +3576,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangeList, error) // "type": "integer" // }, // "pageToken": { - // "description": "Page token for changes.", + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", // "location": "query", // "type": "string" // }, @@ -3495,8 +3672,10 @@ func (c *ChangesWatchCall) MaxResults(maxResults int64) *ChangesWatchCall { return c } -// PageToken sets the optional parameter "pageToken": Page token for -// changes. +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response or to +// the response from the getStartPageToken method. func (c *ChangesWatchCall) PageToken(pageToken string) *ChangesWatchCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -3548,6 +3727,7 @@ func (c *ChangesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -3625,7 +3805,7 @@ func (c *ChangesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { // "type": "integer" // }, // "pageToken": { - // "description": "Page token for changes.", + // "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", // "location": "query", // "type": "string" // }, @@ -3712,6 +3892,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -3811,6 +3992,7 @@ func (c *ChildrenDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{folderId}/children/{childId}") @@ -3928,6 +4110,7 @@ func (c *ChildrenGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4070,6 +4253,7 @@ func (c *ChildrenInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.childreference) if err != nil { @@ -4249,6 +4433,7 @@ func (c *ChildrenListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4427,6 +4612,7 @@ func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") @@ -4552,6 +4738,7 @@ func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4696,6 +4883,7 @@ func (c *CommentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -4871,6 +5059,7 @@ func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5050,6 +5239,7 @@ func (c *CommentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -5194,6 +5384,7 @@ func (c *CommentsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -5392,6 +5583,7 @@ func (c *FilesCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { @@ -5575,6 +5767,7 @@ func (c *FilesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") @@ -5670,6 +5863,7 @@ func (c *FilesEmptyTrashCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/trash") @@ -5764,6 +5958,7 @@ func (c *FilesExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5913,6 +6108,7 @@ func (c *FilesGenerateIdsCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6090,6 +6286,7 @@ func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6387,6 +6584,7 @@ func (c *FilesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { @@ -6701,6 +6899,7 @@ func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7011,6 +7210,7 @@ func (c *FilesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { @@ -7235,6 +7435,7 @@ func (c *FilesTouchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/touch") @@ -7363,6 +7564,7 @@ func (c *FilesTrashCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/trash") @@ -7489,6 +7691,7 @@ func (c *FilesUntrashCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/untrash") @@ -7781,6 +7984,7 @@ func (c *FilesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { @@ -8107,6 +8311,7 @@ func (c *FilesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -8296,6 +8501,7 @@ func (c *ParentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/parents/{parentId}") @@ -8413,6 +8619,7 @@ func (c *ParentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8555,6 +8762,7 @@ func (c *ParentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.parentreference) if err != nil { @@ -8699,6 +8907,7 @@ func (c *ParentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8833,6 +9042,7 @@ func (c *PermissionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") @@ -8950,6 +9160,7 @@ func (c *PermissionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9100,6 +9311,7 @@ func (c *PermissionsGetIdForEmailCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9251,6 +9463,7 @@ func (c *PermissionsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] 
= v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) if err != nil { @@ -9405,6 +9618,7 @@ func (c *PermissionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9555,6 +9769,7 @@ func (c *PermissionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) if err != nil { @@ -9726,6 +9941,7 @@ func (c *PermissionsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) if err != nil { @@ -9887,6 +10103,7 @@ func (c *PropertiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/properties/{propertyKey}") @@ -10019,6 +10236,7 @@ func (c *PropertiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10168,6 +10386,7 @@ func (c *PropertiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.property) if err != nil { @@ -10313,6 +10532,7 @@ func (c *PropertiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10457,6 +10677,7 @@ func (c *PropertiesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.property) if err != nil { @@ -10616,6 +10837,7 @@ func (c *PropertiesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.property) if err != nil { @@ -10785,6 +11007,7 @@ func (c *RealtimeGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10974,6 +11197,7 @@ func (c *RealtimeUpdateCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/realtime") @@ -11144,6 +11368,7 @@ func (c *RepliesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") @@ -11278,6 +11503,7 @@ func (c *RepliesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11432,6 +11658,7 @@ func (c *RepliesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commentreply) if err != nil { @@ -11609,6 +11836,7 @@ func (c *RepliesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11793,6 +12021,7 @@ func (c *RepliesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commentreply) if err != nil { @@ -11947,6 +12176,7 @@ func (c *RepliesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commentreply) if err != nil { @@ -12097,6 +12327,7 @@ func (c *RevisionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") @@ -12215,6 +12446,7 @@ func (c *RevisionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12381,6 +12613,7 @@ func (c *RevisionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12552,6 +12785,7 @@ func (c *RevisionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.revision) if err != nil { @@ -12697,6 +12931,7 @@ 
func (c *RevisionsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.revision) if err != nil { diff --git a/vendor/google.golang.org/api/drive/v3/drive-gen.go b/vendor/google.golang.org/api/drive/v3/drive-gen.go index 48c955338..d889e5d42 100644 --- a/vendor/google.golang.org/api/drive/v3/drive-gen.go +++ b/vendor/google.golang.org/api/drive/v3/drive-gen.go @@ -90,9 +90,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only About *AboutService @@ -118,6 +119,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAboutService(s *Service) *AboutService { rs := &AboutService{s: s} return rs @@ -1712,6 +1717,7 @@ func (c *AboutGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1840,6 +1846,7 @@ func (c *ChangesGetStartPageTokenCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2001,6 +2008,7 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2192,6 +2200,7 @@ func (c *ChangesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -2355,6 +2364,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -2453,6 +2463,7 @@ func (c *CommentsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -2587,6 +2598,7 @@ func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, 
"files/{fileId}/comments/{commentId}") @@ -2712,6 +2724,7 @@ func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2896,6 +2909,7 @@ func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3074,6 +3088,7 @@ func (c *CommentsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -3244,6 +3259,7 @@ func (c *FilesCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { @@ -3483,6 +3499,7 @@ func (c *FilesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { @@ -3695,6 +3712,7 @@ func (c *FilesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") @@ -3790,6 +3808,7 @@ func (c *FilesEmptyTrashCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/trash") @@ -3884,6 +3903,7 @@ func (c *FilesExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4033,6 +4053,7 @@ func (c *FilesGenerateIdsCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4184,6 +4205,7 @@ func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4405,6 +4427,7 @@ func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4684,6 +4707,7 @@ func (c *FilesUpdateCall) doRequest(alt string) (*http.Response, 
error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { @@ -4922,6 +4946,7 @@ func (c *FilesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -5113,6 +5138,7 @@ func (c *PermissionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) if err != nil { @@ -5263,6 +5289,7 @@ func (c *PermissionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") @@ -5380,6 +5407,7 @@ func (c *PermissionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5530,6 +5558,7 @@ func (c *PermissionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5681,6 +5710,7 @@ func (c *PermissionsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) if err != nil { @@ -5837,6 +5867,7 @@ func (c *RepliesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) if err != nil { @@ -5981,6 +6012,7 @@ func (c *RepliesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") @@ -6116,6 +6148,7 @@ func (c *RepliesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6302,6 +6335,7 @@ func (c *RepliesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6485,6 +6519,7 @@ func (c *RepliesUpdateCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) if err != nil { @@ -6636,6 +6671,7 @@ func (c *RevisionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") @@ -6763,6 +6799,7 @@ func (c *RevisionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6953,6 +6990,7 @@ func (c *RevisionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7124,6 +7162,7 @@ func (c *RevisionsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.revision) if err != nil { diff --git a/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-api.json b/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-api.json index 1bc1f6a50..d8485dc11 100644 --- a/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-api.json +++ b/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-api.json @@ -1,100 +1,33 @@ { + "revision": "20170207", + "documentationLink": "https://firebase.google.com/docs/dynamic-links/", + "id": "firebasedynamiclinks:v1", + "discoveryVersion": "v1", + "version_module": "True", "schemas": { - "CreateShortDynamicLinkResponse": { - "id": "CreateShortDynamicLinkResponse", - "description": "Response to create a short Dynamic Link.", - "type": "object", - "properties": { - "shortLink": { - "description": "Short Dynamic Link value. e.g. https://abcd.app.goo.gl/wxyz", - "type": "string" - }, - "previewLink": { - "description": "Preivew link to show the link flow chart.", - "type": "string" - }, - "warning": { - "description": "Information about potential warnings on link creation.", - "type": "array", - "items": { - "$ref": "DynamicLinkWarning" - } - } - } - }, - "IosInfo": { - "description": "iOS related attributes to the Dynamic Link..", - "type": "object", - "properties": { - "iosFallbackLink": { - "description": "Link to open on iOS if the app is not installed.", - "type": "string" - }, - "iosAppStoreId": { - "description": "iOS App Store ID.", - "type": "string" - }, - "iosIpadFallbackLink": { - "description": "If specified, this overrides the ios_fallback_link value on iPads.", - "type": "string" - }, - "iosIpadBundleId": { - "description": "iPad bundle ID of the app.", - "type": "string" - }, - "iosCustomScheme": { - "description": "Custom (destination) scheme to use for iOS. By default, we’ll use the\nbundle ID as the custom scheme. 
Developer can override this behavior using\nthis param.", - "type": "string" - }, - "iosBundleId": { - "description": "iOS bundle ID of the app.", - "type": "string" - } - }, - "id": "IosInfo" - }, - "AnalyticsInfo": { - "id": "AnalyticsInfo", - "description": "Tracking parameters supported by Dynamic Link.", - "type": "object", - "properties": { - "itunesConnectAnalytics": { - "$ref": "ITunesConnectAnalytics", - "description": "iTunes Connect App Analytics." - }, - "googlePlayAnalytics": { - "description": "Google Play Campaign Measurements.", - "$ref": "GooglePlayAnalytics" - } - } - }, "CreateShortDynamicLinkRequest": { - "id": "CreateShortDynamicLinkRequest", "description": "Request to create a short Dynamic Link.", "type": "object", "properties": { + "dynamicLinkInfo": { + "$ref": "DynamicLinkInfo", + "description": "Information about the Dynamic Link to be shortened.\n[Learn more](https://firebase.google.com/docs/dynamic-links/android#create-a-dynamic-link-programmatically)." + }, "longDynamicLink": { "description": "Full long Dynamic Link URL with desired query parameters specified.\nFor example,\n\"https://sample.app.goo.gl/?link=http://www.google.com&apn=com.sample\",\n[Learn more](https://firebase.google.com/docs/dynamic-links/android#create-a-dynamic-link-programmatically).", "type": "string" }, "suffix": { - "description": "Short Dynamic Link suffix. Optional.", - "$ref": "Suffix" - }, - "dynamicLinkInfo": { - "$ref": "DynamicLinkInfo", - "description": "Information about the Dynamic Link to be shortened.\n[Learn more](https://firebase.google.com/docs/dynamic-links/android#create-a-dynamic-link-programmatically)." + "$ref": "Suffix", + "description": "Short Dynamic Link suffix. Optional." } - } + }, + "id": "CreateShortDynamicLinkRequest" }, "Suffix": { - "id": "Suffix", - "description": "Short Dynamic Link suffix.", "type": "object", "properties": { "option": { - "description": "Suffix option.", - "type": "string", "enumDescriptions": [ "The suffix option is not specified, performs as NOT_GUESSABLE .", "Short Dynamic Link suffix is a base62 [0-9A-Za-z] encoded string of\na random generated 96 bit random number, which has a length of 17 chars.\nFor example, \"nlAR8U4SlKRZw1cb2\".\nIt prevents other people from guessing and crawling short Dynamic Links\nthat contain personal identifiable information.", @@ -104,19 +37,18 @@ "OPTION_UNSPECIFIED", "UNGUESSABLE", "SHORT" - ] + ], + "description": "Suffix option.", + "type": "string" } - } + }, + "id": "Suffix", + "description": "Short Dynamic Link suffix." }, "GooglePlayAnalytics": { - "id": "GooglePlayAnalytics", "description": "Parameters for Google Play Campaign Measurements.\n[Learn more](https://developers.google.com/analytics/devguides/collection/android/v4/campaigns#campaign-params)", "type": "object", "properties": { - "utmMedium": { - "description": "Campaign medium; used to identify a medium such as email or cost-per-click.", - "type": "string" - }, "utmTerm": { "description": "Campaign term; used with paid search to supply the keywords for ads.", "type": "string" @@ -125,24 +57,33 @@ "description": "Campaign source; used to identify a search engine, newsletter, or other\nsource.", "type": "string" }, - "utmCampaign": { - "description": "Campaign name; used for keyword analysis to identify a specific product\npromotion or strategic campaign.", - "type": "string" - }, "gclid": { "description": "[AdWords autotagging parameter](https://support.google.com/analytics/answer/1033981?hl=en);\nused to measure Google AdWords ads. 
This value is generated dynamically\nand should never be modified.", "type": "string" }, + "utmCampaign": { + "type": "string", + "description": "Campaign name; used for keyword analysis to identify a specific product\npromotion or strategic campaign." + }, "utmContent": { "description": "Campaign content; used for A/B testing and content-targeted ads to\ndifferentiate ads or links that point to the same URL.", "type": "string" + }, + "utmMedium": { + "description": "Campaign medium; used to identify a medium such as email or cost-per-click.", + "type": "string" } - } + }, + "id": "GooglePlayAnalytics" }, "DynamicLinkInfo": { "description": "Information about a Dynamic Link.", "type": "object", "properties": { + "dynamicLinkDomain": { + "description": "Dynamic Links domain that the project owns, e.g. abcd.app.goo.gl\n[Learn more](https://firebase.google.com/docs/dynamic-links/android#set-up-firebase-and-the-dynamic-links-sdk)\non how to set up Dynamic Link domain associated with your Firebase project.\n\nRequired.", + "type": "string" + }, "link": { "description": "The link your app will open, You can specify any URL your app can handle.\nThis link must be a well-formatted URL, be properly URL-encoded, and use\nthe HTTP or HTTPS scheme. See 'link' parameters in the\n[documentation](https://firebase.google.com/docs/dynamic-links/android#create-a-dynamic-link-programmatically).\n\nRequired.", "type": "string" @@ -152,8 +93,8 @@ "$ref": "IosInfo" }, "socialMetaTagInfo": { - "$ref": "SocialMetaTagInfo", - "description": "Parameters for social meta tag params.\nUsed to set meta tag data for link previews on social sites." + "description": "Parameters for social meta tag params.\nUsed to set meta tag data for link previews on social sites.", + "$ref": "SocialMetaTagInfo" }, "androidInfo": { "description": "Android related information. See Android related parameters in the\n[documentation](https://firebase.google.com/docs/dynamic-links/android#create-a-dynamic-link-programmatically).", @@ -162,27 +103,14 @@ "analyticsInfo": { "$ref": "AnalyticsInfo", "description": "Parameters used for tracking. See all tracking parameters in the\n[documentation](https://firebase.google.com/docs/dynamic-links/android#create-a-dynamic-link-programmatically)." - }, - "dynamicLinkDomain": { - "description": "Dynamic Links domain that the project owns, e.g. 
abcd.app.goo.gl\n[Learn more](https://firebase.google.com/docs/dynamic-links/android#set-up-firebase-and-the-dynamic-links-sdk)\non how to set up Dynamic Link domain associated with your Firebase project.\n\nRequired.", - "type": "string" } }, "id": "DynamicLinkInfo" }, "ITunesConnectAnalytics": { - "id": "ITunesConnectAnalytics", "description": "Parameters for iTunes Connect App Analytics.", "type": "object", "properties": { - "pt": { - "description": "Provider token that enables analytics for Dynamic Links from within iTunes\nConnect.", - "type": "string" - }, - "at": { - "description": "Affiliate token used to create affiliate-coded links.", - "type": "string" - }, "ct": { "description": "Campaign text that developers can optionally add to any link in order to\ntrack sales from a specific marketing campaign.", "type": "string" @@ -190,12 +118,19 @@ "mt": { "description": "iTune media types, including music, podcasts, audiobooks and so on.", "type": "string" + }, + "pt": { + "description": "Provider token that enables analytics for Dynamic Links from within iTunes\nConnect.", + "type": "string" + }, + "at": { + "description": "Affiliate token used to create affiliate-coded links.", + "type": "string" } - } + }, + "id": "ITunesConnectAnalytics" }, "SocialMetaTagInfo": { - "description": "Parameters for social meta tag params.\nUsed to set meta tag data for link previews on social sites.", - "type": "object", "properties": { "socialDescription": { "description": "A short description of the link. Optional.", @@ -210,14 +145,69 @@ "type": "string" } }, - "id": "SocialMetaTagInfo" + "id": "SocialMetaTagInfo", + "description": "Parameters for social meta tag params.\nUsed to set meta tag data for link previews on social sites.", + "type": "object" + }, + "AndroidInfo": { + "description": "Android related attributes to the Dynamic Link.", + "type": "object", + "properties": { + "androidLink": { + "type": "string", + "description": "If specified, this overrides the ‘link’ parameter on Android." + }, + "androidFallbackLink": { + "description": "Link to open on Android if the app is not installed.", + "type": "string" + }, + "androidPackageName": { + "description": "Android package name of the app.", + "type": "string" + }, + "androidMinPackageVersionCode": { + "description": "Minimum version code for the Android app. 
If the installed app’s version\ncode is lower, then the user is taken to the Play Store.", + "type": "string" + } + }, + "id": "AndroidInfo" }, "DynamicLinkWarning": { - "id": "DynamicLinkWarning", "description": "Dynamic Links warning messages.", "type": "object", "properties": { "warningCode": { + "enum": [ + "CODE_UNSPECIFIED", + "NOT_IN_PROJECT_ANDROID_PACKAGE_NAME", + "NOT_INTEGER_ANDROID_PACKAGE_MIN_VERSION", + "UNNECESSARY_ANDROID_PACKAGE_MIN_VERSION", + "NOT_URI_ANDROID_LINK", + "UNNECESSARY_ANDROID_LINK", + "NOT_URI_ANDROID_FALLBACK_LINK", + "BAD_URI_SCHEME_ANDROID_FALLBACK_LINK", + "NOT_IN_PROJECT_IOS_BUNDLE_ID", + "NOT_IN_PROJECT_IPAD_BUNDLE_ID", + "UNNECESSARY_IOS_URL_SCHEME", + "NOT_NUMERIC_IOS_APP_STORE_ID", + "UNNECESSARY_IOS_APP_STORE_ID", + "NOT_URI_IOS_FALLBACK_LINK", + "BAD_URI_SCHEME_IOS_FALLBACK_LINK", + "NOT_URI_IPAD_FALLBACK_LINK", + "BAD_URI_SCHEME_IPAD_FALLBACK_LINK", + "BAD_DEBUG_PARAM", + "BAD_AD_PARAM", + "DEPRECATED_PARAM", + "UNRECOGNIZED_PARAM", + "TOO_LONG_PARAM", + "NOT_URI_SOCIAL_IMAGE_LINK", + "BAD_URI_SCHEME_SOCIAL_IMAGE_LINK", + "NOT_URI_SOCIAL_URL", + "BAD_URI_SCHEME_SOCIAL_URL", + "LINK_LENGTH_TOO_LONG", + "LINK_WITH_FRAGMENTS", + "NOT_MATCHING_IOS_BUNDLE_ID_AND_STORE_ID" + ], "description": "The warning code.", "type": "string", "enumDescriptions": [ @@ -250,74 +240,89 @@ "Dynamic Link URL length is too long.", "Dynamic Link URL contains fragments.", "The iOS bundle ID does not match with the given iOS store ID." - ], - "enum": [ - "CODE_UNSPECIFIED", - "NOT_IN_PROJECT_ANDROID_PACKAGE_NAME", - "NOT_INTEGER_ANDROID_PACKAGE_MIN_VERSION", - "UNNECESSARY_ANDROID_PACKAGE_MIN_VERSION", - "NOT_URI_ANDROID_LINK", - "UNNECESSARY_ANDROID_LINK", - "NOT_URI_ANDROID_FALLBACK_LINK", - "BAD_URI_SCHEME_ANDROID_FALLBACK_LINK", - "NOT_IN_PROJECT_IOS_BUNDLE_ID", - "NOT_IN_PROJECT_IPAD_BUNDLE_ID", - "UNNECESSARY_IOS_URL_SCHEME", - "NOT_NUMERIC_IOS_APP_STORE_ID", - "UNNECESSARY_IOS_APP_STORE_ID", - "NOT_URI_IOS_FALLBACK_LINK", - "BAD_URI_SCHEME_IOS_FALLBACK_LINK", - "NOT_URI_IPAD_FALLBACK_LINK", - "BAD_URI_SCHEME_IPAD_FALLBACK_LINK", - "BAD_DEBUG_PARAM", - "BAD_AD_PARAM", - "DEPRECATED_PARAM", - "UNRECOGNIZED_PARAM", - "TOO_LONG_PARAM", - "NOT_URI_SOCIAL_IMAGE_LINK", - "BAD_URI_SCHEME_SOCIAL_IMAGE_LINK", - "NOT_URI_SOCIAL_URL", - "BAD_URI_SCHEME_SOCIAL_URL", - "LINK_LENGTH_TOO_LONG", - "LINK_WITH_FRAGMENTS", - "NOT_MATCHING_IOS_BUNDLE_ID_AND_STORE_ID" ] }, "warningMessage": { "description": "The warning message to help developers improve their requests.", "type": "string" } - } + }, + "id": "DynamicLinkWarning" }, - "AndroidInfo": { - "description": "Android related attributes to the Dynamic Link.", + "IosInfo": { "type": "object", "properties": { - "androidFallbackLink": { - "description": "Link to open on Android if the app is not installed.", + "iosIpadFallbackLink": { + "description": "If specified, this overrides the ios_fallback_link value on iPads.", "type": "string" }, - "androidPackageName": { - "description": "Android package name of the app.", + "iosIpadBundleId": { + "description": "iPad bundle ID of the app.", "type": "string" }, - "androidMinPackageVersionCode": { - "description": "Minimum version code for the Android app. If the installed app’s version\ncode is lower, then the user is taken to the Play Store.", + "iosCustomScheme": { + "description": "Custom (destination) scheme to use for iOS. By default, we’ll use the\nbundle ID as the custom scheme. 
Developer can override this behavior using\nthis param.", "type": "string" }, - "androidLink": { - "description": "If specified, this overrides the ‘link’ parameter on Android.", + "iosBundleId": { + "description": "iOS bundle ID of the app.", + "type": "string" + }, + "iosFallbackLink": { + "description": "Link to open on iOS if the app is not installed.", + "type": "string" + }, + "iosAppStoreId": { + "description": "iOS App Store ID.", "type": "string" } }, - "id": "AndroidInfo" + "id": "IosInfo", + "description": "iOS related attributes to the Dynamic Link.." + }, + "CreateShortDynamicLinkResponse": { + "type": "object", + "properties": { + "warning": { + "description": "Information about potential warnings on link creation.", + "type": "array", + "items": { + "$ref": "DynamicLinkWarning" + } + }, + "shortLink": { + "description": "Short Dynamic Link value. e.g. https://abcd.app.goo.gl/wxyz", + "type": "string" + }, + "previewLink": { + "description": "Preivew link to show the link flow chart.", + "type": "string" + } + }, + "id": "CreateShortDynamicLinkResponse", + "description": "Response to create a short Dynamic Link." + }, + "AnalyticsInfo": { + "description": "Tracking parameters supported by Dynamic Link.", + "type": "object", + "properties": { + "googlePlayAnalytics": { + "$ref": "GooglePlayAnalytics", + "description": "Google Play Campaign Measurements." + }, + "itunesConnectAnalytics": { + "description": "iTunes Connect App Analytics.", + "$ref": "ITunesConnectAnalytics" + } + }, + "id": "AnalyticsInfo" } }, + "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "protocol": "rest", "canonicalName": "Firebase Dynamic Links", "auth": { "oauth2": { @@ -343,23 +348,35 @@ "https://www.googleapis.com/auth/firebase" ], "flatPath": "v1/shortLinks", - "path": "v1/shortLinks", "id": "firebasedynamiclinks.shortLinks.create", + "path": "v1/shortLinks", "request": { "$ref": "CreateShortDynamicLinkRequest" }, "description": "Creates a short Dynamic Link given either a valid long Dynamic Link or\ndetails such as Dynamic Link domain, Android and iOS app information.\nThe created short Dynamic Link will not expire.\n\nRepeated calls with the same long Dynamic Link or Dynamic Link information\nwill produce the same short Dynamic Link.\n\nThe Dynamic Link domain in the request must be owned by requester's\nFirebase project.", + "httpMethod": "POST", + "parameterOrder": [], "response": { "$ref": "CreateShortDynamicLinkResponse" - }, - "parameterOrder": [], - "httpMethod": "POST" + } } } } }, "parameters": { + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, "$.xgafv": { + "description": "V1 error format.", + "type": "string", "enumDescriptions": [ "v1 error format", "v2 error format" @@ -368,9 +385,7 @@ "enum": [ "1", "2" - ], - "description": "V1 error format.", - "type": "string" + ] }, "callback": { "description": "JSONP", @@ -393,14 +408,14 @@ "proto" ] }, - "access_token": { - "description": "OAuth access token.", + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", "type": "string", "location": "query" }, - "key": { + "access_token": { "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "description": "OAuth access token.", "type": "string" }, "quotaUser": { @@ -425,36 +440,21 @@ "type": "string" }, "upload_protocol": { + "location": "query", "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "type": "string" }, "prettyPrint": { - "description": "Returns response with indentations and line breaks.", "type": "boolean", "default": "true", - "location": "query" - }, - "fields": { - "location": "query", - "description": "Selector specifying which fields to include in a partial response.", - "type": "string" - }, - "uploadType": { "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string" + "description": "Returns response with indentations and line breaks." } }, "version": "v1", "baseUrl": "https://firebasedynamiclinks.googleapis.com/", - "servicePath": "", - "description": "Firebase Dynamic Links API enables third party developers to programmatically create and manage Dynamic Links.", "kind": "discovery#restDescription", - "basePath": "", - "id": "firebasedynamiclinks:v1", - "documentationLink": "https://firebase.google.com/docs/dynamic-links/", - "revision": "20170119", - "discoveryVersion": "v1", - "version_module": "True" + "description": "Firebase Dynamic Links API enables third party developers to programmatically create and manage Dynamic Links.", + "servicePath": "", + "basePath": "" } diff --git a/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-gen.go b/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-gen.go index e03f30a9e..592bdf861 100644 --- a/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-gen.go +++ b/vendor/google.golang.org/api/firebasedynamiclinks/v1/firebasedynamiclinks-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only ShortLinks *ShortLinksService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewShortLinksService(s *Service) *ShortLinksService { rs := &ShortLinksService{s: s} return rs @@ -691,6 +696,7 @@ func (c *ShortLinksCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createshortdynamiclinkrequest) if err != nil { diff --git a/vendor/google.golang.org/api/firebaserules/v1/firebaserules-api.json b/vendor/google.golang.org/api/firebaserules/v1/firebaserules-api.json index 0d5f9b8ca..4e945e8bc 100644 --- 
a/vendor/google.golang.org/api/firebaserules/v1/firebaserules-api.json +++ b/vendor/google.golang.org/api/firebaserules/v1/firebaserules-api.json @@ -1,5 +1,5 @@ { - "id": "firebaserules:v1", + "canonicalName": "Firebase Rules API", "auth": { "oauth2": { "scopes": { @@ -15,403 +15,483 @@ } } }, - "description": "Creates and manages rules that determine when a Firebase Rules-enabled service should permit a request.\n", - "protocol": "rest", + "rootUrl": "https://firebaserules.googleapis.com/", + "ownerDomain": "google.com", + "name": "firebaserules", + "batchPath": "batch", "title": "Firebase Rules API", + "ownerName": "Google", "resources": { "projects": { + "methods": { + "test": { + "request": { + "$ref": "TestRulesetRequest" + }, + "description": "Test `Source` for syntactic and semantic correctness. Issues present in the\nrules, if any, will be returned to the caller with a description, severity,\nand source location.\n\nThe test method will typically be executed with a developer provided\n`Source`, but if regression testing is desired, this method may be\nexecuted against a `Ruleset` resource name and the `Source` will be\nretrieved from the persisted `Ruleset`.\n\nThe following is an example of `Source` that permits users to upload images\nto a bucket bearing their user id and matching the correct metadata:\n\n_*Example*_\n\n // Users are allowed to subscribe and unsubscribe to the blog.\n service firebase.storage {\n match /users/{userId}/images/{imageName} {\n allow write: if userId == request.userId\n && (imageName.endsWith('.png') || imageName.endsWith('.jpg'))\n && resource.mimeType.startsWith('image/')\n }\n }", + "response": { + "$ref": "TestRulesetResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "parameters": { + "name": { + "description": "Name of the project.\n\nFormat: `projects/{project_id}`", + "required": true, + "type": "string", + "pattern": "^projects/.+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/firebase", + "https://www.googleapis.com/auth/firebase.readonly" + ], + "flatPath": "v1/projects/{projectsId}:test", + "path": "v1/{+name}:test", + "id": "firebaserules.projects.test" + } + }, "resources": { "rulesets": { "methods": { - "create": { - "id": "firebaserules.projects.rulesets.create", - "response": { - "$ref": "Ruleset" - }, + "get": { + "description": "Get a `Ruleset` by name including the full `Source` contents.", + "httpMethod": "GET", "parameterOrder": [ "name" ], - "description": "Create a `Ruleset` from `Source`.\n\nThe `Ruleset` is given a unique generated name which is returned to the\ncaller. `Source` containing syntactic or semantics errors will result in an\nerror response indicating the first error encountered. 
For a detailed view\nof `Source` issues, use TestRuleset.", - "request": { + "response": { "$ref": "Ruleset" }, - "flatPath": "v1/projects/{projectsId}/rulesets", - "httpMethod": "POST", "parameters": { "name": { - "description": "Resource name for Project which owns this `Ruleset`.\n\nFormat: `projects/{project_id}`", - "required": true, - "pattern": "^projects/[^/]+$", + "pattern": "^projects/[^/]+/rulesets/[^/]+$", "location": "path", + "description": "Resource name for the ruleset to get.\n\nFormat: `projects/{project_id}/rulesets/{ruleset_id}`", + "required": true, "type": "string" } }, - "path": "v1/{+name}/rulesets", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase" - ] - }, - "get": { + "https://www.googleapis.com/auth/firebase", + "https://www.googleapis.com/auth/firebase.readonly" + ], + "flatPath": "v1/projects/{projectsId}/rulesets/{rulesetsId}", "id": "firebaserules.projects.rulesets.get", - "response": { - "$ref": "Ruleset" - }, + "path": "v1/{+name}" + }, + "list": { + "httpMethod": "GET", "parameterOrder": [ "name" ], - "description": "Get a `Ruleset` by name including the full `Source` contents.", - "flatPath": "v1/projects/{projectsId}/rulesets/{rulesetsId}", - "httpMethod": "GET", + "response": { + "$ref": "ListRulesetsResponse" + }, "parameters": { "name": { - "description": "Resource name for the ruleset to get.\n\nFormat: `projects/{project_id}/rulesets/{ruleset_id}`", - "required": true, - "pattern": "^projects/[^/]+/rulesets/[^/]+$", "location": "path", - "type": "string" + "description": "Resource name for the project.\n\nFormat: `projects/{project_id}`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + }, + "pageToken": { + "description": "Next page token for loading the next batch of `Ruleset` instances.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Page size to load. Maximum of 100. Defaults to 10.\nNote: `page_size` is just a hint and the service may choose to load less\nthan `page_size` due to the size of the output. To traverse all of the\nreleases, caller should iterate until the `page_token` is empty.", + "format": "int32", + "type": "integer" } }, - "path": "v1/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/firebase", "https://www.googleapis.com/auth/firebase.readonly" - ] - }, - "list": { + ], + "flatPath": "v1/projects/{projectsId}/rulesets", "id": "firebaserules.projects.rulesets.list", - "response": { - "$ref": "ListRulesetsResponse" - }, + "path": "v1/{+name}/rulesets", + "description": "List `Ruleset` metadata only and optionally filter the results by Ruleset\nname.\n\nThe full `Source` contents of a `Ruleset` may be retrieved with\nGetRuleset." + }, + "create": { + "httpMethod": "POST", "parameterOrder": [ "name" ], - "description": "List `Ruleset` metadata only and optionally filter the results by Ruleset\nname.\n\nThe full `Source` contents of a `Ruleset` may be retrieved with\nGetRuleset.", - "flatPath": "v1/projects/{projectsId}/rulesets", - "httpMethod": "GET", + "response": { + "$ref": "Ruleset" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/firebase" + ], "parameters": { - "pageSize": { - "description": "Page size to load. Maximum of 100. Defaults to 10.\nNote: `page_size` is just a hint and the service may choose to load less\nthan `page_size` due to the size of the output. 
To traverse all of the\nreleases, caller should iterate until the `page_token` is empty.", - "location": "query", - "type": "integer", - "format": "int32" - }, "name": { - "description": "Resource name for the project.\n\nFormat: `projects/{project_id}`", - "required": true, "pattern": "^projects/[^/]+$", "location": "path", - "type": "string" - }, - "pageToken": { - "description": "Next page token for loading the next batch of `Ruleset` instances.", - "location": "query", + "description": "Resource name for Project which owns this `Ruleset`.\n\nFormat: `projects/{project_id}`", + "required": true, "type": "string" } }, + "flatPath": "v1/projects/{projectsId}/rulesets", + "id": "firebaserules.projects.rulesets.create", "path": "v1/{+name}/rulesets", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase", - "https://www.googleapis.com/auth/firebase.readonly" - ] + "description": "Create a `Ruleset` from `Source`.\n\nThe `Ruleset` is given a unique generated name which is returned to the\ncaller. `Source` containing syntactic or semantics errors will result in an\nerror response indicating the first error encountered. For a detailed view\nof `Source` issues, use TestRuleset.", + "request": { + "$ref": "Ruleset" + } }, "delete": { - "id": "firebaserules.projects.rulesets.delete", "response": { "$ref": "Empty" }, "parameterOrder": [ "name" ], - "description": "Delete a `Ruleset` by resource name.\n\nIf the `Ruleset` is referenced by a `Release` the operation will fail.", - "flatPath": "v1/projects/{projectsId}/rulesets/{rulesetsId}", "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/firebase" + ], "parameters": { "name": { - "description": "Resource name for the ruleset to delete.\n\nFormat: `projects/{project_id}/rulesets/{ruleset_id}`", - "required": true, "pattern": "^projects/[^/]+/rulesets/[^/]+$", "location": "path", + "description": "Resource name for the ruleset to delete.\n\nFormat: `projects/{project_id}/rulesets/{ruleset_id}`", + "required": true, "type": "string" } }, + "flatPath": "v1/projects/{projectsId}/rulesets/{rulesetsId}", "path": "v1/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase" - ] + "id": "firebaserules.projects.rulesets.delete", + "description": "Delete a `Ruleset` by resource name.\n\nIf the `Ruleset` is referenced by a `Release` the operation will fail." } } }, "releases": { "methods": { - "update": { - "id": "firebaserules.projects.releases.update", + "delete": { + "description": "Delete a `Release` by resource name.", "response": { - "$ref": "Release" + "$ref": "Empty" }, "parameterOrder": [ "name" ], - "description": "Update a `Release`.\n\nOnly updates to the `ruleset_name` field will be honored. `Release` rename\nis not supported. To create a `Release` use the CreateRelease method\ninstead.", - "request": { - "$ref": "Release" - }, - "flatPath": "v1/projects/{projectsId}/releases/{releasesId}", - "httpMethod": "PUT", - "parameters": { - "name": { - "description": "Resource name for the `Release`.\n\n`Release` names may be structured `app1/prod/v2` or flat `app1_prod_v2`\nwhich affords developers a great deal of flexibility in mapping the name\nto the style that best fits their existing development practices. 
For\nexample, a name could refer to an environment, an app, a version, or some\ncombination of three.\n\nIn the table below, for the project name `projects/foo`, the following\nrelative release paths show how flat and structured names might be chosen\nto match a desired development / deployment strategy.\n\nUse Case | Flat Name | Structured Name\n-------------|---------------------|----------------\nEnvironments | releases/qa | releases/qa\nApps | releases/app1_qa | releases/app1/qa\nVersions | releases/app1_v2_qa | releases/app1/v2/qa\n\nThe delimiter between the release name path elements can be almost anything\nand it should work equally well with the release name list filter, but in\nmany ways the structured paths provide a clearer picture of the\nrelationship between `Release` instances.\n\nFormat: `projects/{project_id}/releases/{release_id}`", - "required": true, - "pattern": "^projects/[^/]+/releases/.+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+name}", + "httpMethod": "DELETE", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/firebase" - ] - }, - "create": { - "id": "firebaserules.projects.releases.create", - "response": { - "$ref": "Release" - }, - "parameterOrder": [ - "name" ], - "description": "Create a `Release`.\n\nRelease names should reflect the developer's deployment practices. For\nexample, the release name may include the environment name, application\nname, application version, or any other name meaningful to the developer.\nOnce a `Release` refers to a `Ruleset`, the rules can be enforced by\nFirebase Rules-enabled services.\n\nMore than one `Release` may be 'live' concurrently. Consider the following\nthree `Release` names for `projects/foo` and the `Ruleset` to which they\nrefer.\n\nRelease Name | Ruleset Name\n--------------------------------|-------------\nprojects/foo/releases/prod | projects/foo/rulesets/uuid123\nprojects/foo/releases/prod/beta | projects/foo/rulesets/uuid123\nprojects/foo/releases/prod/v23 | projects/foo/rulesets/uuid456\n\nThe table reflects the `Ruleset` rollout in progress. The `prod` and\n`prod/beta` releases refer to the same `Ruleset`. However, `prod/v23`\nrefers to a new `Ruleset`. 
The `Ruleset` reference for a `Release` may be\nupdated using the UpdateRelease method, and the custom `Release` name\nmay be referenced by specifying the `X-Firebase-Rules-Release-Name` header.", - "request": { - "$ref": "Release" - }, - "flatPath": "v1/projects/{projectsId}/releases", - "httpMethod": "POST", "parameters": { "name": { - "description": "Resource name for the project which owns this `Release`.\n\nFormat: `projects/{project_id}`", + "description": "Resource name for the `Release` to delete.\n\nFormat: `projects/{project_id}/releases/{release_id}`", "required": true, - "pattern": "^projects/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/releases/.+$", + "location": "path" } }, - "path": "v1/{+name}/releases", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase" - ] + "flatPath": "v1/projects/{projectsId}/releases/{releasesId}", + "path": "v1/{+name}", + "id": "firebaserules.projects.releases.delete" }, "get": { - "id": "firebaserules.projects.releases.get", + "description": "Get a `Release` by name.", "response": { "$ref": "Release" }, "parameterOrder": [ "name" ], - "description": "Get a `Release` by name.", - "flatPath": "v1/projects/{projectsId}/releases/{releasesId}", "httpMethod": "GET", "parameters": { "name": { "description": "Resource name of the `Release`.\n\n\nFormat: `projects/{project_id}/releases/{release_id}`", "required": true, + "type": "string", "pattern": "^projects/[^/]+/releases/.+$", - "location": "path", - "type": "string" + "location": "path" } }, - "path": "v1/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/firebase", "https://www.googleapis.com/auth/firebase.readonly" - ] + ], + "flatPath": "v1/projects/{projectsId}/releases/{releasesId}", + "path": "v1/{+name}", + "id": "firebaserules.projects.releases.get" }, "list": { "id": "firebaserules.projects.releases.list", + "path": "v1/{+name}/releases", + "description": "List the `Release` values for a project. This list may optionally be\nfiltered by `Release` name or `Ruleset` id or both.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], "response": { "$ref": "ListReleasesResponse" }, - "parameterOrder": [ - "name" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/firebase", + "https://www.googleapis.com/auth/firebase.readonly" ], - "description": "List the `Release` values for a project. This list may optionally be\nfiltered by `Release` name or `Ruleset` id or both.", - "flatPath": "v1/projects/{projectsId}/releases", - "httpMethod": "GET", "parameters": { "pageSize": { - "description": "Page size to load. Maximum of 100. Defaults to 10.\nNote: `page_size` is just a hint and the service may choose to load less\nthan `page_size` due to the size of the output. To traverse all of the\nreleases, caller should iterate until the `page_token` is empty.", "location": "query", - "type": "integer", - "format": "int32" + "description": "Page size to load. Maximum of 100. Defaults to 10.\nNote: `page_size` is just a hint and the service may choose to load less\nthan `page_size` due to the size of the output. To traverse all of the\nreleases, caller should iterate until the `page_token` is empty.", + "format": "int32", + "type": "integer" }, "filter": { "description": "`Release` filter. 
The list method supports filters with restrictions on the\n`Release` `name` and also on the `Ruleset` `ruleset_name`.\n\nExample 1) A filter of 'name=prod*' might return `Release`s with names\nwithin 'projects/foo' prefixed with 'prod':\n\nName | Ruleset Name\n------------------------------|-------------\nprojects/foo/releases/prod | projects/foo/rulesets/uuid1234\nprojects/foo/releases/prod/v1 | projects/foo/rulesets/uuid1234\nprojects/foo/releases/prod/v2 | projects/foo/rulesets/uuid8888\n\nExample 2) A filter of `name=prod* ruleset_name=uuid1234` would return only\n`Release` instances for 'projects/foo' with names prefixed with 'prod'\nreferring to the same `Ruleset` name of 'uuid1234':\n\nName | Ruleset Name\n------------------------------|-------------\nprojects/foo/releases/prod | projects/foo/rulesets/1234\nprojects/foo/releases/prod/v1 | projects/foo/rulesets/1234\n\nIn the examples, the filter parameters refer to the search filters for\nrelease and ruleset names are relative to the project releases and rulesets\ncollections. Fully qualified prefixed may also be used. e.g.\n`name=projects/foo/releases/prod* ruleset_name=projects/foo/rulesets/uuid1`", - "location": "query", - "type": "string" + "type": "string", + "location": "query" }, "name": { - "description": "Resource name for the project.\n\nFormat: `projects/{project_id}`", "required": true, + "type": "string", "pattern": "^projects/[^/]+$", "location": "path", - "type": "string" + "description": "Resource name for the project.\n\nFormat: `projects/{project_id}`" }, "pageToken": { - "description": "Next page token for the next batch of `Release` instances.", "location": "query", + "description": "Next page token for the next batch of `Release` instances.", "type": "string" } }, - "path": "v1/{+name}/releases", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase", - "https://www.googleapis.com/auth/firebase.readonly" - ] + "flatPath": "v1/projects/{projectsId}/releases" }, - "delete": { - "id": "firebaserules.projects.releases.delete", + "update": { + "path": "v1/{+name}", + "id": "firebaserules.projects.releases.update", + "description": "Update a `Release`.\n\nOnly updates to the `ruleset_name` field will be honored. `Release` rename\nis not supported. To create a `Release` use the CreateRelease method\ninstead.", + "request": { + "$ref": "Release" + }, "response": { - "$ref": "Empty" + "$ref": "Release" }, "parameterOrder": [ "name" ], - "description": "Delete a `Release` by resource name.", - "flatPath": "v1/projects/{projectsId}/releases/{releasesId}", - "httpMethod": "DELETE", + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/firebase" + ], "parameters": { "name": { - "description": "Resource name for the `Release` to delete.\n\nFormat: `projects/{project_id}/releases/{release_id}`", - "required": true, "pattern": "^projects/[^/]+/releases/.+$", "location": "path", + "description": "Resource name for the `Release`.\n\n`Release` names may be structured `app1/prod/v2` or flat `app1_prod_v2`\nwhich affords developers a great deal of flexibility in mapping the name\nto the style that best fits their existing development practices. 
For\nexample, a name could refer to an environment, an app, a version, or some\ncombination of three.\n\nIn the table below, for the project name `projects/foo`, the following\nrelative release paths show how flat and structured names might be chosen\nto match a desired development / deployment strategy.\n\nUse Case | Flat Name | Structured Name\n-------------|---------------------|----------------\nEnvironments | releases/qa | releases/qa\nApps | releases/app1_qa | releases/app1/qa\nVersions | releases/app1_v2_qa | releases/app1/v2/qa\n\nThe delimiter between the release name path elements can be almost anything\nand it should work equally well with the release name list filter, but in\nmany ways the structured paths provide a clearer picture of the\nrelationship between `Release` instances.\n\nFormat: `projects/{project_id}/releases/{release_id}`", + "required": true, "type": "string" } }, - "path": "v1/{+name}", + "flatPath": "v1/projects/{projectsId}/releases/{releasesId}" + }, + "create": { + "description": "Create a `Release`.\n\nRelease names should reflect the developer's deployment practices. For\nexample, the release name may include the environment name, application\nname, application version, or any other name meaningful to the developer.\nOnce a `Release` refers to a `Ruleset`, the rules can be enforced by\nFirebase Rules-enabled services.\n\nMore than one `Release` may be 'live' concurrently. Consider the following\nthree `Release` names for `projects/foo` and the `Ruleset` to which they\nrefer.\n\nRelease Name | Ruleset Name\n--------------------------------|-------------\nprojects/foo/releases/prod | projects/foo/rulesets/uuid123\nprojects/foo/releases/prod/beta | projects/foo/rulesets/uuid123\nprojects/foo/releases/prod/v23 | projects/foo/rulesets/uuid456\n\nThe table reflects the `Ruleset` rollout in progress. The `prod` and\n`prod/beta` releases refer to the same `Ruleset`. However, `prod/v23`\nrefers to a new `Ruleset`. The `Ruleset` reference for a `Release` may be\nupdated using the UpdateRelease method, and the custom `Release` name\nmay be referenced by specifying the `X-Firebase-Rules-Release-Name` header.", + "request": { + "$ref": "Release" + }, + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Release" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/firebase" - ] + ], + "parameters": { + "name": { + "description": "Resource name for the project which owns this `Release`.\n\nFormat: `projects/{project_id}`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/releases", + "id": "firebaserules.projects.releases.create", + "path": "v1/{+name}/releases" } } } - }, - "methods": { - "test": { - "id": "firebaserules.projects.test", - "response": { - "$ref": "TestRulesetResponse" - }, - "parameterOrder": [ - "name" - ], - "description": "Test `Source` for syntactic and semantic correctness. 
Issues present in the\nrules, if any, will be returned to the caller with a description, severity,\nand source location.\n\nThe test method will typically be executed with a developer provided\n`Source`, but if regression testing is desired, this method may be\nexecuted against a `Ruleset` resource name and the `Source` will be\nretrieved from the persisted `Ruleset`.\n\nThe following is an example of `Source` that permits users to upload images\nto a bucket bearing their user id and matching the correct metadata:\n\n_*Example*_\n\n // Users are allowed to subscribe and unsubscribe to the blog.\n service firebase.storage {\n match /users/{userId}/images/{imageName} {\n allow write: if userId == request.userId\n && (imageName.endsWith('.png') || imageName.endsWith('.jpg'))\n && resource.mimeType.startsWith('image/')\n }\n }", - "request": { - "$ref": "TestRulesetRequest" - }, - "flatPath": "v1/projects/{projectsId}:test", - "httpMethod": "POST", - "parameters": { - "name": { - "description": "Name of the project.\n\nFormat: `projects/{project_id}`", - "required": true, - "pattern": "^projects/.+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+name}:test", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase", - "https://www.googleapis.com/auth/firebase.readonly" - ] - } } } }, - "schemas": { - "Release": { - "description": "`Release` is a named reference to a `Ruleset`. Once a `Release` refers to a\n`Ruleset`, rules-enabled services will be able to enforce the `Ruleset`.", - "type": "object", - "properties": { - "updateTime": { - "description": "Time the release was updated.\n@OutputOnly", - "type": "string", - "format": "google-datetime" - }, - "createTime": { - "description": "Time the release was created.\n@OutputOnly", - "type": "string", - "format": "google-datetime" - }, - "name": { - "description": "Resource name for the `Release`.\n\n`Release` names may be structured `app1/prod/v2` or flat `app1_prod_v2`\nwhich affords developers a great deal of flexibility in mapping the name\nto the style that best fits their existing development practices. For\nexample, a name could refer to an environment, an app, a version, or some\ncombination of three.\n\nIn the table below, for the project name `projects/foo`, the following\nrelative release paths show how flat and structured names might be chosen\nto match a desired development / deployment strategy.\n\nUse Case | Flat Name | Structured Name\n-------------|---------------------|----------------\nEnvironments | releases/qa | releases/qa\nApps | releases/app1_qa | releases/app1/qa\nVersions | releases/app1_v2_qa | releases/app1/v2/qa\n\nThe delimiter between the release name path elements can be almost anything\nand it should work equally well with the release name list filter, but in\nmany ways the structured paths provide a clearer picture of the\nrelationship between `Release` instances.\n\nFormat: `projects/{project_id}/releases/{release_id}`", - "type": "string" - }, - "rulesetName": { - "description": "Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must\nexist the `Release` to be created.", - "type": "string" - } - }, - "id": "Release" + "parameters": { + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string" }, - "Source": { - "description": "`Source` is one or more `File` messages comprising a logical set of rules.", - "type": "object", - "properties": { - "files": { - "description": "`File` set constituting the `Source` bundle.", - "type": "array", - "items": { - "$ref": "File" - } - } - }, - "id": "Source" + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] }, + "callback": { + "type": "string", + "location": "query", + "description": "JSONP" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "bearer_token": { + "type": "string", + "location": "query", + "description": "OAuth bearer token." + } + }, + "version": "v1", + "baseUrl": "https://firebaserules.googleapis.com/", + "description": "Creates and manages rules that determine when a Firebase Rules-enabled service should permit a request.\n", + "kind": "discovery#restDescription", + "servicePath": "", + "basePath": "", + "revision": "20170103", + "id": "firebaserules:v1", + "documentationLink": "https://firebase.google.com/docs/storage/security", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { "SourcePosition": { "description": "Position in the `Source` content including its line, column number, and an\nindex of the `File` in the `Source` message. Used for debug purposes.", "type": "object", "properties": { + "column": { + "description": "First column on the source line associated with the source fragment.", + "format": "int32", + "type": "integer" + }, "fileName": { "description": "Name of the `File`.", "type": "string" }, - "column": { - "description": "First column on the source line associated with the source fragment.", - "type": "integer", - "format": "int32" - }, "line": { "description": "Line number of the source fragment. 
1-based.", - "type": "integer", - "format": "int32" + "format": "int32", + "type": "integer" } }, "id": "SourcePosition" }, - "TestRulesetResponse": { - "description": "The response for FirebaseRulesService.TestRuleset.", + "Issue": { + "id": "Issue", + "description": "Issues include warnings, errors, and deprecation notices.", "type": "object", "properties": { - "issues": { - "description": "Syntactic and semantic `Source` issues of varying severity. Issues of\n`ERROR` severity will prevent tests from executing.", - "type": "array", - "items": { - "$ref": "Issue" - } + "severity": { + "enum": [ + "SEVERITY_UNSPECIFIED", + "DEPRECATION", + "WARNING", + "ERROR" + ], + "description": "The severity of the issue.", + "type": "string", + "enumDescriptions": [ + "An unspecified severity.", + "Deprecation issue for statements and method that may no longer be\nsupported or maintained.", + "Warnings such as: unused variables.", + "Errors such as: unmatched curly braces or variable redefinition." + ] + }, + "description": { + "description": "Short error description.", + "type": "string" + }, + "sourcePosition": { + "description": "Position of the issue in the `Source`.", + "$ref": "SourcePosition" } - }, - "id": "TestRulesetResponse" + } }, "Ruleset": { "description": "`Ruleset` is an immutable copy of `Source` with a globally unique identifier\nand a creation time.", @@ -422,8 +502,8 @@ "$ref": "Source" }, "createTime": { - "description": "Time the `Ruleset` was created.\n@OutputOnly", "type": "string", + "description": "Time the `Ruleset` was created.\n@OutputOnly", "format": "google-datetime" }, "name": { @@ -433,216 +513,136 @@ }, "id": "Ruleset" }, - "ListReleasesResponse": { - "description": "The response for FirebaseRulesService.ListReleases.", + "TestRulesetRequest": { "type": "object", "properties": { - "nextPageToken": { - "description": "The pagination token to retrieve the next page of results. If the value is\nempty, no further results remain.", + "source": { + "$ref": "Source", + "description": "`Source` to be checked for correctness." + } + }, + "id": "TestRulesetRequest", + "description": "The request for FirebaseRulesService.TestRuleset." + }, + "Empty": { + "properties": {}, + "id": "Empty", + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object" + }, + "File": { + "type": "object", + "properties": { + "fingerprint": { + "description": "Fingerprint (e.g. github sha) associated with the `File`.", + "format": "byte", "type": "string" }, - "releases": { - "description": "List of `Release` instances.", - "type": "array", - "items": { - "$ref": "Release" - } + "name": { + "description": "File name.", + "type": "string" + }, + "content": { + "description": "Textual Content.", + "type": "string" } }, - "id": "ListReleasesResponse" + "id": "File", + "description": "`File` containing source content." 
}, - "ListRulesetsResponse": { - "description": "The response for FirebaseRulesService.ListRulesets", + "ListReleasesResponse": { + "id": "ListReleasesResponse", + "description": "The response for FirebaseRulesService.ListReleases.", "type": "object", "properties": { - "rulesets": { - "description": "List of `Ruleset` instances.", + "releases": { + "description": "List of `Release` instances.", "type": "array", "items": { - "$ref": "Ruleset" + "$ref": "Release" } }, "nextPageToken": { "description": "The pagination token to retrieve the next page of results. If the value is\nempty, no further results remain.", "type": "string" } - }, - "id": "ListRulesetsResponse" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" + } }, - "File": { - "description": "`File` containing source content.", + "Release": { + "description": "`Release` is a named reference to a `Ruleset`. Once a `Release` refers to a\n`Ruleset`, rules-enabled services will be able to enforce the `Ruleset`.", "type": "object", "properties": { - "content": { - "description": "Textual Content.", + "createTime": { + "description": "Time the release was created.\n@OutputOnly", + "format": "google-datetime", + "type": "string" + }, + "updateTime": { + "description": "Time the release was updated.\n@OutputOnly", + "format": "google-datetime", "type": "string" }, "name": { - "description": "File name.", + "description": "Resource name for the `Release`.\n\n`Release` names may be structured `app1/prod/v2` or flat `app1_prod_v2`\nwhich affords developers a great deal of flexibility in mapping the name\nto the style that best fits their existing development practices. For\nexample, a name could refer to an environment, an app, a version, or some\ncombination of three.\n\nIn the table below, for the project name `projects/foo`, the following\nrelative release paths show how flat and structured names might be chosen\nto match a desired development / deployment strategy.\n\nUse Case | Flat Name | Structured Name\n-------------|---------------------|----------------\nEnvironments | releases/qa | releases/qa\nApps | releases/app1_qa | releases/app1/qa\nVersions | releases/app1_v2_qa | releases/app1/v2/qa\n\nThe delimiter between the release name path elements can be almost anything\nand it should work equally well with the release name list filter, but in\nmany ways the structured paths provide a clearer picture of the\nrelationship between `Release` instances.\n\nFormat: `projects/{project_id}/releases/{release_id}`", "type": "string" }, - "fingerprint": { - "description": "Fingerprint (e.g. github sha) associated with the `File`.", - "type": "string", - "format": "byte" + "rulesetName": { + "description": "Name of the `Ruleset` referred to by this `Release`. 
The `Ruleset` must\nexist the `Release` to be created.", + "type": "string" } }, - "id": "File" + "id": "Release" }, - "TestRulesetRequest": { - "description": "The request for FirebaseRulesService.TestRuleset.", + "TestRulesetResponse": { + "description": "The response for FirebaseRulesService.TestRuleset.", "type": "object", "properties": { - "source": { - "description": "`Source` to be checked for correctness.", - "$ref": "Source" + "issues": { + "description": "Syntactic and semantic `Source` issues of varying severity. Issues of\n`ERROR` severity will prevent tests from executing.", + "type": "array", + "items": { + "$ref": "Issue" + } } }, - "id": "TestRulesetRequest" + "id": "TestRulesetResponse" }, - "Issue": { - "description": "Issues include warnings, errors, and deprecation notices.", + "ListRulesetsResponse": { + "description": "The response for FirebaseRulesService.ListRulesets", "type": "object", "properties": { - "description": { - "description": "Short error description.", - "type": "string" + "rulesets": { + "description": "List of `Ruleset` instances.", + "type": "array", + "items": { + "$ref": "Ruleset" + } }, - "severity": { - "description": "The severity of the issue.", - "enum": [ - "SEVERITY_UNSPECIFIED", - "DEPRECATION", - "WARNING", - "ERROR" - ], - "enumDescriptions": [ - "An unspecified severity.", - "Deprecation issue for statements and method that may no longer be\nsupported or maintained.", - "Warnings such as: unused variables.", - "Errors such as: unmatched curly braces or variable redefinition." - ], + "nextPageToken": { + "description": "The pagination token to retrieve the next page of results. If the value is\nempty, no further results remain.", "type": "string" - }, - "sourcePosition": { - "description": "Position of the issue in the `Source`.", - "$ref": "SourcePosition" } }, - "id": "Issue" + "id": "ListRulesetsResponse" + }, + "Source": { + "type": "object", + "properties": { + "files": { + "description": "`File` set constituting the `Source` bundle.", + "type": "array", + "items": { + "$ref": "File" + } + } + }, + "id": "Source", + "description": "`Source` is one or more `File` messages comprising a logical set of rules." } }, - "revision": "20170103", - "basePath": "", "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "version_module": "True", - "canonicalName": "Firebase Rules API", - "discoveryVersion": "v1", - "baseUrl": "https://firebaserules.googleapis.com/", - "name": "firebaserules", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - } + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, - "documentationLink": "https://firebase.google.com/docs/storage/security", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1", - "rootUrl": "https://firebaserules.googleapis.com/", - "kind": "discovery#restDescription" + "protocol": "rest" } diff --git a/vendor/google.golang.org/api/firebaserules/v1/firebaserules-gen.go b/vendor/google.golang.org/api/firebaserules/v1/firebaserules-gen.go index 7bad7e0a9..494eac440 100644 --- a/vendor/google.golang.org/api/firebaserules/v1/firebaserules-gen.go +++ b/vendor/google.golang.org/api/firebaserules/v1/firebaserules-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Releases = NewProjectsReleasesService(s) @@ -620,6 +625,7 @@ func (c *ProjectsTestCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testrulesetrequest) if err != nil { @@ -794,6 +800,7 @@ func (c 
*ProjectsReleasesCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.release) if err != nil { @@ -928,6 +935,7 @@ func (c *ProjectsReleasesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") @@ -1065,6 +1073,7 @@ func (c *ProjectsReleasesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1272,6 +1281,7 @@ func (c *ProjectsReleasesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1447,6 +1457,7 @@ func (c *ProjectsReleasesUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.release) if err != nil { @@ -1591,6 +1602,7 @@ func (c *ProjectsRulesetsCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.ruleset) if err != nil { @@ -1728,6 +1740,7 @@ func (c *ProjectsRulesetsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") @@ -1865,6 +1878,7 @@ func (c *ProjectsRulesetsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2031,6 +2045,7 @@ func (c *ProjectsRulesetsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/fitness/v1/fitness-gen.go b/vendor/google.golang.org/api/fitness/v1/fitness-gen.go index f33c29e97..71ff5e9d6 100644 --- a/vendor/google.golang.org/api/fitness/v1/fitness-gen.go +++ b/vendor/google.golang.org/api/fitness/v1/fitness-gen.go @@ -112,9 +112,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, 
for Google use only Users *UsersService } @@ -126,6 +127,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewUsersService(s *Service) *UsersService { rs := &UsersService{s: s} rs.DataSources = NewUsersDataSourcesService(s) @@ -1318,6 +1323,7 @@ func (c *UsersDataSourcesCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.datasource) if err != nil { @@ -1460,6 +1466,7 @@ func (c *UsersDataSourcesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/dataSources/{dataSourceId}") @@ -1612,6 +1619,7 @@ func (c *UsersDataSourcesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1785,6 +1793,7 @@ func (c *UsersDataSourcesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1943,6 +1952,7 @@ func (c *UsersDataSourcesPatchCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.datasource) if err != nil { @@ -2098,6 +2108,7 @@ func (c *UsersDataSourcesUpdateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.datasource) if err != nil { @@ -2269,6 +2280,7 @@ func (c *UsersDataSourcesDatasetsDeleteCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/dataSources/{dataSourceId}/datasets/{datasetId}") @@ -2439,6 +2451,7 @@ func (c *UsersDataSourcesDatasetsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2649,6 +2662,7 @@ func (c *UsersDataSourcesDatasetsPatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { @@ -2836,6 +2850,7 @@ func (c *UsersDatasetAggregateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.aggregaterequest) if err != nil { @@ -2993,6 +3008,7 @@ func (c *UsersSessionsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/sessions/{sessionId}") @@ -3147,6 +3163,7 @@ func (c *UsersSessionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3342,6 +3359,7 @@ func (c *UsersSessionsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.session) if err != nil { diff --git a/vendor/google.golang.org/api/fusiontables/v1/fusiontables-gen.go b/vendor/google.golang.org/api/fusiontables/v1/fusiontables-gen.go index c242c94b2..8fa1ee6d3 100644 --- a/vendor/google.golang.org/api/fusiontables/v1/fusiontables-gen.go +++ b/vendor/google.golang.org/api/fusiontables/v1/fusiontables-gen.go @@ -69,9 +69,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Column *ColumnService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewColumnService(s *Service) *ColumnService { rs := &ColumnService{s: s} return rs @@ -1281,6 +1286,7 @@ func (c *ColumnDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/columns/{columnId}") @@ -1397,6 +1403,7 @@ func (c *ColumnGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1534,6 +1541,7 @@ func (c *ColumnInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.column) if err != nil { @@ -1690,6 +1698,7 @@ func (c *ColumnListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } 
@@ -1855,6 +1864,7 @@ func (c *ColumnPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.column) if err != nil { @@ -1998,6 +2008,7 @@ func (c *ColumnUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.column) if err != nil { @@ -2152,6 +2163,7 @@ func (c *QuerySqlCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "query") @@ -2326,6 +2338,7 @@ func (c *QuerySqlGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2480,6 +2493,7 @@ func (c *StyleDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/styles/{styleId}") @@ -2597,6 +2611,7 @@ func (c *StyleGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2735,6 +2750,7 @@ func (c *StyleInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.stylesetting) if err != nil { @@ -2891,6 +2907,7 @@ func (c *StyleListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3056,6 +3073,7 @@ func (c *StylePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.stylesetting) if err != nil { @@ -3200,6 +3218,7 @@ func (c *StyleUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.stylesetting) if err != nil { @@ -3347,6 +3366,7 @@ func (c *TableCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/copy") 
@@ -3476,6 +3496,7 @@ func (c *TableDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}") @@ -3582,6 +3603,7 @@ func (c *TableGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3807,6 +3829,7 @@ func (c *TableImportRowsCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/import") @@ -4090,6 +4113,7 @@ func (c *TableImportTableCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/import") @@ -4285,6 +4309,7 @@ func (c *TableInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -4425,6 +4450,7 @@ func (c *TableListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4586,6 +4612,7 @@ func (c *TablePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -4734,6 +4761,7 @@ func (c *TableUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -4872,6 +4900,7 @@ func (c *TaskDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/tasks/{taskId}") @@ -4987,6 +5016,7 @@ func (c *TaskGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5151,6 +5181,7 @@ func (c *TaskListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5317,6 +5348,7 
@@ func (c *TemplateDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/templates/{templateId}") @@ -5434,6 +5466,7 @@ func (c *TemplateGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5572,6 +5605,7 @@ func (c *TemplateInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.template) if err != nil { @@ -5728,6 +5762,7 @@ func (c *TemplateListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5893,6 +5928,7 @@ func (c *TemplatePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.template) if err != nil { @@ -6037,6 +6073,7 @@ func (c *TemplateUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.template) if err != nil { diff --git a/vendor/google.golang.org/api/fusiontables/v2/fusiontables-gen.go b/vendor/google.golang.org/api/fusiontables/v2/fusiontables-gen.go index 3d47880f6..e429a1b95 100644 --- a/vendor/google.golang.org/api/fusiontables/v2/fusiontables-gen.go +++ b/vendor/google.golang.org/api/fusiontables/v2/fusiontables-gen.go @@ -69,9 +69,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Column *ColumnService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewColumnService(s *Service) *ColumnService { rs := &ColumnService{s: s} return rs @@ -1339,6 +1344,7 @@ func (c *ColumnDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/columns/{columnId}") @@ -1455,6 +1461,7 @@ func (c *ColumnGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1592,6 +1599,7 @@ func (c *ColumnInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.column) if err != nil { @@ -1748,6 +1756,7 @@ func (c *ColumnListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1913,6 +1922,7 @@ func (c *ColumnPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.column) if err != nil { @@ -2056,6 +2066,7 @@ func (c *ColumnUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.column) if err != nil { @@ -2216,6 +2227,7 @@ func (c *QuerySqlCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "query") @@ -2393,6 +2405,7 @@ func (c *QuerySqlGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2547,6 +2560,7 @@ func (c *StyleDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/styles/{styleId}") @@ -2664,6 +2678,7 @@ func (c *StyleGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2802,6 +2817,7 @@ func (c *StyleInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.stylesetting) if err != nil { @@ -2958,6 +2974,7 @@ func (c *StyleListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3123,6 +3140,7 @@ func (c *StylePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err 
:= googleapi.WithoutDataWrapper.JSONReader(c.stylesetting) if err != nil { @@ -3267,6 +3285,7 @@ func (c *StyleUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.stylesetting) if err != nil { @@ -3414,6 +3433,7 @@ func (c *TableCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/copy") @@ -3543,6 +3563,7 @@ func (c *TableDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}") @@ -3649,6 +3670,7 @@ func (c *TableGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3869,6 +3891,7 @@ func (c *TableImportRowsCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/import") @@ -4152,6 +4175,7 @@ func (c *TableImportTableCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/import") @@ -4347,6 +4371,7 @@ func (c *TableInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -4487,6 +4512,7 @@ func (c *TableListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4648,6 +4674,7 @@ func (c *TablePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -4880,6 +4907,7 @@ func (c *TableReplaceRowsCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/replace") @@ -5107,6 +5135,7 @@ func (c *TableUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { @@ -5246,6 +5275,7 @@ func (c *TaskDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/tasks/{taskId}") @@ -5362,6 +5392,7 @@ func (c *TaskGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5529,6 +5560,7 @@ func (c *TaskListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5697,6 +5729,7 @@ func (c *TemplateDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "tables/{tableId}/templates/{templateId}") @@ -5814,6 +5847,7 @@ func (c *TemplateGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5952,6 +5986,7 @@ func (c *TemplateInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.template) if err != nil { @@ -6108,6 +6143,7 @@ func (c *TemplateListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6273,6 +6309,7 @@ func (c *TemplatePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.template) if err != nil { @@ -6417,6 +6454,7 @@ func (c *TemplateUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.template) if err != nil { diff --git a/vendor/google.golang.org/api/games/v1/games-gen.go b/vendor/google.golang.org/api/games/v1/games-gen.go index 83064f113..fcf798cef 100644 --- a/vendor/google.golang.org/api/games/v1/games-gen.go +++ b/vendor/google.golang.org/api/games/v1/games-gen.go @@ -82,9 +82,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client 
+ BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only AchievementDefinitions *AchievementDefinitionsService @@ -124,6 +125,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAchievementDefinitionsService(s *Service) *AchievementDefinitionsService { rs := &AchievementDefinitionsService{s: s} return rs @@ -5117,6 +5122,7 @@ func (c *AchievementDefinitionsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5296,6 +5302,7 @@ func (c *AchievementsIncrementCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/{achievementId}/increment") @@ -5497,6 +5504,7 @@ func (c *AchievementsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5694,6 +5702,7 @@ func (c *AchievementsRevealCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/{achievementId}/reveal") @@ -5835,6 +5844,7 @@ func (c *AchievementsSetStepsAtLeastCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/{achievementId}/setStepsAtLeast") @@ -5983,6 +5993,7 @@ func (c *AchievementsUnlockCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/{achievementId}/unlock") @@ -6121,6 +6132,7 @@ func (c *AchievementsUpdateMultipleCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.achievementupdatemultiplerequest) if err != nil { @@ -6289,6 +6301,7 @@ func (c *ApplicationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6448,6 +6461,7 @@ func (c *ApplicationsPlayedCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "applications/played") @@ -6558,6 +6572,7 @@ func (c *ApplicationsVerifyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6731,6 +6746,7 @@ func (c *EventsListByPlayerCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6931,6 +6947,7 @@ func (c *EventsListDefinitionsCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7106,6 +7123,7 @@ func (c *EventsRecordCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventrecordrequest) if err != nil { @@ -7262,6 +7280,7 @@ func (c *LeaderboardsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7439,6 +7458,7 @@ func (c *LeaderboardsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7616,6 +7636,7 @@ func (c *MetagameGetMetagameConfigCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7781,6 +7802,7 @@ func (c *MetagameListCategoriesByPlayerCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7993,6 +8015,7 @@ func (c *PlayersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8173,6 +8196,7 @@ func (c *PlayersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8365,6 +8389,7 @@ func (c *PushtokensRemoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pushtokenid) if err != 
nil { @@ -8470,6 +8495,7 @@ func (c *PushtokensUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pushtoken) if err != nil { @@ -8580,6 +8606,7 @@ func (c *QuestMilestonesClaimCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "quests/{questId}/milestones/{milestoneId}/claim") @@ -8713,6 +8740,7 @@ func (c *QuestsAcceptCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "quests/{questId}/accept") @@ -8891,6 +8919,7 @@ func (c *QuestsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9080,6 +9109,7 @@ func (c *RevisionsCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9225,6 +9255,7 @@ func (c *RoomsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.roomcreaterequest) if err != nil { @@ -9371,6 +9402,7 @@ func (c *RoomsDeclineCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "rooms/{roomId}/decline") @@ -9514,6 +9546,7 @@ func (c *RoomsDismissCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "rooms/{roomId}/dismiss") @@ -9641,6 +9674,7 @@ func (c *RoomsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9796,6 +9830,7 @@ func (c *RoomsJoinCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.roomjoinrequest) if err != nil { @@ -9956,6 +9991,7 @@ func (c *RoomsLeaveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body 
io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.roomleaverequest) if err != nil { @@ -10138,6 +10174,7 @@ func (c *RoomsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10316,6 +10353,7 @@ func (c *RoomsReportStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.roomp2pstatuses) if err != nil { @@ -10523,6 +10561,7 @@ func (c *ScoresGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10783,6 +10822,7 @@ func (c *ScoresListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11053,6 +11093,7 @@ func (c *ScoresListWindowCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11295,6 +11336,7 @@ func (c *ScoresSubmitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "leaderboards/{leaderboardId}/scores") @@ -11458,6 +11500,7 @@ func (c *ScoresSubmitMultipleCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.playerscoresubmissionlist) if err != nil { @@ -11614,6 +11657,7 @@ func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11795,6 +11839,7 @@ func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11975,6 +12020,7 @@ func (c *TurnBasedMatchesCancelCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/{matchId}/cancel") @@ -12091,6 +12137,7 @@ func (c *TurnBasedMatchesCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var 
body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.turnbasedmatchcreaterequest) if err != nil { @@ -12236,6 +12283,7 @@ func (c *TurnBasedMatchesDeclineCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/{matchId}/decline") @@ -12380,6 +12428,7 @@ func (c *TurnBasedMatchesDismissCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/{matchId}/dismiss") @@ -12500,6 +12549,7 @@ func (c *TurnBasedMatchesFinishCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.turnbasedmatchresults) if err != nil { @@ -12675,6 +12725,7 @@ func (c *TurnBasedMatchesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12832,6 +12883,7 @@ func (c *TurnBasedMatchesJoinCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/{matchId}/join") @@ -12982,6 +13034,7 @@ func (c *TurnBasedMatchesLeaveCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/{matchId}/leave") @@ -13143,6 +13196,7 @@ func (c *TurnBasedMatchesLeaveTurnCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/{matchId}/leaveTurn") @@ -13350,6 +13404,7 @@ func (c *TurnBasedMatchesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13549,6 +13604,7 @@ func (c *TurnBasedMatchesRematchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/{matchId}/rematch") @@ -13752,6 +13808,7 @@ func (c *TurnBasedMatchesSyncCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13941,6 +13998,7 @@ func (c *TurnBasedMatchesTakeTurnCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.turnbasedmatchturn) if err != nil { diff --git a/vendor/google.golang.org/api/gamesconfiguration/v1configuration/gamesconfiguration-gen.go b/vendor/google.golang.org/api/gamesconfiguration/v1configuration/gamesconfiguration-gen.go index 9aa15d137..6420515da 100644 --- a/vendor/google.golang.org/api/gamesconfiguration/v1configuration/gamesconfiguration-gen.go +++ b/vendor/google.golang.org/api/gamesconfiguration/v1configuration/gamesconfiguration-gen.go @@ -63,9 +63,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only AchievementConfigurations *AchievementConfigurationsService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAchievementConfigurationsService(s *Service) *AchievementConfigurationsService { rs := &AchievementConfigurationsService{s: s} return rs @@ -662,6 +667,7 @@ func (c *AchievementConfigurationsDeleteCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/{achievementId}") @@ -769,6 +775,7 @@ func (c *AchievementConfigurationsGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -897,6 +904,7 @@ func (c *AchievementConfigurationsInsertCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.achievementconfiguration) if err != nil { @@ -1056,6 +1064,7 @@ func (c *AchievementConfigurationsListCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1220,6 +1229,7 @@ func (c *AchievementConfigurationsPatchCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.achievementconfiguration) if err != nil { @@ -1354,6 +1364,7 @@ func (c *AchievementConfigurationsUpdateCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.achievementconfiguration) if err != nil { @@ -1541,6 +1552,7 @@ func (c *ImageConfigurationsUploadCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "images/{resourceId}/imageType/{imageType}") @@ -1745,6 +1757,7 @@ func (c *LeaderboardConfigurationsDeleteCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "leaderboards/{leaderboardId}") @@ -1852,6 +1865,7 @@ func (c *LeaderboardConfigurationsGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1980,6 +1994,7 @@ func (c *LeaderboardConfigurationsInsertCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.leaderboardconfiguration) if err != nil { @@ -2139,6 +2154,7 @@ func (c *LeaderboardConfigurationsListCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2303,6 +2319,7 @@ func (c *LeaderboardConfigurationsPatchCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.leaderboardconfiguration) if err != nil { @@ -2437,6 +2454,7 @@ func (c *LeaderboardConfigurationsUpdateCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.leaderboardconfiguration) if err != nil { diff --git a/vendor/google.golang.org/api/gamesmanagement/v1management/gamesmanagement-gen.go b/vendor/google.golang.org/api/gamesmanagement/v1management/gamesmanagement-gen.go index d8ce0870f..fe4faddf7 100644 --- a/vendor/google.golang.org/api/gamesmanagement/v1management/gamesmanagement-gen.go +++ b/vendor/google.golang.org/api/gamesmanagement/v1management/gamesmanagement-gen.go @@ -72,9 +72,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Achievements *AchievementsService @@ -100,6 +101,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return 
gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAchievementsService(s *Service) *AchievementsService { rs := &AchievementsService{s: s} return rs @@ -854,6 +859,7 @@ func (c *AchievementsResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/{achievementId}/reset") @@ -978,6 +984,7 @@ func (c *AchievementsResetAllCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/reset") @@ -1088,6 +1095,7 @@ func (c *AchievementsResetAllForAllPlayersCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/resetAllForAllPlayers") @@ -1172,6 +1180,7 @@ func (c *AchievementsResetForAllPlayersCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "achievements/{achievementId}/resetForAllPlayers") @@ -1270,6 +1279,7 @@ func (c *AchievementsResetMultipleForAllPlayersCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.achievementresetmultipleforallrequest) if err != nil { @@ -1389,6 +1399,7 @@ func (c *ApplicationsListHiddenCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1553,6 +1564,7 @@ func (c *EventsResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "events/{eventId}/reset") @@ -1650,6 +1662,7 @@ func (c *EventsResetAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "events/reset") @@ -1732,6 +1745,7 @@ func (c *EventsResetAllForAllPlayersCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "events/resetAllForAllPlayers") @@ -1817,6 +1831,7 @@ func (c *EventsResetForAllPlayersCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "events/{eventId}/resetForAllPlayers") @@ -1916,6 +1931,7 @@ func (c *EventsResetMultipleForAllPlayersCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.eventsresetmultipleforallrequest) if err != nil { @@ -2010,6 +2026,7 @@ func (c *PlayersHideCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "applications/{applicationId}/players/hidden/{playerId}") @@ -2118,6 +2135,7 @@ func (c *PlayersUnhideCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "applications/{applicationId}/players/hidden/{playerId}") @@ -2224,6 +2242,7 @@ func (c *QuestsResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "quests/{questId}/reset") @@ -2320,6 +2339,7 @@ func (c *QuestsResetAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "quests/reset") @@ -2401,6 +2421,7 @@ func (c *QuestsResetAllForAllPlayersCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "quests/resetAllForAllPlayers") @@ -2485,6 +2506,7 @@ func (c *QuestsResetForAllPlayersCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "quests/{questId}/resetForAllPlayers") @@ -2583,6 +2605,7 @@ func (c *QuestsResetMultipleForAllPlayersCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.questsresetmultipleforallrequest) if err != nil { @@ -2673,6 +2696,7 @@ func (c *RoomsResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "rooms/reset") @@ -2755,6 +2779,7 @@ func (c *RoomsResetForAllPlayersCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "rooms/resetForAllPlayers") @@ -2839,6 +2864,7 @@ func (c *ScoresResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "leaderboards/{leaderboardId}/scores/reset") @@ -2963,6 +2989,7 @@ func (c *ScoresResetAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "scores/reset") @@ -3073,6 +3100,7 @@ func (c *ScoresResetAllForAllPlayersCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "scores/resetAllForAllPlayers") @@ -3157,6 +3185,7 @@ func (c *ScoresResetForAllPlayersCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "leaderboards/{leaderboardId}/scores/resetForAllPlayers") @@ -3256,6 +3285,7 @@ func (c *ScoresResetMultipleForAllPlayersCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.scoresresetmultipleforallrequest) if err != nil { @@ -3345,6 +3375,7 @@ func (c *TurnBasedMatchesResetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/reset") @@ -3428,6 +3459,7 @@ func (c *TurnBasedMatchesResetForAllPlayersCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "turnbasedmatches/resetForAllPlayers") diff --git a/vendor/google.golang.org/api/genomics/v1/genomics-api.json b/vendor/google.golang.org/api/genomics/v1/genomics-api.json index c8823d872..39f255b89 100644 --- a/vendor/google.golang.org/api/genomics/v1/genomics-api.json +++ b/vendor/google.golang.org/api/genomics/v1/genomics-api.json @@ -1,1699 +1,1594 @@ { + "rootUrl": "https://genomics.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "genomics", + "batchPath": "batch", + "revision": "20170209", "id": "genomics:v1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/devstorage.read_write": { - "description": "Manage your 
data in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/bigquery": { - "description": "View and manage your data in Google BigQuery" - }, - "https://www.googleapis.com/auth/genomics": { - "description": "View and manage Genomics data" - }, - "https://www.googleapis.com/auth/genomics.readonly": { - "description": "View Genomics data" - } - } - } - }, - "description": "Upload, process, query, and search Genomics data in the cloud.", - "protocol": "rest", + "documentationLink": "https://cloud.google.com/genomics", "title": "Genomics API", + "ownerName": "Google", + "discoveryVersion": "v1", "resources": { - "callsets": { + "datasets": { "methods": { - "create": { - "id": "genomics.callsets.create", - "response": { - "$ref": "CallSet" - }, - "parameterOrder": [], - "description": "Creates a new call set.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "setIamPolicy": { + "description": "Sets the access control policy on the specified dataset. Replaces any\nexisting policy.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nSee \u003ca href=\"/iam/docs/managing-policies#setting_a_policy\"\u003eSetting a\nPolicy\u003c/a\u003e for more information.", "request": { - "$ref": "CallSet" + "$ref": "SetIamPolicyRequest" }, - "flatPath": "v1/callsets", + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], "httpMethod": "POST", - "parameters": {}, - "path": "v1/callsets", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] - }, - "get": { - "id": "genomics.callsets.get", - "response": { - "$ref": "CallSet" - }, - "parameterOrder": [ - "callSetId" ], - "description": "Gets a call set by ID.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/callsets/{callSetId}", - "httpMethod": "GET", "parameters": { - "callSetId": { - "description": "The ID of the call set.", - "required": true, + "resource": { + "pattern": "^datasets/[^/]+$", "location": "path", + "description": "REQUIRED: The resource for which policy is being specified. 
Format is\n`datasets/\u003cdataset ID\u003e`.", + "required": true, "type": "string" } }, - "path": "v1/callsets/{callSetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] + "flatPath": "v1/datasets/{datasetsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "genomics.datasets.setIamPolicy" }, - "search": { - "id": "genomics.callsets.search", + "create": { + "path": "v1/datasets", + "id": "genomics.datasets.create", + "description": "Creates a new dataset.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "request": { + "$ref": "Dataset" + }, "response": { - "$ref": "SearchCallSetsResponse" + "$ref": "Dataset" }, "parameterOrder": [], - "description": "Gets a list of call sets matching the criteria.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchCallSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L178).", - "request": { - "$ref": "SearchCallSetsRequest" - }, - "flatPath": "v1/callsets/search", "httpMethod": "POST", - "parameters": {}, - "path": "v1/callsets/search", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] + "https://www.googleapis.com/auth/genomics" + ], + "parameters": {}, + "flatPath": "v1/datasets" }, - "patch": { - "id": "genomics.callsets.patch", - "response": { - "$ref": "CallSet" + "getIamPolicy": { + "request": { + "$ref": "GetIamPolicyRequest" }, + "description": "Gets the access control policy for the dataset. This is empty if the\npolicy or resource does not exist.\n\nSee \u003ca href=\"/iam/docs/managing-policies#getting_a_policy\"\u003eGetting a\nPolicy\u003c/a\u003e for more information.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "httpMethod": "POST", "parameterOrder": [ - "callSetId" + "resource" ], - "description": "Updates a call set.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics.", - "request": { - "$ref": "CallSet" + "response": { + "$ref": "Policy" }, - "flatPath": "v1/callsets/{callSetId}", - "httpMethod": "PATCH", "parameters": { - "callSetId": { - "description": "The ID of the call set to be updated.", + "resource": { + "description": "REQUIRED: The resource for which policy is being specified. Format is\n`datasets/\u003cdataset ID\u003e`.", "required": true, - "location": "path", - "type": "string" - }, - "updateMask": { - "description": "An optional mask specifying which fields to update. At this time, the only\nmutable field is name. The only\nacceptable value is \"name\". 
If unspecified, all mutable fields will be\nupdated.", - "location": "query", "type": "string", - "format": "google-fieldmask" + "pattern": "^datasets/[^/]+$", + "location": "path" } }, - "path": "v1/callsets/{callSetId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] + ], + "flatPath": "v1/datasets/{datasetsId}:getIamPolicy", + "id": "genomics.datasets.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy" }, - "delete": { - "id": "genomics.callsets.delete", + "undelete": { "response": { - "$ref": "Empty" + "$ref": "Dataset" }, "parameterOrder": [ - "callSetId" + "datasetId" ], - "description": "Deletes a call set.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/callsets/{callSetId}", - "httpMethod": "DELETE", + "httpMethod": "POST", "parameters": { - "callSetId": { - "description": "The ID of the call set to be deleted.", + "datasetId": { + "description": "The ID of the dataset to be undeleted.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/callsets/{callSetId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] - } - } - }, - "annotationsets": { - "methods": { - "update": { - "id": "genomics.annotationsets.update", + ], + "flatPath": "v1/datasets/{datasetId}:undelete", + "path": "v1/datasets/{datasetId}:undelete", + "id": "genomics.datasets.undelete", + "request": { + "$ref": "UndeleteDatasetRequest" + }, + "description": "Undeletes a dataset by restoring a dataset which was deleted via this API.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis operation is only possible for a week after the deletion occurred." + }, + "patch": { "response": { - "$ref": "AnnotationSet" + "$ref": "Dataset" }, "parameterOrder": [ - "annotationSetId" + "datasetId" ], - "description": "Updates an annotation set. The update must respect all mutability\nrestrictions and other invariants described on the annotation set resource.\nCaller must have WRITE permission for the associated dataset.", - "request": { - "$ref": "AnnotationSet" - }, - "flatPath": "v1/annotationsets/{annotationSetId}", - "httpMethod": "PUT", + "httpMethod": "PATCH", "parameters": { - "updateMask": { - "description": "An optional mask specifying which fields to update. Mutable fields are\nname,\nsource_uri, and\ninfo. If unspecified, all\nmutable fields will be updated.", - "location": "query", - "type": "string", - "format": "google-fieldmask" - }, - "annotationSetId": { - "description": "The ID of the annotation set to be updated.", - "required": true, + "datasetId": { "location": "path", + "description": "The ID of the dataset to be updated.", + "required": true, "type": "string" + }, + "updateMask": { + "description": "An optional mask specifying which fields to update. At this time, the only\nmutable field is name. The only\nacceptable value is \"name\". 
If unspecified, all mutable fields will be\nupdated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" } }, - "path": "v1/annotationsets/{annotationSetId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] - }, - "create": { - "id": "genomics.annotationsets.create", - "response": { - "$ref": "AnnotationSet" - }, - "parameterOrder": [], - "description": "Creates a new annotation set. Caller must have WRITE permission for the\nassociated dataset.\n\nThe following fields are required:\n\n * datasetId\n * referenceSetId\n\nAll other fields may be optionally specified, unless documented as being\nserver-generated (for example, the `id` field).", + ], + "flatPath": "v1/datasets/{datasetId}", + "path": "v1/datasets/{datasetId}", + "id": "genomics.datasets.patch", "request": { - "$ref": "AnnotationSet" + "$ref": "Dataset" }, - "flatPath": "v1/annotationsets", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/annotationsets", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "description": "Updates a dataset.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics." }, "get": { - "id": "genomics.annotationsets.get", + "description": "Gets a dataset by ID.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "httpMethod": "GET", "response": { - "$ref": "AnnotationSet" + "$ref": "Dataset" }, "parameterOrder": [ - "annotationSetId" + "datasetId" ], - "description": "Gets an annotation set. Caller must have READ permission for\nthe associated dataset.", - "flatPath": "v1/annotationsets/{annotationSetId}", - "httpMethod": "GET", - "parameters": { - "annotationSetId": { - "description": "The ID of the annotation set to be retrieved.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/annotationsets/{annotationSetId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] + ], + "parameters": { + "datasetId": { + "description": "The ID of the dataset.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/datasets/{datasetId}", + "id": "genomics.datasets.get", + "path": "v1/datasets/{datasetId}" }, - "search": { - "id": "genomics.annotationsets.search", + "testIamPermissions": { "response": { - "$ref": "SearchAnnotationSetsResponse" - }, - "parameterOrder": [], - "description": "Searches for annotation sets that match the given criteria. Annotation sets\nare returned in an unspecified order. This order is consistent, such that\ntwo queries for the same content (regardless of page size) yield annotation\nsets in the same order across their respective streams of paginated\nresponses. 
Caller must have READ permission for the queried datasets.", - "request": { - "$ref": "SearchAnnotationSetsRequest" + "$ref": "TestIamPermissionsResponse" }, - "flatPath": "v1/annotationsets/search", + "parameterOrder": [ + "resource" + ], "httpMethod": "POST", - "parameters": {}, - "path": "v1/annotationsets/search", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "resource": { + "pattern": "^datasets/[^/]+$", + "location": "path", + "description": "REQUIRED: The resource for which policy is being specified. Format is\n`datasets/\u003cdataset ID\u003e`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/datasets/{datasetsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "genomics.datasets.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nSee \u003ca href=\"/iam/docs/managing-policies#testing_permissions\"\u003eTesting\nPermissions\u003c/a\u003e for more information.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "request": { + "$ref": "TestIamPermissionsRequest" + } }, "delete": { - "id": "genomics.annotationsets.delete", "response": { "$ref": "Empty" }, "parameterOrder": [ - "annotationSetId" + "datasetId" ], - "description": "Deletes an annotation set. Caller must have WRITE permission\nfor the associated annotation set.", - "flatPath": "v1/annotationsets/{annotationSetId}", "httpMethod": "DELETE", "parameters": { - "annotationSetId": { - "description": "The ID of the annotation set to be deleted.", + "datasetId": { + "description": "The ID of the dataset to be deleted.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/annotationsets/{annotationSetId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] + ], + "flatPath": "v1/datasets/{datasetId}", + "path": "v1/datasets/{datasetId}", + "id": "genomics.datasets.delete", + "description": "Deletes a dataset and all of its contents (all read group sets,\nreference sets, variant sets, call sets, annotation sets, etc.)\nThis is reversible (up to one week after the deletion) via\nthe\ndatasets.undelete\noperation.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)" + }, + "list": { + "id": "genomics.datasets.list", + "path": "v1/datasets", + "description": "Lists datasets within a project.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "httpMethod": "GET", + "response": { + "$ref": "ListDatasetsResponse" + }, + "parameterOrder": [], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", 
+ "type": "string" + }, + "pageSize": { + "location": "query", + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 50. The maximum value is 1024.", + "format": "int32", + "type": "integer" + }, + "projectId": { + "description": "Required. The Google Cloud project ID to list datasets for.", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1/datasets" } } }, - "operations": { + "annotations": { "methods": { + "create": { + "response": { + "$ref": "Annotation" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "flatPath": "v1/annotations", + "path": "v1/annotations", + "id": "genomics.annotations.create", + "request": { + "$ref": "Annotation" + }, + "description": "Creates a new annotation. Caller must have WRITE permission\nfor the associated annotation set.\n\nThe following fields are required:\n\n* annotationSetId\n* referenceName or\n referenceId\n\n### Transcripts\n\nFor annotations of type TRANSCRIPT, the following fields of\ntranscript must be provided:\n\n* exons.start\n* exons.end\n\nAll other fields may be optionally specified, unless documented as being\nserver-generated (for example, the `id` field). The annotated\nrange must be no longer than 100Mbp (mega base pairs). See the\nAnnotation resource\nfor additional restrictions on each field." + }, + "batchCreate": { + "request": { + "$ref": "BatchCreateAnnotationsRequest" + }, + "description": "Creates one or more new annotations atomically. All annotations must\nbelong to the same annotation set. Caller must have WRITE\npermission for this annotation set. For optimal performance, batch\npositionally adjacent annotations together.\n\nIf the request has a systemic issue, such as an attempt to write to\nan inaccessible annotation set, the entire RPC will fail accordingly. For\nlesser data issues, when possible an error will be isolated to the\ncorresponding batch entry in the response; the remaining well formed\nannotations will be created normally.\n\nFor details on the requirements for each individual annotation resource,\nsee\nCreateAnnotation.", + "response": { + "$ref": "BatchCreateAnnotationsResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "flatPath": "v1/annotations:batchCreate", + "path": "v1/annotations:batchCreate", + "id": "genomics.annotations.batchCreate" + }, + "search": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "SearchAnnotationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "parameters": {}, + "flatPath": "v1/annotations/search", + "id": "genomics.annotations.search", + "path": "v1/annotations/search", + "description": "Searches for annotations that match the given criteria. Results are\nordered by genomic coordinate (by reference sequence, then position).\nAnnotations with equivalent genomic coordinates are returned in an\nunspecified order. This order is consistent, such that two queries for the\nsame content (regardless of page size) yield annotations in the same order\nacross their respective streams of paginated responses. 
Caller must have\nREAD permission for the queried annotation sets.", + "request": { + "$ref": "SearchAnnotationsRequest" + } + }, "get": { - "id": "genomics.operations.get", + "httpMethod": "GET", "response": { - "$ref": "Operation" + "$ref": "Annotation" }, "parameterOrder": [ - "name" + "annotationId" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" ], - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "flatPath": "v1/operations/{operationsId}", - "httpMethod": "GET", "parameters": { - "name": { - "description": "The name of the operation resource.", - "required": true, - "pattern": "^operations/.+$", + "annotationId": { "location": "path", + "description": "The ID of the annotation to be retrieved.", + "required": true, "type": "string" } }, - "path": "v1/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "flatPath": "v1/annotations/{annotationId}", + "id": "genomics.annotations.get", + "path": "v1/annotations/{annotationId}", + "description": "Gets an annotation. Caller must have READ permission\nfor the associated annotation set." }, - "list": { - "id": "genomics.operations.list", - "response": { - "$ref": "ListOperationsResponse" + "update": { + "id": "genomics.annotations.update", + "path": "v1/annotations/{annotationId}", + "request": { + "$ref": "Annotation" }, + "description": "Updates an annotation. Caller must have\nWRITE permission for the associated dataset.", + "httpMethod": "PUT", "parameterOrder": [ - "name" + "annotationId" ], - "description": "Lists operations that match the specified filter in the request.", - "flatPath": "v1/operations", - "httpMethod": "GET", + "response": { + "$ref": "Annotation" + }, "parameters": { - "pageSize": { - "description": "The maximum number of results to return. If unspecified, defaults to\n256. The maximum value is 2048.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "filter": { - "description": "A string for filtering Operations.\nThe following filter fields are supported:\n\n* projectId: Required. Corresponds to\n OperationMetadata.projectId.\n* createTime: The time this job was created, in seconds from the\n [epoch](http://en.wikipedia.org/wiki/Unix_time). Can use `\u003e=` and/or `\u003c=`\n operators.\n* status: Can be `RUNNING`, `SUCCESS`, `FAILURE`, or `CANCELED`. Only\n one status may be specified.\n* labels.key where key is a label key.\n\nExamples:\n\n* `projectId = my-project AND createTime \u003e= 1432140000`\n* `projectId = my-project AND createTime \u003e= 1432140000 AND createTime \u003c= 1432150000 AND status = RUNNING`\n* `projectId = my-project AND labels.color = *`\n* `projectId = my-project AND labels.color = red`", + "updateMask": { "location": "query", + "description": "An optional mask specifying which fields to update. Mutable fields are\nname,\nvariant,\ntranscript, and\ninfo. 
If unspecified, all mutable\nfields will be updated.", + "format": "google-fieldmask", "type": "string" }, - "name": { - "description": "The name of the operation collection.", + "annotationId": { + "description": "The ID of the annotation to be updated.", "required": true, - "pattern": "^operations$", - "location": "path", - "type": "string" - }, - "pageToken": { - "description": "The standard list page token.", - "location": "query", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] + ], + "flatPath": "v1/annotations/{annotationId}" }, - "cancel": { - "id": "genomics.operations.cancel", + "delete": { + "description": "Deletes an annotation. Caller must have WRITE permission for\nthe associated annotation set.", "response": { "$ref": "Empty" }, "parameterOrder": [ - "name" + "annotationId" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" ], - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients may use Operations.GetOperation or Operations.ListOperations to check whether the cancellation succeeded or the operation completed despite cancellation.", - "request": { - "$ref": "CancelOperationRequest" - }, - "flatPath": "v1/operations/{operationsId}:cancel", - "httpMethod": "POST", "parameters": { - "name": { - "description": "The name of the operation resource to be cancelled.", - "required": true, - "pattern": "^operations/.+$", + "annotationId": { "location": "path", + "description": "The ID of the annotation to be deleted.", + "required": true, "type": "string" } }, - "path": "v1/{+name}:cancel", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "flatPath": "v1/annotations/{annotationId}", + "path": "v1/annotations/{annotationId}", + "id": "genomics.annotations.delete" } } }, - "references": { - "resources": { - "bases": { - "methods": { - "list": { - "id": "genomics.references.bases.list", - "response": { - "$ref": "ListBasesResponse" - }, - "parameterOrder": [ - "referenceId" - ], - "description": "Lists the bases in a reference, optionally restricted to a range.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.getReferenceBases](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L221).", - "flatPath": "v1/references/{referenceId}/bases", - "httpMethod": "GET", - "parameters": { - "end": { - "description": "The end position (0-based, exclusive) of this query. Defaults to the length\nof this reference.", - "location": "query", - "type": "string", - "format": "int64" - }, - "pageSize": { - "description": "The maximum number of bases to return in a single page. If unspecified,\ndefaults to 200Kbp (kilo base pairs). The maximum value is 10Mbp (mega base\npairs).", - "location": "query", - "type": "integer", - "format": "int32" - }, - "referenceId": { - "description": "The ID of the reference.", - "required": true, - "location": "path", - "type": "string" - }, - "start": { - "description": "The start position (0-based) of this query. 
Defaults to 0.", - "location": "query", - "type": "string", - "format": "int64" - }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", - "location": "query", - "type": "string" - } - }, - "path": "v1/references/{referenceId}/bases", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] - } - } - } - }, + "variantsets": { "methods": { - "get": { - "id": "genomics.references.get", - "response": { - "$ref": "Reference" - }, + "delete": { + "description": "Deletes a variant set including all variants, call sets, and calls within.\nThis is not reversible.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", "parameterOrder": [ - "referenceId" + "variantSetId" ], - "description": "Gets a reference.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.getReference](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L158).", - "flatPath": "v1/references/{referenceId}", - "httpMethod": "GET", + "httpMethod": "DELETE", + "response": { + "$ref": "Empty" + }, "parameters": { - "referenceId": { - "description": "The ID of the reference.", + "variantSetId": { + "description": "The ID of the variant set to be deleted.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/references/{referenceId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] + "https://www.googleapis.com/auth/genomics" + ], + "flatPath": "v1/variantsets/{variantSetId}", + "path": "v1/variantsets/{variantSetId}", + "id": "genomics.variantsets.delete" }, - "search": { - "id": "genomics.references.search", + "create": { "response": { - "$ref": "SearchReferencesResponse" + "$ref": "VariantSet" }, "parameterOrder": [], - "description": "Searches for references which match the given criteria.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchReferences](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L146).", - "request": { - "$ref": "SearchReferencesRequest" - }, - "flatPath": "v1/references/search", "httpMethod": "POST", "parameters": {}, - "path": "v1/references/search", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] - } - } - }, - "readgroupsets": { - "resources": { - "coveragebuckets": { - "methods": { - "list": { - "id": "genomics.readgroupsets.coveragebuckets.list", - "response": { - "$ref": "ListCoverageBucketsResponse" - }, - "parameterOrder": [ - "readGroupSetId" - ], - "description": "Lists fixed width coverage buckets for a read group set, each of which\ncorrespond to a range of a reference sequence. 
Each bucket summarizes\ncoverage information across its corresponding genomic range.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nCoverage is defined as the number of reads which are aligned to a given\nbase in the reference sequence. Coverage buckets are available at several\nprecomputed bucket widths, enabling retrieval of various coverage 'zoom\nlevels'. The caller must have READ permissions for the target read group\nset.", - "flatPath": "v1/readgroupsets/{readGroupSetId}/coveragebuckets", - "httpMethod": "GET", - "parameters": { - "referenceName": { - "description": "The name of the reference to query, within the reference set associated\nwith this query. Optional.", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024. The maximum value is 2048.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "targetBucketWidth": { - "description": "The desired width of each reported coverage bucket in base pairs. This\nwill be rounded down to the nearest precomputed bucket width; the value\nof which is returned as `bucketWidth` in the response. Defaults\nto infinity (each bucket spans an entire reference sequence) or the length\nof the target range, if specified. The smallest precomputed\n`bucketWidth` is currently 2048 base pairs; this is subject to\nchange.", - "location": "query", - "type": "string", - "format": "int64" - }, - "readGroupSetId": { - "description": "Required. The ID of the read group set over which coverage is requested.", - "required": true, - "location": "path", - "type": "string" - }, - "start": { - "description": "The start position of the range on the reference, 0-based inclusive. If\nspecified, `referenceName` must also be specified. Defaults to 0.", - "location": "query", - "type": "string", - "format": "int64" - }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", - "location": "query", - "type": "string" - }, - "end": { - "description": "The end position of the range on the reference, 0-based exclusive. If\nspecified, `referenceName` must also be specified. If unset or 0, defaults\nto the length of the reference.", - "location": "query", - "type": "string", - "format": "int64" - } - }, - "path": "v1/readgroupsets/{readGroupSetId}/coveragebuckets", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] - } - } - } - }, - "methods": { + "https://www.googleapis.com/auth/genomics" + ], + "flatPath": "v1/variantsets", + "path": "v1/variantsets", + "id": "genomics.variantsets.create", + "request": { + "$ref": "VariantSet" + }, + "description": "Creates a new variant set.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThe provided variant set must have a valid `datasetId` set - all other\nfields are optional. Note that the `id` field will be ignored, as this is\nassigned by the server." 
+ }, "export": { - "id": "genomics.readgroupsets.export", + "httpMethod": "POST", + "parameterOrder": [ + "variantSetId" + ], "response": { "$ref": "Operation" }, - "parameterOrder": [ - "readGroupSetId" + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" ], - "description": "Exports a read group set to a BAM file in Google Cloud Storage.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nNote that currently there may be some differences between exported BAM\nfiles and the original BAM file at the time of import. See\nImportReadGroupSets\nfor caveats.", - "request": { - "$ref": "ExportReadGroupSetRequest" - }, - "flatPath": "v1/readgroupsets/{readGroupSetId}:export", - "httpMethod": "POST", "parameters": { - "readGroupSetId": { - "description": "Required. The ID of the read group set to export. The caller must have\nREAD access to this read group set.", + "variantSetId": { + "description": "Required. The ID of the variant set that contains variant data which\nshould be exported. The caller must have READ access to this variant set.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/readgroupsets/{readGroupSetId}:export", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.read_write", - "https://www.googleapis.com/auth/genomics" - ] + "flatPath": "v1/variantsets/{variantSetId}:export", + "id": "genomics.variantsets.export", + "path": "v1/variantsets/{variantSetId}:export", + "description": "Exports variant set data to an external destination.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "request": { + "$ref": "ExportVariantSetRequest" + } }, "search": { - "id": "genomics.readgroupsets.search", + "request": { + "$ref": "SearchVariantSetsRequest" + }, + "description": "Returns a list of all variant sets matching search criteria.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchVariantSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L49).", "response": { - "$ref": "SearchReadGroupSetsResponse" + "$ref": "SearchVariantSetsResponse" }, "parameterOrder": [], - "description": "Searches for read group sets matching the criteria.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchReadGroupSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L135).", - "request": { - "$ref": "SearchReadGroupSetsRequest" - }, - "flatPath": "v1/readgroupsets/search", "httpMethod": "POST", "parameters": {}, - "path": "v1/readgroupsets/search", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] + ], + "flatPath": "v1/variantsets/search", + "path": "v1/variantsets/search", + "id": 
"genomics.variantsets.search" + }, + "patch": { + "response": { + "$ref": "VariantSet" + }, + "parameterOrder": [ + "variantSetId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "updateMask": { + "description": "An optional mask specifying which fields to update. Supported fields:\n\n* metadata.\n* name.\n* description.\n\nLeaving `updateMask` unset is equivalent to specifying all mutable\nfields.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + }, + "variantSetId": { + "description": "The ID of the variant to be updated (must already exist).", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/variantsets/{variantSetId}", + "path": "v1/variantsets/{variantSetId}", + "id": "genomics.variantsets.patch", + "description": "Updates a variant set using patch semantics.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "request": { + "$ref": "VariantSet" + } }, "get": { - "id": "genomics.readgroupsets.get", "response": { - "$ref": "ReadGroupSet" + "$ref": "VariantSet" }, "parameterOrder": [ - "readGroupSetId" + "variantSetId" ], - "description": "Gets a read group set by ID.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/readgroupsets/{readGroupSetId}", "httpMethod": "GET", "parameters": { - "readGroupSetId": { - "description": "The ID of the read group set.", - "required": true, + "variantSetId": { "location": "path", + "description": "Required. The ID of the variant set.", + "required": true, "type": "string" } }, - "path": "v1/readgroupsets/{readGroupSetId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] - }, - "patch": { - "id": "genomics.readgroupsets.patch", - "response": { - "$ref": "ReadGroupSet" - }, - "parameterOrder": [ - "readGroupSetId" ], - "description": "Updates a read group set.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics.", - "request": { - "$ref": "ReadGroupSet" - }, - "flatPath": "v1/readgroupsets/{readGroupSetId}", - "httpMethod": "PATCH", - "parameters": { - "readGroupSetId": { - "description": "The ID of the read group set to be updated. The caller must have WRITE\npermissions to the dataset associated with this read group set.", - "required": true, - "location": "path", - "type": "string" - }, - "updateMask": { - "description": "An optional mask specifying which fields to update. 
Supported fields:\n\n* name.\n* referenceSetId.\n\nLeaving `updateMask` unset is equivalent to specifying all mutable\nfields.", - "location": "query", - "type": "string", - "format": "google-fieldmask" - } - }, - "path": "v1/readgroupsets/{readGroupSetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "delete": { - "id": "genomics.readgroupsets.delete", + "flatPath": "v1/variantsets/{variantSetId}", + "path": "v1/variantsets/{variantSetId}", + "id": "genomics.variantsets.get", + "description": "Gets a variant set by ID.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)" + } + } + }, + "operations": { + "methods": { + "cancel": { "response": { "$ref": "Empty" }, "parameterOrder": [ - "readGroupSetId" + "name" ], - "description": "Deletes a read group set.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/readgroupsets/{readGroupSetId}", - "httpMethod": "DELETE", - "parameters": { - "readGroupSetId": { - "description": "The ID of the read group set to be deleted. The caller must have WRITE\npermissions to the dataset associated with this read group set.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/readgroupsets/{readGroupSetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "import": { - "id": "genomics.readgroupsets.import", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [], - "description": "Creates read group sets by asynchronously importing the provided\ninformation.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThe caller must have WRITE permissions to the dataset.\n\n## Notes on [BAM](https://samtools.github.io/hts-specs/SAMv1.pdf) import\n\n- Tags will be converted to strings - tag types are not preserved\n- Comments (`@CO`) in the input file header will not be preserved\n- Original header order of references (`@SQ`) will not be preserved\n- Any reverse stranded unmapped reads will be reverse complemented, and\ntheir qualities (also the \"BQ\" and \"OQ\" tags, if any) will be reversed\n- Unmapped reads will be stripped of positional information (reference name\nand position)", - "request": { - "$ref": "ImportReadGroupSetsRequest" - }, - "flatPath": "v1/readgroupsets:import", "httpMethod": "POST", - "parameters": {}, - "path": "v1/readgroupsets:import", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/genomics" - ] - } - } - }, - "variantsets": { - "methods": { - "export": { - "id": "genomics.variantsets.export", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "variantSetId" ], - "description": "Exports variant set data to an external destination.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "request": { - "$ref": "ExportVariantSetRequest" - }, - "flatPath": "v1/variantsets/{variantSetId}:export", - "httpMethod": "POST", 
"parameters": { - "variantSetId": { - "description": "Required. The ID of the variant set that contains variant data which\nshould be exported. The caller must have READ access to this variant set.", + "name": { + "description": "The name of the operation resource to be cancelled.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "pattern": "^operations/.+$", + "location": "path" } }, - "path": "v1/variantsets/{variantSetId}:export", - "scopes": [ - "https://www.googleapis.com/auth/bigquery", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "search": { - "id": "genomics.variantsets.search", - "response": { - "$ref": "SearchVariantSetsResponse" - }, - "parameterOrder": [], - "description": "Returns a list of all variant sets matching search criteria.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchVariantSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L49).", + "flatPath": "v1/operations/{operationsId}:cancel", + "path": "v1/{+name}:cancel", + "id": "genomics.operations.cancel", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients may use Operations.GetOperation or Operations.ListOperations to check whether the cancellation succeeded or the operation completed despite cancellation.", "request": { - "$ref": "SearchVariantSetsRequest" - }, - "flatPath": "v1/variantsets/search", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/variantsets/search", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] + "$ref": "CancelOperationRequest" + } }, - "get": { - "id": "genomics.variantsets.get", + "list": { + "description": "Lists operations that match the specified filter in the request.", "response": { - "$ref": "VariantSet" + "$ref": "ListOperationsResponse" }, "parameterOrder": [ - "variantSetId" + "name" ], - "description": "Gets a variant set by ID.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/variantsets/{variantSetId}", "httpMethod": "GET", - "parameters": { - "variantSetId": { - "description": "Required. The ID of the variant set.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/variantsets/{variantSetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] - }, - "create": { - "id": "genomics.variantsets.create", - "response": { - "$ref": "VariantSet" - }, - "parameterOrder": [], - "description": "Creates a new variant set.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThe provided variant set must have a valid `datasetId` set - all other\nfields are optional. 
Note that the `id` field will be ignored, as this is\nassigned by the server.", - "request": { - "$ref": "VariantSet" - }, - "flatPath": "v1/variantsets", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/variantsets", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] - }, - "patch": { - "id": "genomics.variantsets.patch", - "response": { - "$ref": "VariantSet" - }, - "parameterOrder": [ - "variantSetId" ], - "description": "Updates a variant set using patch semantics.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "request": { - "$ref": "VariantSet" - }, - "flatPath": "v1/variantsets/{variantSetId}", - "httpMethod": "PATCH", "parameters": { - "variantSetId": { - "description": "The ID of the variant to be updated (must already exist).", - "required": true, + "name": { + "pattern": "^operations$", "location": "path", + "description": "The name of the operation collection.", + "required": true, "type": "string" }, - "updateMask": { - "description": "An optional mask specifying which fields to update. Supported fields:\n\n* metadata.\n* name.\n* description.\n\nLeaving `updateMask` unset is equivalent to specifying all mutable\nfields.", + "pageToken": { "location": "query", - "type": "string", - "format": "google-fieldmask" + "description": "The standard list page token.", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of results to return. If unspecified, defaults to\n256. The maximum value is 2048.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "filter": { + "location": "query", + "description": "A string for filtering Operations.\nThe following filter fields are supported:\n\n* projectId: Required. Corresponds to\n OperationMetadata.projectId.\n* createTime: The time this job was created, in seconds from the\n [epoch](http://en.wikipedia.org/wiki/Unix_time). Can use `\u003e=` and/or `\u003c=`\n operators.\n* status: Can be `RUNNING`, `SUCCESS`, `FAILURE`, or `CANCELED`. Only\n one status may be specified.\n* labels.key where key is a label key.\n\nExamples:\n\n* `projectId = my-project AND createTime \u003e= 1432140000`\n* `projectId = my-project AND createTime \u003e= 1432140000 AND createTime \u003c= 1432150000 AND status = RUNNING`\n* `projectId = my-project AND labels.color = *`\n* `projectId = my-project AND labels.color = red`", + "type": "string" } }, - "path": "v1/variantsets/{variantSetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "flatPath": "v1/operations", + "path": "v1/{+name}", + "id": "genomics.operations.list" }, - "delete": { - "id": "genomics.variantsets.delete", + "get": { + "description": "Gets the latest state of a long-running operation. 
Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "httpMethod": "GET", "response": { - "$ref": "Empty" + "$ref": "Operation" }, "parameterOrder": [ - "variantSetId" + "name" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" ], - "description": "Deletes a variant set including all variants, call sets, and calls within.\nThis is not reversible.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/variantsets/{variantSetId}", - "httpMethod": "DELETE", "parameters": { - "variantSetId": { - "description": "The ID of the variant set to be deleted.", - "required": true, + "name": { + "pattern": "^operations/.+$", "location": "path", + "description": "The name of the operation resource.", + "required": true, "type": "string" } }, - "path": "v1/variantsets/{variantSetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "flatPath": "v1/operations/{operationsId}", + "id": "genomics.operations.get", + "path": "v1/{+name}" } } }, - "annotations": { + "referencesets": { "methods": { "search": { - "id": "genomics.annotations.search", - "response": { - "$ref": "SearchAnnotationsResponse" - }, - "parameterOrder": [], - "description": "Searches for annotations that match the given criteria. Results are\nordered by genomic coordinate (by reference sequence, then position).\nAnnotations with equivalent genomic coordinates are returned in an\nunspecified order. This order is consistent, such that two queries for the\nsame content (regardless of page size) yield annotations in the same order\nacross their respective streams of paginated responses. 
Caller must have\nREAD permission for the queried annotation sets.", + "id": "genomics.referencesets.search", + "path": "v1/referencesets/search", + "description": "Searches for reference sets which match the given criteria.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchReferenceSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L71)", "request": { - "$ref": "SearchAnnotationsRequest" + "$ref": "SearchReferenceSetsRequest" }, - "flatPath": "v1/annotations/search", "httpMethod": "POST", - "parameters": {}, - "path": "v1/annotations/search", + "parameterOrder": [], + "response": { + "$ref": "SearchReferenceSetsResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] + ], + "parameters": {}, + "flatPath": "v1/referencesets/search" }, "get": { - "id": "genomics.annotations.get", + "id": "genomics.referencesets.get", + "path": "v1/referencesets/{referenceSetId}", + "description": "Gets a reference set.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.getReferenceSet](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L83).", + "httpMethod": "GET", "response": { - "$ref": "Annotation" + "$ref": "ReferenceSet" }, "parameterOrder": [ - "annotationId" + "referenceSetId" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" ], - "description": "Gets an annotation. Caller must have READ permission\nfor the associated annotation set.", - "flatPath": "v1/annotations/{annotationId}", - "httpMethod": "GET", "parameters": { - "annotationId": { - "description": "The ID of the annotation to be retrieved.", - "required": true, + "referenceSetId": { "location": "path", + "description": "The ID of the reference set.", + "required": true, "type": "string" } }, - "path": "v1/annotations/{annotationId}", + "flatPath": "v1/referencesets/{referenceSetId}" + } + } + }, + "readgroupsets": { + "resources": { + "coveragebuckets": { + "methods": { + "list": { + "httpMethod": "GET", + "response": { + "$ref": "ListCoverageBucketsResponse" + }, + "parameterOrder": [ + "readGroupSetId" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "parameters": { + "start": { + "location": "query", + "description": "The start position of the range on the reference, 0-based inclusive. If\nspecified, `referenceName` must also be specified. Defaults to 0.", + "format": "int64", + "type": "string" + }, + "readGroupSetId": { + "location": "path", + "description": "Required. The ID of the read group set over which coverage is requested.", + "required": true, + "type": "string" + }, + "targetBucketWidth": { + "location": "query", + "description": "The desired width of each reported coverage bucket in base pairs. This\nwill be rounded down to the nearest precomputed bucket width; the value\nof which is returned as `bucketWidth` in the response. 
Defaults\nto infinity (each bucket spans an entire reference sequence) or the length\nof the target range, if specified. The smallest precomputed\n`bucketWidth` is currently 2048 base pairs; this is subject to\nchange.", + "format": "int64", + "type": "string" + }, + "referenceName": { + "location": "query", + "description": "The name of the reference to query, within the reference set associated\nwith this query. Optional.", + "type": "string" + }, + "end": { + "location": "query", + "description": "The end position of the range on the reference, 0-based exclusive. If\nspecified, `referenceName` must also be specified. If unset or 0, defaults\nto the length of the reference.", + "format": "int64", + "type": "string" + }, + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string", + "location": "query" + }, + "pageSize": { + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024. The maximum value is 2048.", + "format": "int32", + "type": "integer", + "location": "query" + } + }, + "flatPath": "v1/readgroupsets/{readGroupSetId}/coveragebuckets", + "id": "genomics.readgroupsets.coveragebuckets.list", + "path": "v1/readgroupsets/{readGroupSetId}/coveragebuckets", + "description": "Lists fixed width coverage buckets for a read group set, each of which\ncorrespond to a range of a reference sequence. Each bucket summarizes\ncoverage information across its corresponding genomic range.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nCoverage is defined as the number of reads which are aligned to a given\nbase in the reference sequence. Coverage buckets are available at several\nprecomputed bucket widths, enabling retrieval of various coverage 'zoom\nlevels'. The caller must have READ permissions for the target read group\nset." + } + } + } + }, + "methods": { + "delete": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "readGroupSetId" + ], + "httpMethod": "DELETE", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] - }, - "create": { - "id": "genomics.annotations.create", - "response": { - "$ref": "Annotation" + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "readGroupSetId": { + "location": "path", + "description": "The ID of the read group set to be deleted. The caller must have WRITE\npermissions to the dataset associated with this read group set.", + "required": true, + "type": "string" + } }, + "flatPath": "v1/readgroupsets/{readGroupSetId}", + "path": "v1/readgroupsets/{readGroupSetId}", + "id": "genomics.readgroupsets.delete", + "description": "Deletes a read group set.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)" + }, + "import": { + "httpMethod": "POST", "parameterOrder": [], - "description": "Creates a new annotation. 
Caller must have WRITE permission\nfor the associated annotation set.\n\nThe following fields are required:\n\n* annotationSetId\n* referenceName or\n referenceId\n\n### Transcripts\n\nFor annotations of type TRANSCRIPT, the following fields of\ntranscript must be provided:\n\n* exons.start\n* exons.end\n\nAll other fields may be optionally specified, unless documented as being\nserver-generated (for example, the `id` field). The annotated\nrange must be no longer than 100Mbp (mega base pairs). See the\nAnnotation resource\nfor additional restrictions on each field.", - "request": { - "$ref": "Annotation" + "response": { + "$ref": "Operation" }, - "flatPath": "v1/annotations", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/annotations", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/genomics" - ] + ], + "parameters": {}, + "flatPath": "v1/readgroupsets:import", + "id": "genomics.readgroupsets.import", + "path": "v1/readgroupsets:import", + "description": "Creates read group sets by asynchronously importing the provided\ninformation.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThe caller must have WRITE permissions to the dataset.\n\n## Notes on [BAM](https://samtools.github.io/hts-specs/SAMv1.pdf) import\n\n- Tags will be converted to strings - tag types are not preserved\n- Comments (`@CO`) in the input file header will not be preserved\n- Original header order of references (`@SQ`) will not be preserved\n- Any reverse stranded unmapped reads will be reverse complemented, and\ntheir qualities (also the \"BQ\" and \"OQ\" tags, if any) will be reversed\n- Unmapped reads will be stripped of positional information (reference name\nand position)", + "request": { + "$ref": "ImportReadGroupSetsRequest" + } }, - "update": { - "id": "genomics.annotations.update", - "response": { - "$ref": "Annotation" + "export": { + "id": "genomics.readgroupsets.export", + "path": "v1/readgroupsets/{readGroupSetId}:export", + "request": { + "$ref": "ExportReadGroupSetRequest" }, + "description": "Exports a read group set to a BAM file in Google Cloud Storage.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nNote that currently there may be some differences between exported BAM\nfiles and the original BAM file at the time of import. See\nImportReadGroupSets\nfor caveats.", + "httpMethod": "POST", "parameterOrder": [ - "annotationId" + "readGroupSetId" ], - "description": "Updates an annotation. Caller must have\nWRITE permission for the associated dataset.", - "request": { - "$ref": "Annotation" + "response": { + "$ref": "Operation" }, - "flatPath": "v1/annotations/{annotationId}", - "httpMethod": "PUT", "parameters": { - "annotationId": { - "description": "The ID of the annotation to be updated.", + "readGroupSetId": { + "description": "Required. The ID of the read group set to export. The caller must have\nREAD access to this read group set.", "required": true, - "location": "path", - "type": "string" - }, - "updateMask": { - "description": "An optional mask specifying which fields to update. Mutable fields are\nname,\nvariant,\ntranscript, and\ninfo. 
If unspecified, all mutable\nfields will be updated.", - "location": "query", "type": "string", - "format": "google-fieldmask" + "location": "path" } }, - "path": "v1/annotations/{annotationId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/genomics" - ] + ], + "flatPath": "v1/readgroupsets/{readGroupSetId}:export" }, - "batchCreate": { - "id": "genomics.annotations.batchCreate", + "search": { "response": { - "$ref": "BatchCreateAnnotationsResponse" + "$ref": "SearchReadGroupSetsResponse" }, "parameterOrder": [], - "description": "Creates one or more new annotations atomically. All annotations must\nbelong to the same annotation set. Caller must have WRITE\npermission for this annotation set. For optimal performance, batch\npositionally adjacent annotations together.\n\nIf the request has a systemic issue, such as an attempt to write to\nan inaccessible annotation set, the entire RPC will fail accordingly. For\nlesser data issues, when possible an error will be isolated to the\ncorresponding batch entry in the response; the remaining well formed\nannotations will be created normally.\n\nFor details on the requirements for each individual annotation resource,\nsee\nCreateAnnotation.", - "request": { - "$ref": "BatchCreateAnnotationsRequest" - }, - "flatPath": "v1/annotations:batchCreate", "httpMethod": "POST", "parameters": {}, - "path": "v1/annotations:batchCreate", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "flatPath": "v1/readgroupsets/search", + "path": "v1/readgroupsets/search", + "id": "genomics.readgroupsets.search", + "request": { + "$ref": "SearchReadGroupSetsRequest" + }, + "description": "Searches for read group sets matching the criteria.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchReadGroupSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L135)." }, - "delete": { - "id": "genomics.annotations.delete", + "get": { + "description": "Gets a read group set by ID.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "httpMethod": "GET", "response": { - "$ref": "Empty" + "$ref": "ReadGroupSet" }, "parameterOrder": [ - "annotationId" + "readGroupSetId" ], - "description": "Deletes an annotation. 
Caller must have WRITE permission for\nthe associated annotation set.", - "flatPath": "v1/annotations/{annotationId}", - "httpMethod": "DELETE", "parameters": { - "annotationId": { - "description": "The ID of the annotation to be deleted.", + "readGroupSetId": { + "description": "The ID of the read group set.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/annotations/{annotationId}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "flatPath": "v1/readgroupsets/{readGroupSetId}", + "id": "genomics.readgroupsets.get", + "path": "v1/readgroupsets/{readGroupSetId}" + }, + "patch": { + "httpMethod": "PATCH", + "parameterOrder": [ + "readGroupSetId" + ], + "response": { + "$ref": "ReadGroupSet" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] + ], + "parameters": { + "readGroupSetId": { + "description": "The ID of the read group set to be updated. The caller must have WRITE\npermissions to the dataset associated with this read group set.", + "required": true, + "type": "string", + "location": "path" + }, + "updateMask": { + "location": "query", + "description": "An optional mask specifying which fields to update. Supported fields:\n\n* name.\n* referenceSetId.\n\nLeaving `updateMask` unset is equivalent to specifying all mutable\nfields.", + "format": "google-fieldmask", + "type": "string" + } + }, + "flatPath": "v1/readgroupsets/{readGroupSetId}", + "id": "genomics.readgroupsets.patch", + "path": "v1/readgroupsets/{readGroupSetId}", + "description": "Updates a read group set.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics.", + "request": { + "$ref": "ReadGroupSet" + } } } }, "reads": { "methods": { "search": { - "id": "genomics.reads.search", "response": { "$ref": "SearchReadsResponse" }, "parameterOrder": [], - "description": "Gets a list of reads for one or more read group sets.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nReads search operates over a genomic coordinate space of reference sequence\n& position defined over the reference sequences to which the requested\nread group sets are aligned.\n\nIf a target positional range is specified, search returns all reads whose\nalignment to the reference genome overlap the range. A query which\nspecifies only read group set IDs yields all reads in those read group\nsets, including unmapped reads.\n\nAll reads returned (including reads on subsequent pages) are ordered by\ngenomic coordinate (by reference sequence, then position). Reads with\nequivalent genomic coordinates are returned in an unspecified order. 
This\norder is consistent, such that two queries for the same content (regardless\nof page size) yield reads in the same order across their respective streams\nof paginated responses.\n\nImplements\n[GlobalAllianceApi.searchReads](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L85).", - "request": { - "$ref": "SearchReadsRequest" - }, - "flatPath": "v1/reads/search", "httpMethod": "POST", - "parameters": {}, - "path": "v1/reads/search", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] + ], + "parameters": {}, + "flatPath": "v1/reads/search", + "path": "v1/reads/search", + "id": "genomics.reads.search", + "description": "Gets a list of reads for one or more read group sets.\n\nFor the definitions of read group sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nReads search operates over a genomic coordinate space of reference sequence\n& position defined over the reference sequences to which the requested\nread group sets are aligned.\n\nIf a target positional range is specified, search returns all reads whose\nalignment to the reference genome overlap the range. A query which\nspecifies only read group set IDs yields all reads in those read group\nsets, including unmapped reads.\n\nAll reads returned (including reads on subsequent pages) are ordered by\ngenomic coordinate (by reference sequence, then position). Reads with\nequivalent genomic coordinates are returned in an unspecified order. This\norder is consistent, such that two queries for the same content (regardless\nof page size) yield reads in the same order across their respective streams\nof paginated responses.\n\nImplements\n[GlobalAllianceApi.searchReads](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L85).", + "request": { + "$ref": "SearchReadsRequest" + } } } }, - "variants": { + "callsets": { "methods": { - "merge": { - "id": "genomics.variants.merge", + "delete": { + "description": "Deletes a call set.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", "response": { "$ref": "Empty" }, - "parameterOrder": [], - "description": "Merges the given variants with existing variants.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nEach variant will be\nmerged with an existing variant that matches its reference sequence,\nstart, end, reference bases, and alternative bases. If no such variant\nexists, a new one will be created.\n\nWhen variants are merged, the call information from the new variant\nis added to the existing variant. Variant info fields are merged as\nspecified in the\ninfoMergeConfig\nfield of the MergeVariantsRequest.\n\nPlease exercise caution when using this method! It is easy to introduce\nmistakes in existing variants and difficult to back out of them. 
For\nexample,\nsuppose you were trying to merge a new variant with an existing one and\nboth\nvariants contain calls that belong to callsets with the same callset ID.\n\n // Existing variant - irrelevant fields trimmed for clarity\n {\n \"variantSetId\": \"10473108253681171589\",\n \"referenceName\": \"1\",\n \"start\": \"10582\",\n \"referenceBases\": \"G\",\n \"alternateBases\": [\n \"A\"\n ],\n \"calls\": [\n {\n \"callSetId\": \"10473108253681171589-0\",\n \"callSetName\": \"CALLSET0\",\n \"genotype\": [\n 0,\n 1\n ],\n }\n ]\n }\n\n // New variant with conflicting call information\n {\n \"variantSetId\": \"10473108253681171589\",\n \"referenceName\": \"1\",\n \"start\": \"10582\",\n \"referenceBases\": \"G\",\n \"alternateBases\": [\n \"A\"\n ],\n \"calls\": [\n {\n \"callSetId\": \"10473108253681171589-0\",\n \"callSetName\": \"CALLSET0\",\n \"genotype\": [\n 1,\n 1\n ],\n }\n ]\n }\n\nThe resulting merged variant would overwrite the existing calls with those\nfrom the new variant:\n\n {\n \"variantSetId\": \"10473108253681171589\",\n \"referenceName\": \"1\",\n \"start\": \"10582\",\n \"referenceBases\": \"G\",\n \"alternateBases\": [\n \"A\"\n ],\n \"calls\": [\n {\n \"callSetId\": \"10473108253681171589-0\",\n \"callSetName\": \"CALLSET0\",\n \"genotype\": [\n 1,\n 1\n ],\n }\n ]\n }\n\nThis may be the desired outcome, but it is up to the user to determine if\nif that is indeed the case.", - "request": { - "$ref": "MergeVariantsRequest" - }, - "flatPath": "v1/variants:merge", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/variants:merge", + "parameterOrder": [ + "callSetId" + ], + "httpMethod": "DELETE", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] + ], + "parameters": { + "callSetId": { + "description": "The ID of the call set to be deleted.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/callsets/{callSetId}", + "path": "v1/callsets/{callSetId}", + "id": "genomics.callsets.delete" }, "search": { - "id": "genomics.variants.search", + "path": "v1/callsets/search", + "id": "genomics.callsets.search", + "description": "Gets a list of call sets matching the criteria.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchCallSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L178).", + "request": { + "$ref": "SearchCallSetsRequest" + }, "response": { - "$ref": "SearchVariantsResponse" + "$ref": "SearchCallSetsResponse" }, "parameterOrder": [], - "description": "Gets a list of variants matching the criteria.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchVariants](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L126).", - "request": { - "$ref": "SearchVariantsRequest" - }, - "flatPath": "v1/variants/search", "httpMethod": "POST", - "parameters": {}, - "path": "v1/variants/search", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] + ], + "parameters": {}, + "flatPath": "v1/callsets/search" }, - "get": { - "id": "genomics.variants.get", + "patch": { + 
"description": "Updates a call set.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics.", + "request": { + "$ref": "CallSet" + }, "response": { - "$ref": "Variant" + "$ref": "CallSet" }, "parameterOrder": [ - "variantId" + "callSetId" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" ], - "description": "Gets a variant by ID.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/variants/{variantId}", - "httpMethod": "GET", "parameters": { - "variantId": { - "description": "The ID of the variant.", + "callSetId": { + "description": "The ID of the call set to be updated.", "required": true, + "type": "string", + "location": "path" + }, + "updateMask": { + "description": "An optional mask specifying which fields to update. At this time, the only\nmutable field is name. The only\nacceptable value is \"name\". If unspecified, all mutable fields will be\nupdated.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1/callsets/{callSetId}", + "path": "v1/callsets/{callSetId}", + "id": "genomics.callsets.patch" + }, + "get": { + "httpMethod": "GET", + "response": { + "$ref": "CallSet" + }, + "parameterOrder": [ + "callSetId" + ], + "parameters": { + "callSetId": { "location": "path", + "description": "The ID of the call set.", + "required": true, "type": "string" } }, - "path": "v1/variants/{variantId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] + ], + "flatPath": "v1/callsets/{callSetId}", + "id": "genomics.callsets.get", + "path": "v1/callsets/{callSetId}", + "description": "Gets a call set by ID.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)" }, "create": { - "id": "genomics.variants.create", + "path": "v1/callsets", + "id": "genomics.callsets.create", + "description": "Creates a new call set.\n\nFor the definitions of call sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "request": { + "$ref": "CallSet" + }, "response": { - "$ref": "Variant" + "$ref": "CallSet" }, "parameterOrder": [], - "description": "Creates a new variant.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "request": { + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": {}, + "flatPath": "v1/callsets" + } + } + }, + "variants": { + "methods": { + "create": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { "$ref": "Variant" }, - "flatPath": "v1/variants", - "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], "parameters": {}, + "flatPath": "v1/variants", + "id": "genomics.variants.create", "path": "v1/variants", + "description": 
"Creates a new variant.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "request": { + "$ref": "Variant" + } + }, + "search": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "SearchVariantsResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "parameters": {}, + "flatPath": "v1/variants/search", + "id": "genomics.variants.search", + "path": "v1/variants/search", + "description": "Gets a list of variants matching the criteria.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchVariants](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L126).", + "request": { + "$ref": "SearchVariantsRequest" + } }, - "patch": { - "id": "genomics.variants.patch", + "get": { + "httpMethod": "GET", "response": { "$ref": "Variant" }, "parameterOrder": [ "variantId" ], - "description": "Updates a variant.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics. Returns the modified variant without\nits calls.", - "request": { - "$ref": "Variant" + "parameters": { + "variantId": { + "location": "path", + "description": "The ID of the variant.", + "required": true, + "type": "string" + } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], "flatPath": "v1/variants/{variantId}", + "id": "genomics.variants.get", + "path": "v1/variants/{variantId}", + "description": "Gets a variant by ID.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)" + }, + "patch": { + "response": { + "$ref": "Variant" + }, + "parameterOrder": [ + "variantId" + ], "httpMethod": "PATCH", "parameters": { - "updateMask": { - "description": "An optional mask specifying which fields to update. At this time, mutable\nfields are names and\ninfo. Acceptable values are \"names\" and\n\"info\". If unspecified, all mutable fields will be updated.", - "location": "query", - "type": "string", - "format": "google-fieldmask" - }, "variantId": { "description": "The ID of the variant to be updated.", "required": true, - "location": "path", + "type": "string", + "location": "path" + }, + "updateMask": { + "location": "query", + "description": "An optional mask specifying which fields to update. At this time, mutable\nfields are names and\ninfo. Acceptable values are \"names\" and\n\"info\". 
If unspecified, all mutable fields will be updated.", + "format": "google-fieldmask", "type": "string" } }, - "path": "v1/variants/{variantId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] + ], + "flatPath": "v1/variants/{variantId}", + "path": "v1/variants/{variantId}", + "id": "genomics.variants.patch", + "request": { + "$ref": "Variant" + }, + "description": "Updates a variant.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics. Returns the modified variant without\nits calls." }, "delete": { - "id": "genomics.variants.delete", + "httpMethod": "DELETE", "response": { "$ref": "Empty" }, "parameterOrder": [ "variantId" ], - "description": "Deletes a variant.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/variants/{variantId}", - "httpMethod": "DELETE", "parameters": { "variantId": { + "location": "path", "description": "The ID of the variant to be deleted.", "required": true, - "location": "path", "type": "string" } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "flatPath": "v1/variants/{variantId}", + "id": "genomics.variants.delete", "path": "v1/variants/{variantId}", + "description": "Deletes a variant.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)" + }, + "merge": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "Empty" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics" - ] + ], + "parameters": {}, + "flatPath": "v1/variants:merge", + "id": "genomics.variants.merge", + "path": "v1/variants:merge", + "description": "Merges the given variants with existing variants.\n\nFor the definitions of variants and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nEach variant will be\nmerged with an existing variant that matches its reference sequence,\nstart, end, reference bases, and alternative bases. If no such variant\nexists, a new one will be created.\n\nWhen variants are merged, the call information from the new variant\nis added to the existing variant. Variant info fields are merged as\nspecified in the\ninfoMergeConfig\nfield of the MergeVariantsRequest.\n\nPlease exercise caution when using this method! It is easy to introduce\nmistakes in existing variants and difficult to back out of them. 
For\nexample,\nsuppose you were trying to merge a new variant with an existing one and\nboth\nvariants contain calls that belong to callsets with the same callset ID.\n\n // Existing variant - irrelevant fields trimmed for clarity\n {\n \"variantSetId\": \"10473108253681171589\",\n \"referenceName\": \"1\",\n \"start\": \"10582\",\n \"referenceBases\": \"G\",\n \"alternateBases\": [\n \"A\"\n ],\n \"calls\": [\n {\n \"callSetId\": \"10473108253681171589-0\",\n \"callSetName\": \"CALLSET0\",\n \"genotype\": [\n 0,\n 1\n ],\n }\n ]\n }\n\n // New variant with conflicting call information\n {\n \"variantSetId\": \"10473108253681171589\",\n \"referenceName\": \"1\",\n \"start\": \"10582\",\n \"referenceBases\": \"G\",\n \"alternateBases\": [\n \"A\"\n ],\n \"calls\": [\n {\n \"callSetId\": \"10473108253681171589-0\",\n \"callSetName\": \"CALLSET0\",\n \"genotype\": [\n 1,\n 1\n ],\n }\n ]\n }\n\nThe resulting merged variant would overwrite the existing calls with those\nfrom the new variant:\n\n {\n \"variantSetId\": \"10473108253681171589\",\n \"referenceName\": \"1\",\n \"start\": \"10582\",\n \"referenceBases\": \"G\",\n \"alternateBases\": [\n \"A\"\n ],\n \"calls\": [\n {\n \"callSetId\": \"10473108253681171589-0\",\n \"callSetName\": \"CALLSET0\",\n \"genotype\": [\n 1,\n 1\n ],\n }\n ]\n }\n\nThis may be the desired outcome, but it is up to the user to determine if\nif that is indeed the case.", + "request": { + "$ref": "MergeVariantsRequest" + } }, "import": { - "id": "genomics.variants.import", + "httpMethod": "POST", + "parameterOrder": [], "response": { "$ref": "Operation" }, - "parameterOrder": [], - "description": "Creates variant data by asynchronously importing the provided information.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThe variants for import will be merged with any existing variant that\nmatches its reference sequence, start, end, reference bases, and\nalternative bases. If no such variant exists, a new one will be created.\n\nWhen variants are merged, the call information from the new variant\nis added to the existing variant, and Variant info fields are merged\nas specified in\ninfoMergeConfig.\nAs a special case, for single-sample VCF files, QUAL and FILTER fields will\nbe moved to the call level; these are sometimes interpreted in a\ncall-specific context.\nImported VCF headers are appended to the metadata already in a variant set.", - "request": { - "$ref": "ImportVariantsRequest" - }, - "flatPath": "v1/variants:import", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/variants:import", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/genomics" - ] + ], + "parameters": {}, + "flatPath": "v1/variants:import", + "id": "genomics.variants.import", + "path": "v1/variants:import", + "description": "Creates variant data by asynchronously importing the provided information.\n\nFor the definitions of variant sets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThe variants for import will be merged with any existing variant that\nmatches its reference sequence, start, end, reference bases, and\nalternative bases. 
If no such variant exists, a new one will be created.\n\nWhen variants are merged, the call information from the new variant\nis added to the existing variant, and Variant info fields are merged\nas specified in\ninfoMergeConfig.\nAs a special case, for single-sample VCF files, QUAL and FILTER fields will\nbe moved to the call level; these are sometimes interpreted in a\ncall-specific context.\nImported VCF headers are appended to the metadata already in a variant set.", + "request": { + "$ref": "ImportVariantsRequest" + } } } }, - "referencesets": { + "annotationsets": { "methods": { - "get": { - "id": "genomics.referencesets.get", + "delete": { + "description": "Deletes an annotation set. Caller must have WRITE permission\nfor the associated annotation set.", + "parameterOrder": [ + "annotationSetId" + ], + "httpMethod": "DELETE", "response": { - "$ref": "ReferenceSet" + "$ref": "Empty" }, - "parameterOrder": [ - "referenceSetId" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" ], - "description": "Gets a reference set.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.getReferenceSet](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L83).", - "flatPath": "v1/referencesets/{referenceSetId}", - "httpMethod": "GET", "parameters": { - "referenceSetId": { - "description": "The ID of the reference set.", + "annotationSetId": { + "description": "The ID of the annotation set to be deleted.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/referencesets/{referenceSetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] + "flatPath": "v1/annotationsets/{annotationSetId}", + "path": "v1/annotationsets/{annotationSetId}", + "id": "genomics.annotationsets.delete" }, "search": { - "id": "genomics.referencesets.search", - "response": { - "$ref": "SearchReferenceSetsResponse" - }, + "httpMethod": "POST", "parameterOrder": [], - "description": "Searches for reference sets which match the given criteria.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchReferenceSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L71)", - "request": { - "$ref": "SearchReferenceSetsRequest" + "response": { + "$ref": "SearchAnnotationSetsResponse" }, - "flatPath": "v1/referencesets/search", - "httpMethod": "POST", "parameters": {}, - "path": "v1/referencesets/search", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/genomics", "https://www.googleapis.com/auth/genomics.readonly" - ] - } - } - }, - "datasets": { - "methods": { - "getIamPolicy": { - "id": "genomics.datasets.getIamPolicy", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" ], - "description": "Gets the access control policy for the dataset. 
This is empty if the\npolicy or resource does not exist.\n\nSee \u003ca href=\"/iam/docs/managing-policies#getting_a_policy\"\u003eGetting a\nPolicy\u003c/a\u003e for more information.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "flatPath": "v1/annotationsets/search", + "id": "genomics.annotationsets.search", + "path": "v1/annotationsets/search", "request": { - "$ref": "GetIamPolicyRequest" - }, - "flatPath": "v1/datasets/{datasetsId}:getIamPolicy", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which policy is being specified. Format is\n`datasets/\u003cdataset ID\u003e`.", - "required": true, - "pattern": "^datasets/[^/]+$", - "location": "path", - "type": "string" - } + "$ref": "SearchAnnotationSetsRequest" }, - "path": "v1/{+resource}:getIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "description": "Searches for annotation sets that match the given criteria. Annotation sets\nare returned in an unspecified order. This order is consistent, such that\ntwo queries for the same content (regardless of page size) yield annotation\nsets in the same order across their respective streams of paginated\nresponses. Caller must have READ permission for the queried datasets." }, - "undelete": { - "id": "genomics.datasets.undelete", + "get": { "response": { - "$ref": "Dataset" + "$ref": "AnnotationSet" }, "parameterOrder": [ - "datasetId" + "annotationSetId" ], - "description": "Undeletes a dataset by restoring a dataset which was deleted via this API.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis operation is only possible for a week after the deletion occurred.", - "request": { - "$ref": "UndeleteDatasetRequest" - }, - "flatPath": "v1/datasets/{datasetId}:undelete", - "httpMethod": "POST", + "httpMethod": "GET", "parameters": { - "datasetId": { - "description": "The ID of the dataset to be undeleted.", + "annotationSetId": { + "description": "The ID of the annotation set to be retrieved.", "required": true, - "location": "path", - "type": "string" + "type": "string", + "location": "path" } }, - "path": "v1/datasets/{datasetId}:undelete", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "flatPath": "v1/annotationsets/{annotationSetId}", + "path": "v1/annotationsets/{annotationSetId}", + "id": "genomics.annotationsets.get", + "description": "Gets an annotation set. Caller must have READ permission for\nthe associated dataset." }, - "list": { - "id": "genomics.datasets.list", + "update": { + "httpMethod": "PUT", + "parameterOrder": [ + "annotationSetId" + ], "response": { - "$ref": "ListDatasetsResponse" + "$ref": "AnnotationSet" }, - "parameterOrder": [], - "description": "Lists datasets within a project.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/datasets", - "httpMethod": "GET", "parameters": { - "pageSize": { - "description": "The maximum number of results to return in a single page. 
If unspecified,\ndefaults to 50. The maximum value is 1024.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "projectId": { - "description": "Required. The Google Cloud project ID to list datasets for.", - "location": "query", - "type": "string" + "annotationSetId": { + "description": "The ID of the annotation set to be updated.", + "required": true, + "type": "string", + "location": "path" }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "updateMask": { "location": "query", + "description": "An optional mask specifying which fields to update. Mutable fields are\nname,\nsource_uri, and\ninfo. If unspecified, all\nmutable fields will be updated.", + "format": "google-fieldmask", "type": "string" } }, - "path": "v1/datasets", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] - }, - "get": { - "id": "genomics.datasets.get", - "response": { - "$ref": "Dataset" - }, - "parameterOrder": [ - "datasetId" + "https://www.googleapis.com/auth/genomics" ], - "description": "Gets a dataset by ID.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/datasets/{datasetId}", - "httpMethod": "GET", - "parameters": { - "datasetId": { - "description": "The ID of the dataset.", - "required": true, - "location": "path", - "type": "string" - } + "flatPath": "v1/annotationsets/{annotationSetId}", + "id": "genomics.annotationsets.update", + "path": "v1/annotationsets/{annotationSetId}", + "request": { + "$ref": "AnnotationSet" }, - "path": "v1/datasets/{datasetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics", - "https://www.googleapis.com/auth/genomics.readonly" - ] + "description": "Updates an annotation set. The update must respect all mutability\nrestrictions and other invariants described on the annotation set resource.\nCaller must have WRITE permission for the associated dataset." }, "create": { - "id": "genomics.datasets.create", + "httpMethod": "POST", + "parameterOrder": [], "response": { - "$ref": "Dataset" + "$ref": "AnnotationSet" }, - "parameterOrder": [], - "description": "Creates a new dataset.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": {}, + "flatPath": "v1/annotationsets", + "id": "genomics.annotationsets.create", + "path": "v1/annotationsets", + "description": "Creates a new annotation set. 
Caller must have WRITE permission for the\nassociated dataset.\n\nThe following fields are required:\n\n * datasetId\n * referenceSetId\n\nAll other fields may be optionally specified, unless documented as being\nserver-generated (for example, the `id` field).", "request": { - "$ref": "Dataset" + "$ref": "AnnotationSet" + } + } + } + }, + "references": { + "methods": { + "search": { + "response": { + "$ref": "SearchReferencesResponse" }, - "flatPath": "v1/datasets", + "parameterOrder": [], "httpMethod": "POST", "parameters": {}, - "path": "v1/datasets", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "flatPath": "v1/references/search", + "path": "v1/references/search", + "id": "genomics.references.search", + "request": { + "$ref": "SearchReferencesRequest" + }, + "description": "Searches for references which match the given criteria.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.searchReferences](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L146)." }, - "patch": { - "id": "genomics.datasets.patch", + "get": { + "id": "genomics.references.get", + "path": "v1/references/{referenceId}", + "description": "Gets a reference.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.getReference](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L158).", + "httpMethod": "GET", "response": { - "$ref": "Dataset" + "$ref": "Reference" }, "parameterOrder": [ - "datasetId" + "referenceId" ], - "description": "Updates a dataset.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nThis method supports patch semantics.", - "request": { - "$ref": "Dataset" - }, - "flatPath": "v1/datasets/{datasetId}", - "httpMethod": "PATCH", "parameters": { - "updateMask": { - "description": "An optional mask specifying which fields to update. At this time, the only\nmutable field is name. The only\nacceptable value is \"name\". If unspecified, all mutable fields will be\nupdated.", - "location": "query", - "type": "string", - "format": "google-fieldmask" - }, - "datasetId": { - "description": "The ID of the dataset to be updated.", - "required": true, + "referenceId": { "location": "path", + "description": "The ID of the reference.", + "required": true, "type": "string" } }, - "path": "v1/datasets/{datasetId}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "setIamPolicy": { - "id": "genomics.datasets.setIamPolicy", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "description": "Sets the access control policy on the specified dataset. 
Replaces any\nexisting policy.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nSee \u003ca href=\"/iam/docs/managing-policies#setting_a_policy\"\u003eSetting a\nPolicy\u003c/a\u003e for more information.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "flatPath": "v1/datasets/{datasetsId}:setIamPolicy", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which policy is being specified. Format is\n`datasets/\u003cdataset ID\u003e`.", - "required": true, - "pattern": "^datasets/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+resource}:setIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "delete": { - "id": "genomics.datasets.delete", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "datasetId" - ], - "description": "Deletes a dataset and all of its contents (all read group sets,\nreference sets, variant sets, call sets, annotation sets, etc.)\nThis is reversible (up to one week after the deletion) via\nthe\ndatasets.undelete\noperation.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "flatPath": "v1/datasets/{datasetId}", - "httpMethod": "DELETE", - "parameters": { - "datasetId": { - "description": "The ID of the dataset to be deleted.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/datasets/{datasetId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "testIamPermissions": { - "id": "genomics.datasets.testIamPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" ], - "description": "Returns permissions that a caller has on the specified resource.\nSee \u003ca href=\"/iam/docs/managing-policies#testing_permissions\"\u003eTesting\nPermissions\u003c/a\u003e for more information.\n\nFor the definitions of datasets and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1/datasets/{datasetsId}:testIamPermissions", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which policy is being specified. Format is\n`datasets/\u003cdataset ID\u003e`.", - "required": true, - "pattern": "^datasets/[^/]+$", - "location": "path", - "type": "string" + "flatPath": "v1/references/{referenceId}" + } + }, + "resources": { + "bases": { + "methods": { + "list": { + "httpMethod": "GET", + "response": { + "$ref": "ListBasesResponse" + }, + "parameterOrder": [ + "referenceId" + ], + "parameters": { + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string", + "location": "query" + }, + "pageSize": { + "description": "The maximum number of bases to return in a single page. If unspecified,\ndefaults to 200Kbp (kilo base pairs). 
The maximum value is 10Mbp (mega base\npairs).", + "format": "int32", + "type": "integer", + "location": "query" + }, + "start": { + "location": "query", + "description": "The start position (0-based) of this query. Defaults to 0.", + "format": "int64", + "type": "string" + }, + "referenceId": { + "location": "path", + "description": "The ID of the reference.", + "required": true, + "type": "string" + }, + "end": { + "description": "The end position (0-based, exclusive) of this query. Defaults to the length\nof this reference.", + "format": "int64", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics", + "https://www.googleapis.com/auth/genomics.readonly" + ], + "flatPath": "v1/references/{referenceId}/bases", + "id": "genomics.references.bases.list", + "path": "v1/references/{referenceId}/bases", + "description": "Lists the bases in a reference, optionally restricted to a range.\n\nFor the definitions of references and other genomics resources, see\n[Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nImplements\n[GlobalAllianceApi.getReferenceBases](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L221)." } - }, - "path": "v1/{+resource}:testIamPermissions", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + } } } } }, - "schemas": { - "SearchReferencesRequest": { - "type": "object", - "properties": { - "referenceSetId": { - "description": "If present, return only references which belong to this reference set.", - "type": "string" - }, - "md5checksums": { - "description": "If present, return references for which the\nmd5checksum matches exactly.", - "type": "array", - "items": { - "type": "string" - } - }, - "pageSize": { - "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024. The maximum value is 4096.", - "type": "integer", - "format": "int32" - }, - "accessions": { - "description": "If present, return references for which a prefix of any of\nsourceAccessions match\nany of these strings. Accession numbers typically have a main number and a\nversion, for example `GCF_000001405.26`.", - "type": "array", - "items": { - "type": "string" - } - }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", - "type": "string" - } - }, - "id": "SearchReferencesRequest" + "parameters": { + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" }, - "ReadGroupSet": { - "description": "A read group set is a logical collection of read groups, which are\ncollections of reads produced by a sequencer. 
A read group set typically\nmodels reads corresponding to one sample, sequenced one way, and aligned one\nway.\n\n* A read group set belongs to one dataset.\n* A read group belongs to one read group set.\n* A read belongs to one read group.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "type": "object", - "properties": { - "id": { - "description": "The server-generated read group set ID, unique for all read group sets.", - "type": "string" - }, - "info": { - "description": "A map of additional read group set information.", - "additionalProperties": { - "type": "array", - "items": { - "type": "any" - } - }, - "type": "object" - }, - "datasetId": { - "description": "The dataset to which this read group set belongs.", - "type": "string" - }, - "filename": { - "description": "The filename of the original source file for this read group set, if any.", - "type": "string" - }, - "name": { - "description": "The read group set name. By default this will be initialized to the sample\nname of the sequenced data contained in this set.", - "type": "string" - }, - "referenceSetId": { - "description": "The reference set to which the reads in this read group set are aligned.", - "type": "string" - }, - "readGroups": { - "description": "The read groups in this set. There are typically 1-10 read groups in a read\ngroup set.", - "type": "array", - "items": { - "$ref": "ReadGroup" - } - } - }, - "id": "ReadGroupSet" + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" }, - "ExportVariantSetRequest": { - "description": "The variant data export request.", - "type": "object", - "properties": { - "callSetIds": { - "description": "If provided, only variant call information from the specified call sets\nwill be exported. By default all variant calls are exported.", - "type": "array", - "items": { - "type": "string" - } - }, - "bigqueryDataset": { - "description": "Required. The BigQuery dataset to export data to. This dataset must already\nexist. Note that this is distinct from the Genomics concept of \"dataset\".", - "type": "string" - }, - "bigqueryTable": { - "description": "Required. The BigQuery table to export data to.\nIf the table doesn't exist, it will be created. If it already exists, it\nwill be overwritten.", - "type": "string" - }, - "projectId": { - "description": "Required. The Google Cloud project ID that owns the destination\nBigQuery dataset. The caller must have WRITE access to this project. This\nproject will also own the resulting export job.", - "type": "string" - }, - "format": { - "description": "The format for the exported data.", - "enum": [ - "FORMAT_UNSPECIFIED", - "FORMAT_BIGQUERY" - ], - "enumDescriptions": [ - "", - "Export the data to Google BigQuery." - ], - "type": "string" - } - }, - "id": "ExportVariantSetRequest" + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string" }, - "ComputeEngine": { - "description": "Describes a Compute Engine resource that is being managed by a running\npipeline.", - "type": "object", - "properties": { - "instanceName": { - "description": "The instance on which the operation is running.", - "type": "string" - }, - "machineType": { - "description": "The machine type of the instance.", - "type": "string" - }, - "zone": { - "description": "The availability zone in which the instance resides.", - "type": "string" - }, - "diskNames": { - "description": "The names of the disks that were created for this pipeline.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "ComputeEngine" + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "CodingSequence": { - "type": "object", - "properties": { - "end": { - "description": "The end of the coding sequence on this annotation's reference sequence,\n0-based exclusive. Note that this position is relative to the reference\nstart, and *not* the containing annotation start.", - "type": "string", - "format": "int64" - }, - "start": { - "description": "The start of the coding sequence on this annotation's reference sequence,\n0-based inclusive. Note that this position is relative to the reference\nstart, and *not* the containing annotation start.", - "type": "string", - "format": "int64" - } - }, - "id": "CodingSequence" + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, - "Transcript": { - "description": "A transcript represents the assertion that a particular region of the\nreference genome may be transcribed as RNA.", - "type": "object", - "properties": { - "codingSequence": { - "description": "The range of the coding sequence for this transcript, if any. To determine\nthe exact ranges of coding sequence, intersect this range with those of the\nexons, if any. If there are any\nexons, the\ncodingSequence must start\nand end within them.\n\nNote that in some cases, the reference genome will not exactly match the\nobserved mRNA transcript e.g. due to variance in the source genome from\nreference. In these cases,\nexon.frame will not necessarily\nmatch the expected reference reading frame and coding exon reference bases\ncannot necessarily be concatenated to produce the original transcript mRNA.", - "$ref": "CodingSequence" - }, - "geneId": { - "description": "The annotation ID of the gene from which this transcript is transcribed.", - "type": "string" - }, - "exons": { - "description": "The \u003ca href=\"http://en.wikipedia.org/wiki/Exon\"\u003eexons\u003c/a\u003e that compose\nthis transcript. This field should be unset for genomes where transcript\nsplicing does not occur, for example prokaryotes.\n\nIntrons are regions of the transcript that are not included in the\nspliced RNA product. Though not explicitly modeled here, intron ranges can\nbe deduced; all regions of this transcript that are not exons are introns.\n\nExonic sequences do not necessarily code for a translational product\n(amino acids). Only the regions of exons bounded by the\ncodingSequence correspond\nto coding DNA sequence.\n\nExons are ordered by start position and may not overlap.", - "type": "array", - "items": { - "$ref": "Exon" - } - } - }, - "id": "Transcript" + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "$.xgafv": { + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query" + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + } + }, + "schemas": { "ListOperationsResponse": { "description": "The response message for Operations.ListOperations.", "type": "object", @@ -1712,543 +1607,400 @@ }, "id": "ListOperationsResponse" }, - "Entry": { - "type": "object", - "properties": { - "annotation": { - "description": "The created annotation, if creation was successful.", - "$ref": "Annotation" - }, - "status": { - "description": "The creation status.", - "$ref": "Status" - } - }, - "id": "Entry" - }, - "ClinicalCondition": { + "Variant": { + "description": "A variant represents a change in DNA sequence relative to a reference\nsequence. For example, a variant could represent a SNP or an insertion.\nVariants belong to a variant set.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nEach of the calls on a variant represent a determination of genotype with\nrespect to that variant. For example, a call might assign probability of 0.32\nto the occurrence of a SNP named rs1234 in a sample named NA12345. 
A call\nbelongs to a call set, which contains related calls typically from one\nsample.", "type": "object", "properties": { - "externalIds": { - "description": "The set of external IDs for this condition.", - "type": "array", - "items": { - "$ref": "ExternalId" - } - }, - "names": { - "description": "A set of names for the condition.", - "type": "array", - "items": { - "type": "string" - } - }, - "omimId": { - "description": "The OMIM id for this condition.\nSearch for these IDs at http://omim.org/", + "variantSetId": { + "description": "The ID of the variant set this variant belongs to.", "type": "string" }, - "conceptId": { - "description": "The MedGen concept id associated with this gene.\nSearch for these IDs at http://www.ncbi.nlm.nih.gov/medgen/", - "type": "string" - } - }, - "id": "ClinicalCondition" - }, - "CancelOperationRequest": { - "description": "The request message for Operations.CancelOperation.", - "type": "object", - "properties": {}, - "id": "CancelOperationRequest" - }, - "SearchVariantsRequest": { - "description": "The variant search request.", - "type": "object", - "properties": { "referenceName": { - "description": "Required. Only return variants in this reference sequence.", - "type": "string" - }, - "variantSetIds": { - "description": "At most one variant set ID must be provided. Only variants from this\nvariant set will be returned. If omitted, a call set id must be included in\nthe request.", - "type": "array", - "items": { - "type": "string" - } - }, - "maxCalls": { - "description": "The maximum number of calls to return in a single page. Note that this\nlimit may be exceeded in the event that a matching variant contains more\ncalls than the requested maximum. If unspecified, defaults to 5000. The\nmaximum value is 10000.", - "type": "integer", - "format": "int32" - }, - "pageSize": { - "description": "The maximum number of variants to return in a single page. If unspecified,\ndefaults to 5000. The maximum value is 10000.", - "type": "integer", - "format": "int32" - }, - "start": { - "description": "The beginning of the window (0-based, inclusive) for which\noverlapping variants should be returned. If unspecified, defaults to 0.", - "type": "string", - "format": "int64" - }, - "callSetIds": { - "description": "Only return variant calls which belong to call sets with these ids.\nLeaving this blank returns all variant calls. If a variant has no\ncalls belonging to any of these call sets, it won't be returned at all.", - "type": "array", - "items": { - "type": "string" - } - }, - "variantName": { - "description": "Only return variants which have exactly this name.", - "type": "string" - }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", - "type": "string" - }, - "end": { - "description": "The end of the window, 0-based exclusive. 
If unspecified or 0, defaults to\nthe length of the reference.", - "type": "string", - "format": "int64" - } - }, - "id": "SearchVariantsRequest" - }, - "VariantSetMetadata": { - "description": "Metadata describes a single piece of variant call metadata.\nThese data include a top level key and either a single value string (value)\nor a list of key-value pairs (info.)\nValue and info are mutually exclusive.", - "type": "object", - "properties": { - "id": { - "description": "User-provided ID field, not enforced by this API.\nTwo or more pieces of structured metadata with identical\nid and key fields are considered equivalent.", - "type": "string" - }, - "description": { - "description": "A textual description of this metadata.", + "description": "The reference on which this variant occurs.\n(such as `chr20` or `X`)", "type": "string" }, "info": { - "description": "Remaining structured metadata key-value pairs. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "additionalProperties": { "type": "array", "items": { "type": "any" } }, + "description": "A map of additional variant information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "type": "object" }, - "key": { - "description": "The top-level key.", - "type": "string" - }, - "value": { - "description": "The value field for simple metadata", - "type": "string" - }, - "number": { - "description": "The number of values that can be included in a field described by this\nmetadata.", + "referenceBases": { + "description": "The reference bases for this variant. They start at the given\nposition.", "type": "string" }, - "type": { - "description": "The type of data. Possible types include: Integer, Float,\nFlag, Character, and String.", - "enum": [ - "TYPE_UNSPECIFIED", - "INTEGER", - "FLOAT", - "FLAG", - "CHARACTER", - "STRING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "id": "VariantSetMetadata" - }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", - "type": "object", - "properties": { - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "names": { + "description": "Names for the variant, for example a RefSNP ID.", "type": "array", "items": { - "$ref": "Binding" + "type": "string" } }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "type": "string", - "format": "byte" - }, - "version": { - "description": "Version of the `Policy`. The default version is 0.", - "type": "integer", - "format": "int32" - } - }, - "id": "Policy" - }, - "Read": { - "description": "A read alignment describes a linear alignment of a string of DNA to a\nreference sequence, in addition to metadata\nabout the fragment (the molecule of DNA sequenced) and the read (the bases\nwhich were read by the sequencer). A read is equivalent to a line in a SAM\nfile. A read belongs to exactly one read group and exactly one\nread group set.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\n### Reverse-stranded reads\n\nMapped reads (reads having a non-null `alignment`) can be aligned to either\nthe forward or the reverse strand of their associated reference. Strandedness\nof a mapped read is encoded by `alignment.position.reverseStrand`.\n\nIf we consider the reference to be a forward-stranded coordinate space of\n`[0, reference.length)` with `0` as the left-most position and\n`reference.length` as the right-most position, reads are always aligned left\nto right. That is, `alignment.position.position` always refers to the\nleft-most reference coordinate and `alignment.cigar` describes the alignment\nof this read to the reference from left to right. All per-base fields such as\n`alignedSequence` and `alignedQuality` share this same left-to-right\norientation; this is true of reads which are aligned to either strand. For\nreverse-stranded reads, this means that `alignedSequence` is the reverse\ncomplement of the bases that were originally reported by the sequencing\nmachine.\n\n### Generating a reference-aligned sequence string\n\nWhen interacting with mapped reads, it's often useful to produce a string\nrepresenting the local alignment of the read to reference. 
The following\npseudocode demonstrates one way of doing this:\n\n out = \"\"\n offset = 0\n for c in read.alignment.cigar {\n switch c.operation {\n case \"ALIGNMENT_MATCH\", \"SEQUENCE_MATCH\", \"SEQUENCE_MISMATCH\":\n out += read.alignedSequence[offset:offset+c.operationLength]\n offset += c.operationLength\n break\n case \"CLIP_SOFT\", \"INSERT\":\n offset += c.operationLength\n break\n case \"PAD\":\n out += repeat(\"*\", c.operationLength)\n break\n case \"DELETE\":\n out += repeat(\"-\", c.operationLength)\n break\n case \"SKIP\":\n out += repeat(\" \", c.operationLength)\n break\n case \"CLIP_HARD\":\n break\n }\n }\n return out\n\n### Converting to SAM's CIGAR string\n\nThe following pseudocode generates a SAM CIGAR string from the\n`cigar` field. Note that this is a lossy conversion\n(`cigar.referenceSequence` is lost).\n\n cigarMap = {\n \"ALIGNMENT_MATCH\": \"M\",\n \"INSERT\": \"I\",\n \"DELETE\": \"D\",\n \"SKIP\": \"N\",\n \"CLIP_SOFT\": \"S\",\n \"CLIP_HARD\": \"H\",\n \"PAD\": \"P\",\n \"SEQUENCE_MATCH\": \"=\",\n \"SEQUENCE_MISMATCH\": \"X\",\n }\n cigarStr = \"\"\n for c in read.alignment.cigar {\n cigarStr += c.operationLength + cigarMap[c.operation]\n }\n return cigarStr", - "type": "object", - "properties": { - "id": { - "description": "The server-generated read ID, unique across all reads. This is different\nfrom the `fragmentName`.", - "type": "string" - }, - "alignment": { - "description": "The linear alignment for this alignment record. This field is null for\nunmapped reads.", - "$ref": "LinearAlignment" - }, - "failedVendorQualityChecks": { - "description": "Whether this read did not pass filters, such as platform or vendor quality\ncontrols (SAM flag 0x200).", - "type": "boolean" - }, - "supplementaryAlignment": { - "description": "Whether this alignment is supplementary. Equivalent to SAM flag 0x800.\nSupplementary alignments are used in the representation of a chimeric\nalignment. In a chimeric alignment, a read is split into multiple\nlinear alignments that map to different reference contigs. The first\nlinear alignment in the read will be designated as the representative\nalignment; the remaining linear alignments will be designated as\nsupplementary alignments. These alignments may have different mapping\nquality scores. In each linear alignment in a chimeric alignment, the read\nwill be hard clipped. The `alignedSequence` and\n`alignedQuality` fields in the alignment record will only\nrepresent the bases for its respective linear alignment.", - "type": "boolean" - }, - "numberReads": { - "description": "The number of reads in the fragment (extension to SAM flag 0x1).", - "type": "integer", - "format": "int32" - }, - "info": { - "description": "A map of additional read alignment information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", - "additionalProperties": { - "type": "array", - "items": { - "type": "any" - } - }, - "type": "object" - }, - "properPlacement": { - "description": "The orientation and the distance between reads from the fragment are\nconsistent with the sequencing protocol (SAM flag 0x2).", - "type": "boolean" - }, - "readGroupId": { - "description": "The ID of the read group this read belongs to. A read belongs to exactly\none read group. This is a server-generated ID which is distinct from SAM's\nRG tag (for that value, see\nReadGroup.name).", - "type": "string" - }, - "fragmentName": { - "description": "The fragment name. 
Equivalent to QNAME (query template name) in SAM.", - "type": "string" - }, - "nextMatePosition": { - "description": "The mapping of the primary alignment of the\n`(readNumber+1)%numberReads` read in the fragment. It replaces\nmate position and mate strand in SAM.", - "$ref": "Position" - }, - "secondaryAlignment": { - "description": "Whether this alignment is secondary. Equivalent to SAM flag 0x100.\nA secondary alignment represents an alternative to the primary alignment\nfor this read. Aligners may return secondary alignments if a read can map\nambiguously to multiple coordinates in the genome. By convention, each read\nhas one and only one alignment where both `secondaryAlignment`\nand `supplementaryAlignment` are false.", - "type": "boolean" - }, - "alignedQuality": { - "description": "The quality of the read sequence contained in this alignment record\n(equivalent to QUAL in SAM).\n`alignedSequence` and `alignedQuality` may be shorter than the full read\nsequence and quality. This will occur if the alignment is part of a\nchimeric alignment, or if the read was trimmed. When this occurs, the CIGAR\nfor this read will begin/end with a hard clip operator that will indicate\nthe length of the excised sequence.", + "alternateBases": { + "description": "The bases that appear instead of the reference bases.", "type": "array", "items": { - "type": "integer", - "format": "int32" + "type": "string" } }, - "readGroupSetId": { - "description": "The ID of the read group set this read belongs to. A read belongs to\nexactly one read group set.", - "type": "string" - }, - "fragmentLength": { - "description": "The observed length of the fragment, equivalent to TLEN in SAM.", - "type": "integer", - "format": "int32" - }, - "duplicateFragment": { - "description": "The fragment is a PCR or optical duplicate (SAM flag 0x400).", - "type": "boolean" - }, - "readNumber": { - "description": "The read number in sequencing. 0-based and less than numberReads. This\nfield replaces SAM flag 0x40 and 0x80.", - "type": "integer", - "format": "int32" - }, - "alignedSequence": { - "description": "The bases of the read sequence contained in this alignment record,\n**without CIGAR operations applied** (equivalent to SEQ in SAM).\n`alignedSequence` and `alignedQuality` may be\nshorter than the full read sequence and quality. This will occur if the\nalignment is part of a chimeric alignment, or if the read was trimmed. When\nthis occurs, the CIGAR for this read will begin/end with a hard clip\noperator that will indicate the length of the excised sequence.", - "type": "string" - } - }, - "id": "Read" - }, - "SearchReadsRequest": { - "description": "The read search request.", - "type": "object", - "properties": { - "referenceName": { - "description": "The reference sequence name, for example `chr1`, `1`, or `chrX`. If set to\n`*`, only unmapped reads are returned. If unspecified, all reads (mapped\nand unmapped) are returned.", - "type": "string" - }, - "pageSize": { - "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 256. The maximum value is 2048.", - "type": "integer", - "format": "int32" + "end": { + "description": "The end position (0-based) of this variant. This corresponds to the first\nbase after the last base in the reference allele. So, the length of\nthe reference allele is (end - start). 
This is useful for variants\nthat don't explicitly give alternate bases, for example large deletions.", + "format": "int64", + "type": "string" }, - "readGroupSetIds": { - "description": "The IDs of the read groups sets within which to search for reads. All\nspecified read group sets must be aligned against a common set of reference\nsequences; this defines the genomic coordinates for the query. Must specify\none of `readGroupSetIds` or `readGroupIds`.", + "filter": { + "description": "A list of filters (normally quality filters) this variant has failed.\n`PASS` indicates this variant has passed all filters.", "type": "array", "items": { "type": "string" } }, - "start": { - "description": "The start position of the range on the reference, 0-based inclusive. If\nspecified, `referenceName` must also be specified.", - "type": "string", - "format": "int64" - }, - "readGroupIds": { - "description": "The IDs of the read groups within which to search for reads. All specified\nread groups must belong to the same read group sets. Must specify one of\n`readGroupSetIds` or `readGroupIds`.", + "calls": { + "description": "The variant calls for this particular variant. Each one represents the\ndetermination of genotype with respect to this variant.", "type": "array", "items": { - "type": "string" + "$ref": "VariantCall" } }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "created": { + "description": "The date this variant was created, in milliseconds from the epoch.", + "format": "int64", "type": "string" }, - "end": { - "description": "The end position of the range on the reference, 0-based exclusive. If\nspecified, `referenceName` must also be specified.", - "type": "string", - "format": "int64" - } - }, - "id": "SearchReadsRequest" - }, - "UndeleteDatasetRequest": { - "type": "object", - "properties": {}, - "id": "UndeleteDatasetRequest" - }, - "Exon": { - "type": "object", - "properties": { - "end": { - "description": "The end position of the exon on this annotation's reference sequence,\n0-based exclusive. Note that this is relative to the reference start, and\n*not* the containing annotation start.", - "type": "string", - "format": "int64" - }, "start": { - "description": "The start position of the exon on this annotation's reference sequence,\n0-based inclusive. Note that this is relative to the reference start, and\n**not** the containing annotation start.", - "type": "string", - "format": "int64" + "description": "The position at which this variant occurs (0-based).\nThis corresponds to the first base of the string of reference bases.", + "format": "int64", + "type": "string" }, - "frame": { - "description": "The frame of this exon. Contains a value of 0, 1, or 2, which indicates\nthe offset of the first coding base of the exon within the reading frame\nof the coding DNA sequence, if any. This field is dependent on the\nstrandedness of this annotation (see\nAnnotation.reverse_strand).\nFor forward stranded annotations, this offset is relative to the\nexon.start. For reverse\nstrand annotations, this offset is relative to the\nexon.end `- 1`.\n\nUnset if this exon does not intersect the coding sequence. 
Upon creation\nof a transcript, the frame must be populated for all or none of the\ncoding exons.", - "type": "integer", - "format": "int32" + "quality": { + "description": "A measure of how likely this variant is to be real.\nA higher value is better.", + "format": "double", + "type": "number" + }, + "id": { + "description": "The server-generated variant ID, unique across all variants.", + "type": "string" } }, - "id": "Exon" - }, - "GetIamPolicyRequest": { - "description": "Request message for `GetIamPolicy` method.", - "type": "object", - "properties": {}, - "id": "GetIamPolicyRequest" + "id": "Variant" }, - "ReferenceBound": { - "description": "ReferenceBound records an upper bound for the starting coordinate of\nvariants in a particular reference.", + "SearchCallSetsResponse": { + "description": "The call set search response.", "type": "object", "properties": { - "referenceName": { - "description": "The name of the reference associated with this reference bound.", + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", "type": "string" }, - "upperBound": { - "description": "An upper bound (inclusive) on the starting coordinate of any\nvariant in the reference sequence.", - "type": "string", - "format": "int64" + "callSets": { + "description": "The list of matching call sets.", + "type": "array", + "items": { + "$ref": "CallSet" + } } }, - "id": "ReferenceBound" + "id": "SearchCallSetsResponse" }, - "LinearAlignment": { - "description": "A linear alignment can be represented by one CIGAR string. Describes the\nmapped position and local alignment of the read to the reference.", - "type": "object", + "SearchVariantsRequest": { "properties": { - "position": { - "description": "The position of this alignment.", - "$ref": "Position" + "referenceName": { + "description": "Required. Only return variants in this reference sequence.", + "type": "string" }, - "mappingQuality": { - "description": "The mapping quality of this alignment. Represents how likely\nthe read maps to this position as opposed to other locations.\n\nSpecifically, this is -10 log10 Pr(mapping position is wrong), rounded to\nthe nearest integer.", - "type": "integer", - "format": "int32" + "variantSetIds": { + "description": "At most one variant set ID must be provided. Only variants from this\nvariant set will be returned. If omitted, a call set id must be included in\nthe request.", + "type": "array", + "items": { + "type": "string" + } }, - "cigar": { - "description": "Represents the local alignment of this sequence (alignment matches, indels,\netc) against the reference.", + "end": { + "description": "The end of the window, 0-based exclusive. If unspecified or 0, defaults to\nthe length of the reference.", + "format": "int64", + "type": "string" + }, + "maxCalls": { + "description": "The maximum number of calls to return in a single page. Note that this\nlimit may be exceeded in the event that a matching variant contains more\ncalls than the requested maximum. If unspecified, defaults to 5000. 
The\nmaximum value is 10000.", + "format": "int32", + "type": "integer" + }, + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of variants to return in a single page. If unspecified,\ndefaults to 5000. The maximum value is 10000.", + "format": "int32", + "type": "integer" + }, + "callSetIds": { + "description": "Only return variant calls which belong to call sets with these ids.\nLeaving this blank returns all variant calls. If a variant has no\ncalls belonging to any of these call sets, it won't be returned at all.", "type": "array", "items": { - "$ref": "CigarUnit" + "type": "string" } + }, + "start": { + "description": "The beginning of the window (0-based, inclusive) for which\noverlapping variants should be returned. If unspecified, defaults to 0.", + "format": "int64", + "type": "string" + }, + "variantName": { + "description": "Only return variants which have exactly this name.", + "type": "string" } }, - "id": "LinearAlignment" + "id": "SearchVariantsRequest", + "description": "The variant search request.", + "type": "object" }, - "Annotation": { - "description": "An annotation describes a region of reference genome. The value of an\nannotation may be one of several canonical types, supplemented by arbitrary\ninfo tags. An annotation is not inherently associated with a specific\nsample or individual (though a client could choose to use annotations in\nthis way). Example canonical annotation types are `GENE` and\n`VARIANT`.", - "type": "object", + "OperationMetadata": { "properties": { - "id": { - "description": "The server-generated annotation ID, unique across all annotations.", + "startTime": { + "description": "The time at which the job began to run.", + "format": "google-datetime", "type": "string" }, - "referenceName": { - "description": "The display name corresponding to the reference specified by\n`referenceId`, for example `chr1`, `1`, or `chrX`.", - "type": "string" + "request": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The original request that started the operation. Note that this will be in\ncurrent version of the API. If the operation was started with v1beta2 API\nand a GetOperation is performed on v1 API, a v1 request will be returned.", + "type": "object" }, - "info": { - "description": "A map of additional read alignment information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", + "runtimeMetadata": { "additionalProperties": { - "type": "array", - "items": { - "type": "any" - } + "description": "Properties of the object. 
Contains field @type with type URL.", + "type": "any" }, + "description": "Runtime metadata on this Operation.", "type": "object" }, - "referenceId": { - "description": "The ID of the Google Genomics reference associated with this range.", + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optionally provided by the caller when submitting the request that creates\nthe operation.", + "type": "object" + }, + "createTime": { + "description": "The time at which the job was submitted to the Genomics service.", + "format": "google-datetime", "type": "string" }, - "variant": { - "description": "A variant annotation, which describes the effect of a variant on the\ngenome, the coding sequence, and/or higher level consequences at the\norganism level e.g. pathogenicity. This field is only set for annotations\nof type `VARIANT`.", - "$ref": "VariantAnnotation" + "projectId": { + "description": "The Google Cloud Project in which the job is scoped.", + "type": "string" }, - "annotationSetId": { - "description": "The annotation set to which this annotation belongs.", + "clientId": { + "description": "This field is deprecated. Use `labels` instead. Optionally provided by the\ncaller when submitting the request that creates the operation.", + "type": "string" + }, + "endTime": { + "description": "The time at which the job stopped running.", + "format": "google-datetime", "type": "string" }, + "events": { + "description": "Optional event messages that were generated during the job's execution.\nThis also contains any warnings that were generated during import\nor export.", + "type": "array", + "items": { + "$ref": "OperationEvent" + } + } + }, + "id": "OperationMetadata", + "description": "Metadata describing an Operation.", + "type": "object" + }, + "SearchReadGroupSetsRequest": { + "description": "The read group set search request.", + "type": "object", + "properties": { "name": { - "description": "The display name of this annotation.", + "description": "Only return read group sets for which a substring of the name matches this\nstring.", "type": "string" }, - "transcript": { - "description": "A transcript value represents the assertion that a particular region of\nthe reference genome may be transcribed as RNA. An alternative splicing\npattern would be represented as a separate transcript object. This field\nis only set for annotations of type `TRANSCRIPT`.", - "$ref": "Transcript" + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string" }, - "start": { - "description": "The start position of the range on the reference, 0-based inclusive.", - "type": "string", - "format": "int64" + "pageSize": { + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 256. The maximum value is 1024.", + "format": "int32", + "type": "integer" }, - "reverseStrand": { - "description": "Whether this range refers to the reverse strand, as opposed to the forward\nstrand. Note that regardless of this field, the start/end position of the\nrange always refer to the forward strand.", - "type": "boolean" + "datasetIds": { + "description": "Restricts this query to read group sets within the given datasets. 
At least\none ID must be provided.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "SearchReadGroupSetsRequest" + }, + "SearchAnnotationsResponse": { + "properties": { + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "type": "string" }, - "end": { - "description": "The end position of the range on the reference, 0-based exclusive.", - "type": "string", - "format": "int64" + "annotations": { + "description": "The matching annotations.", + "type": "array", + "items": { + "$ref": "Annotation" + } + } + }, + "id": "SearchAnnotationsResponse", + "type": "object" + }, + "SearchReadsResponse": { + "properties": { + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "type": "string" }, - "type": { - "description": "The data type for this annotation. Must match the containing annotation\nset's type.", - "enum": [ - "ANNOTATION_TYPE_UNSPECIFIED", - "GENERIC", - "VARIANT", - "GENE", - "TRANSCRIPT" - ], - "enumDescriptions": [ - "", - "A `GENERIC` annotation type should be used when no other annotation\ntype will suffice. This represents an untyped annotation of the reference\ngenome.", - "A `VARIANT` annotation type.", - "A `GENE` annotation type represents the existence of a gene at the\nassociated reference coordinates. The start coordinate is typically the\ngene's transcription start site and the end is typically the end of the\ngene's last exon.", - "A `TRANSCRIPT` annotation type represents the assertion that a\nparticular region of the reference genome may be transcribed as RNA." - ], + "alignments": { + "description": "The list of matching alignments sorted by mapped genomic coordinate,\nif any, ascending in position within the same reference. Unmapped reads,\nwhich have no position, are returned contiguously and are sorted in\nascending lexicographic order by fragment name.", + "type": "array", + "items": { + "$ref": "Read" + } + } + }, + "id": "SearchReadsResponse", + "description": "The read search response.", + "type": "object" + }, + "ClinicalCondition": { + "properties": { + "conceptId": { + "description": "The MedGen concept id associated with this gene.\nSearch for these IDs at http://www.ncbi.nlm.nih.gov/medgen/", + "type": "string" + }, + "names": { + "description": "A set of names for the condition.", + "type": "array", + "items": { + "type": "string" + } + }, + "omimId": { + "description": "The OMIM id for this condition.\nSearch for these IDs at http://omim.org/", "type": "string" + }, + "externalIds": { + "description": "The set of external IDs for this condition.", + "type": "array", + "items": { + "$ref": "ExternalId" + } } }, - "id": "Annotation" + "id": "ClinicalCondition", + "type": "object" }, - "Experiment": { + "Program": { "type": "object", "properties": { - "instrumentModel": { - "description": "The instrument model used as part of this experiment. 
This maps to\nsequencing technology in the SAM spec.", + "commandLine": { + "description": "The command line used to run this program.", "type": "string" }, - "sequencingCenter": { - "description": "The sequencing center used as part of this experiment.", + "prevProgramId": { + "description": "The ID of the program run before this one.", "type": "string" }, - "libraryId": { - "description": "A client-supplied library identifier; a library is a collection of DNA\nfragments which have been prepared for sequencing from a sample. This\nfield is important for quality control as error or bias can be introduced\nduring sample preparation.", + "id": { + "description": "The user specified locally unique ID of the program. Used along with\n`prevProgramId` to define an ordering between programs.", "type": "string" }, - "platformUnit": { - "description": "The platform unit used as part of this experiment, for example\nflowcell-barcode.lane for Illumina or slide for SOLiD. Corresponds to the\n@RG PU field in the SAM spec.", + "version": { + "description": "The version of the program run.", + "type": "string" + }, + "name": { + "description": "The display name of the program. This is typically the colloquial name of\nthe tool used, for example 'bwa' or 'picard'.", "type": "string" } }, - "id": "Experiment" + "id": "Program" }, - "VariantCall": { - "description": "A call represents the determination of genotype with respect to a particular\nvariant. It may include associated information such as quality and phasing.\nFor example, a call might assign a probability of 0.32 to the occurrence of\na SNP named rs1234 in a call set with the name NA12345.", - "type": "object", + "ComputeEngine": { "properties": { - "callSetName": { - "description": "The name of the call set this variant call belongs to.", + "machineType": { + "description": "The machine type of the instance.", "type": "string" }, - "info": { - "description": "A map of additional variant call information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", - "additionalProperties": { - "type": "array", - "items": { - "type": "any" - } - }, - "type": "object" + "diskNames": { + "description": "The names of the disks that were created for this pipeline.", + "type": "array", + "items": { + "type": "string" + } }, - "callSetId": { - "description": "The ID of the call set this variant call belongs to.", + "instanceName": { + "description": "The instance on which the operation is running.", "type": "string" }, - "phaseset": { - "description": "If this field is present, this variant call's genotype ordering implies\nthe phase of the bases and is consistent with any other variant calls in\nthe same reference sequence which have the same phaseset value.\nWhen importing data from VCF, if the genotype data was phased but no\nphase set was specified this field will be set to `*`.", + "zone": { + "description": "The availability zone in which the instance resides.", "type": "string" + } + }, + "id": "ComputeEngine", + "description": "Describes a Compute Engine resource that is being managed by a running\npipeline.", + "type": "object" + }, + "CoverageBucket": { + "properties": { + "meanCoverage": { + "description": "The average number of reads which are aligned to each individual\nreference base in this bucket.", + "format": "float", + "type": "number" }, - "genotype": { - "description": "The genotype of this variant call. 
Each value represents either the value\nof the `referenceBases` field or a 1-based index into\n`alternateBases`. If a variant had a `referenceBases`\nvalue of `T` and an `alternateBases`\nvalue of `[\"A\", \"C\"]`, and the `genotype` was\n`[2, 1]`, that would mean the call\nrepresented the heterozygous value `CA` for this variant.\nIf the `genotype` was instead `[0, 1]`, the\nrepresented value would be `TA`. Ordering of the\ngenotype values is important if the `phaseset` is present.\nIf a genotype is not called (that is, a `.` is present in the\nGT string) -1 is returned.", - "type": "array", - "items": { - "type": "integer", - "format": "int32" - } + "range": { + "$ref": "Range", + "description": "The genomic coordinate range spanned by this bucket." + } + }, + "id": "CoverageBucket", + "description": "A bucket over which read coverage has been precomputed. A bucket corresponds\nto a specific range of the reference sequence.", + "type": "object" + }, + "ExternalId": { + "properties": { + "sourceName": { + "description": "The name of the source of this data.", + "type": "string" }, - "genotypeLikelihood": { - "description": "The genotype likelihoods for this variant call. Each array entry\nrepresents how likely a specific genotype is for this call. The value\nordering is defined by the GL tag in the VCF spec.\nIf Phred-scaled genotype likelihood scores (PL) are available and\nlog10(P) genotype likelihood scores (GL) are not, PL scores are converted\nto GL scores. If both are available, PL scores are stored in `info`.", - "type": "array", - "items": { - "type": "number", - "format": "double" - } + "id": { + "description": "The id used by the source of this data.", + "type": "string" } }, - "id": "VariantCall" + "id": "ExternalId", + "type": "object" }, "SearchVariantSetsRequest": { - "description": "The search variant sets request.", - "type": "object", "properties": { + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string" + }, "pageSize": { "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024.", - "type": "integer", - "format": "int32" + "format": "int32", + "type": "integer" }, "datasetIds": { "description": "Exactly one dataset ID must be provided here. 
Only variant sets which\nbelong to this dataset will be returned.", @@ -2256,401 +2008,492 @@ "items": { "type": "string" } - }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", - "type": "string" } }, - "id": "SearchVariantSetsRequest" + "id": "SearchVariantSetsRequest", + "description": "The search variant sets request.", + "type": "object" }, - "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", - "type": "object", + "VariantSetMetadata": { "properties": { - "error": { - "description": "The error result of the operation in case of failure or cancellation.", - "$ref": "Status" + "key": { + "description": "The top-level key.", + "type": "string" }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" + "description": { + "description": "A textual description of this metadata.", + "type": "string" }, - "metadata": { - "description": "An OperationMetadata object. This will always be returned with the Operation.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "type": { + "enum": [ + "TYPE_UNSPECIFIED", + "INTEGER", + "FLOAT", + "FLAG", + "CHARACTER", + "STRING" + ], + "description": "The type of data. Possible types include: Integer, Float,\nFlag, Character, and String.", + "type": "string", + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ] }, - "response": { - "description": "If importing ReadGroupSets, an ImportReadGroupSetsResponse is returned. If importing Variants, an ImportVariantsResponse is returned. For pipelines and exports, an empty response is returned.", + "info": { "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" + "type": "array", + "items": { + "type": "any" + } }, + "description": "Remaining structured metadata key-value pairs. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "type": "object" }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that originally returns it. 
For example: `operations/CJHU7Oi_ChDrveSpBRjfuL-qzoWAgEw`", + "number": { + "description": "The number of values that can be included in a field described by this\nmetadata.", + "type": "string" + }, + "id": { + "description": "User-provided ID field, not enforced by this API.\nTwo or more pieces of structured metadata with identical\nid and key fields are considered equivalent.", + "type": "string" + }, + "value": { + "description": "The value field for simple metadata", "type": "string" } }, - "id": "Operation" + "id": "VariantSetMetadata", + "description": "Metadata describes a single piece of variant call metadata.\nThese data include a top level key and either a single value string (value)\nor a list of key-value pairs (info.)\nValue and info are mutually exclusive.", + "type": "object" }, - "SearchAnnotationsRequest": { - "type": "object", + "Reference": { "properties": { - "referenceName": { - "description": "The name of the reference to query, within the reference set associated\nwith this query.", + "name": { + "description": "The name of this reference, for example `22`.", "type": "string" }, - "referenceId": { - "description": "The ID of the reference to query.", + "md5checksum": { + "description": "MD5 of the upper-case sequence excluding all whitespace characters (this\nis equivalent to SQ:M5 in SAM). This value is represented in lower case\nhexadecimal format.", "type": "string" }, - "pageSize": { - "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 256. The maximum value is 2048.", - "type": "integer", - "format": "int32" + "id": { + "description": "The server-generated reference ID, unique across all references.", + "type": "string" }, - "annotationSetIds": { - "description": "Required. The annotation sets to search within. The caller must have\n`READ` access to these annotation sets.\nAll queried annotation sets must have the same type.", + "length": { + "description": "The length of this reference's sequence.", + "format": "int64", + "type": "string" + }, + "sourceAccessions": { + "description": "All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally\nwith a version number, for example `GCF_000001405.26`.", "type": "array", "items": { "type": "string" } }, - "start": { - "description": "The start position of the range on the reference, 0-based inclusive. If\nspecified,\nreferenceId or\nreferenceName\nmust be specified. Defaults to 0.", - "type": "string", - "format": "int64" + "ncbiTaxonId": { + "description": "ID from http://www.ncbi.nlm.nih.gov/taxonomy. For example, 9606 for human.", + "format": "int32", + "type": "integer" + }, + "sourceUri": { + "description": "The URI from which the sequence was obtained. Typically specifies a FASTA\nformat file.", + "type": "string" + } + }, + "id": "Reference", + "description": "A reference is a canonical assembled DNA sequence, intended to act as a\nreference coordinate space for other genomic annotations. A single reference\nmight represent the human chromosome 1 or mitochandrial DNA, for instance. A\nreference belongs to one or more reference sets.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "type": "object" + }, + "SearchReferenceSetsRequest": { + "properties": { + "accessions": { + "description": "If present, return reference sets for which a prefix of any of\nsourceAccessions\nmatch any of these strings. 
Accession numbers typically have a main number\nand a version, for example `NC_000001.11`.", + "type": "array", + "items": { + "type": "string" + } }, "pageToken": { "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", "type": "string" }, - "end": { - "description": "The end position of the range on the reference, 0-based exclusive. If\nreferenceId or\nreferenceName\nmust be specified, Defaults to the length of the reference.", - "type": "string", - "format": "int64" + "pageSize": { + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024. The maximum value is 4096.", + "format": "int32", + "type": "integer" + }, + "assemblyId": { + "description": "If present, return reference sets for which a substring of their\n`assemblyId` matches this string (case insensitive).", + "type": "string" + }, + "md5checksums": { + "description": "If present, return reference sets for which the\nmd5checksum matches exactly.", + "type": "array", + "items": { + "type": "string" + } } }, - "id": "SearchAnnotationsRequest" + "id": "SearchReferenceSetsRequest", + "type": "object" }, "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "type": "object", "properties": { "policy": { - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", - "$ref": "Policy" + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." } }, - "id": "SetIamPolicyRequest" + "id": "SetIamPolicyRequest", + "description": "Request message for `SetIamPolicy` method.", + "type": "object" }, - "ExportReadGroupSetRequest": { - "description": "The read group set export request.", - "type": "object", + "MergeVariantsRequest": { "properties": { - "referenceNames": { - "description": "The reference names to export. If this is not specified, all reference\nsequences, including unmapped reads, are exported.\nUse `*` to export only unmapped reads.", + "variants": { + "description": "The variants to be merged with existing variants.", "type": "array", "items": { - "type": "string" + "$ref": "Variant" } }, - "projectId": { - "description": "Required. The Google Cloud project ID that owns this\nexport. The caller must have WRITE access to this project.", - "type": "string" + "infoMergeConfig": { + "additionalProperties": { + "enum": [ + "INFO_MERGE_OPERATION_UNSPECIFIED", + "IGNORE_NEW", + "MOVE_TO_CALLS" + ], + "type": "string" + }, + "description": "A mapping between info field keys and the InfoMergeOperations to\nbe performed on them.", + "type": "object" }, - "exportUri": { - "description": "Required. 
A Google Cloud Storage URI for the exported BAM file.\nThe currently authenticated user must have write access to the new file.\nAn error will be returned if the URI already contains data.", + "variantSetId": { + "description": "The destination variant set.", "type": "string" } }, - "id": "ExportReadGroupSetRequest" + "id": "MergeVariantsRequest", + "type": "object" }, - "SearchVariantsResponse": { - "description": "The variant search response.", + "BatchCreateAnnotationsRequest": { "type": "object", "properties": { - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", - "type": "string" - }, - "variants": { - "description": "The list of matching Variants.", + "annotations": { + "description": "The annotations to be created. At most 4096 can be specified in a single\nrequest.", "type": "array", "items": { - "$ref": "Variant" + "$ref": "Annotation" } + }, + "requestId": { + "description": "A unique request ID which enables the server to detect duplicated requests.\nIf provided, duplicated requests will result in the same response; if not\nprovided, duplicated requests may result in duplicated data. For a given\nannotation set, callers should not reuse `request_id`s when writing\ndifferent batches of annotations - behavior in this case is undefined.\nA common approach is to use a UUID. For batch jobs where worker crashes are\na possibility, consider using some unique variant of a worker or run ID.", + "type": "string" } }, - "id": "SearchVariantsResponse" + "id": "BatchCreateAnnotationsRequest" }, - "ReadGroup": { - "description": "A read group is all the data that's processed the same way by the sequencer.", + "Read": { + "description": "A read alignment describes a linear alignment of a string of DNA to a\nreference sequence, in addition to metadata\nabout the fragment (the molecule of DNA sequenced) and the read (the bases\nwhich were read by the sequencer). A read is equivalent to a line in a SAM\nfile. A read belongs to exactly one read group and exactly one\nread group set.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\n### Reverse-stranded reads\n\nMapped reads (reads having a non-null `alignment`) can be aligned to either\nthe forward or the reverse strand of their associated reference. Strandedness\nof a mapped read is encoded by `alignment.position.reverseStrand`.\n\nIf we consider the reference to be a forward-stranded coordinate space of\n`[0, reference.length)` with `0` as the left-most position and\n`reference.length` as the right-most position, reads are always aligned left\nto right. That is, `alignment.position.position` always refers to the\nleft-most reference coordinate and `alignment.cigar` describes the alignment\nof this read to the reference from left to right. All per-base fields such as\n`alignedSequence` and `alignedQuality` share this same left-to-right\norientation; this is true of reads which are aligned to either strand. 
For\nreverse-stranded reads, this means that `alignedSequence` is the reverse\ncomplement of the bases that were originally reported by the sequencing\nmachine.\n\n### Generating a reference-aligned sequence string\n\nWhen interacting with mapped reads, it's often useful to produce a string\nrepresenting the local alignment of the read to reference. The following\npseudocode demonstrates one way of doing this:\n\n out = \"\"\n offset = 0\n for c in read.alignment.cigar {\n switch c.operation {\n case \"ALIGNMENT_MATCH\", \"SEQUENCE_MATCH\", \"SEQUENCE_MISMATCH\":\n out += read.alignedSequence[offset:offset+c.operationLength]\n offset += c.operationLength\n break\n case \"CLIP_SOFT\", \"INSERT\":\n offset += c.operationLength\n break\n case \"PAD\":\n out += repeat(\"*\", c.operationLength)\n break\n case \"DELETE\":\n out += repeat(\"-\", c.operationLength)\n break\n case \"SKIP\":\n out += repeat(\" \", c.operationLength)\n break\n case \"CLIP_HARD\":\n break\n }\n }\n return out\n\n### Converting to SAM's CIGAR string\n\nThe following pseudocode generates a SAM CIGAR string from the\n`cigar` field. Note that this is a lossy conversion\n(`cigar.referenceSequence` is lost).\n\n cigarMap = {\n \"ALIGNMENT_MATCH\": \"M\",\n \"INSERT\": \"I\",\n \"DELETE\": \"D\",\n \"SKIP\": \"N\",\n \"CLIP_SOFT\": \"S\",\n \"CLIP_HARD\": \"H\",\n \"PAD\": \"P\",\n \"SEQUENCE_MATCH\": \"=\",\n \"SEQUENCE_MISMATCH\": \"X\",\n }\n cigarStr = \"\"\n for c in read.alignment.cigar {\n cigarStr += c.operationLength + cigarMap[c.operation]\n }\n return cigarStr", "type": "object", "properties": { - "id": { - "description": "The server-generated read group ID, unique for all read groups.\nNote: This is different than the @RG ID field in the SAM spec. For that\nvalue, see name.", + "readGroupSetId": { + "description": "The ID of the read group set this read belongs to. A read belongs to\nexactly one read group set.", "type": "string" }, - "description": { - "description": "A free-form text description of this read group.", - "type": "string" + "duplicateFragment": { + "description": "The fragment is a PCR or optical duplicate (SAM flag 0x400).", + "type": "boolean" }, - "sampleId": { - "description": "A client-supplied sample identifier for the reads in this read group.", + "readNumber": { + "description": "The read number in sequencing. 0-based and less than numberReads. This\nfield replaces SAM flag 0x40 and 0x80.", + "format": "int32", + "type": "integer" + }, + "readGroupId": { + "description": "The ID of the read group this read belongs to. A read belongs to exactly\none read group. This is a server-generated ID which is distinct from SAM's\nRG tag (for that value, see\nReadGroup.name).", "type": "string" }, - "experiment": { - "description": "The experiment used to generate this read group.", - "$ref": "Experiment" + "alignedSequence": { + "description": "The bases of the read sequence contained in this alignment record,\n**without CIGAR operations applied** (equivalent to SEQ in SAM).\n`alignedSequence` and `alignedQuality` may be\nshorter than the full read sequence and quality. This will occur if the\nalignment is part of a chimeric alignment, or if the read was trimmed. When\nthis occurs, the CIGAR for this read will begin/end with a hard clip\noperator that will indicate the length of the excised sequence.", + "type": "string" }, - "predictedInsertSize": { - "description": "The predicted insert size of this read group. 
The insert size is the length\nthe sequenced DNA fragment from end-to-end, not including the adapters.", - "type": "integer", - "format": "int32" + "nextMatePosition": { + "$ref": "Position", + "description": "The mapping of the primary alignment of the\n`(readNumber+1)%numberReads` read in the fragment. It replaces\nmate position and mate strand in SAM." }, "info": { - "description": "A map of additional read group information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "additionalProperties": { "type": "array", "items": { "type": "any" } }, + "description": "A map of additional read alignment information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "type": "object" }, - "datasetId": { - "description": "The dataset to which this read group belongs.", - "type": "string" + "properPlacement": { + "description": "The orientation and the distance between reads from the fragment are\nconsistent with the sequencing protocol (SAM flag 0x2).", + "type": "boolean" }, - "name": { - "description": "The read group name. This corresponds to the @RG ID field in the SAM spec.", - "type": "string" + "supplementaryAlignment": { + "description": "Whether this alignment is supplementary. Equivalent to SAM flag 0x800.\nSupplementary alignments are used in the representation of a chimeric\nalignment. In a chimeric alignment, a read is split into multiple\nlinear alignments that map to different reference contigs. The first\nlinear alignment in the read will be designated as the representative\nalignment; the remaining linear alignments will be designated as\nsupplementary alignments. These alignments may have different mapping\nquality scores. In each linear alignment in a chimeric alignment, the read\nwill be hard clipped. The `alignedSequence` and\n`alignedQuality` fields in the alignment record will only\nrepresent the bases for its respective linear alignment.", + "type": "boolean" }, - "referenceSetId": { - "description": "The reference set the reads in this read group are aligned to.", - "type": "string" + "fragmentLength": { + "description": "The observed length of the fragment, equivalent to TLEN in SAM.", + "format": "int32", + "type": "integer" }, - "programs": { - "description": "The programs used to generate this read group. Programs are always\nidentical for all read groups within a read group set. For this reason,\nonly the first read group in a returned set will have this field\npopulated.", + "failedVendorQualityChecks": { + "description": "Whether this read did not pass filters, such as platform or vendor quality\ncontrols (SAM flag 0x200).", + "type": "boolean" + }, + "alignedQuality": { + "description": "The quality of the read sequence contained in this alignment record\n(equivalent to QUAL in SAM).\n`alignedSequence` and `alignedQuality` may be shorter than the full read\nsequence and quality. This will occur if the alignment is part of a\nchimeric alignment, or if the read was trimmed. When this occurs, the CIGAR\nfor this read will begin/end with a hard clip operator that will indicate\nthe length of the excised sequence.", "type": "array", "items": { - "$ref": "Program" + "format": "int32", + "type": "integer" } + }, + "alignment": { + "description": "The linear alignment for this alignment record. 
This field is null for\nunmapped reads.", + "$ref": "LinearAlignment" + }, + "numberReads": { + "description": "The number of reads in the fragment (extension to SAM flag 0x1).", + "format": "int32", + "type": "integer" + }, + "id": { + "description": "The server-generated read ID, unique across all reads. This is different\nfrom the `fragmentName`.", + "type": "string" + }, + "secondaryAlignment": { + "description": "Whether this alignment is secondary. Equivalent to SAM flag 0x100.\nA secondary alignment represents an alternative to the primary alignment\nfor this read. Aligners may return secondary alignments if a read can map\nambiguously to multiple coordinates in the genome. By convention, each read\nhas one and only one alignment where both `secondaryAlignment`\nand `supplementaryAlignment` are false.", + "type": "boolean" + }, + "fragmentName": { + "description": "The fragment name. Equivalent to QNAME (query template name) in SAM.", + "type": "string" } }, - "id": "ReadGroup" + "id": "Read" }, - "BatchCreateAnnotationsResponse": { - "type": "object", + "ReferenceSet": { "properties": { - "entries": { - "description": "The resulting per-annotation entries, ordered consistently with the\noriginal request.", + "id": { + "description": "The server-generated reference set ID, unique across all reference sets.", + "type": "string" + }, + "description": { + "description": "Free text description of this reference set.", + "type": "string" + }, + "sourceAccessions": { + "description": "All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally\nwith a version number, for example `NC_000001.11`.", "type": "array", "items": { - "$ref": "Entry" + "type": "string" } - } - }, - "id": "BatchCreateAnnotationsResponse" - }, - "SearchVariantSetsResponse": { - "description": "The search variant sets response.", - "type": "object", - "properties": { - "variantSets": { - "description": "The variant sets belonging to the requested dataset.", + }, + "ncbiTaxonId": { + "description": "ID from http://www.ncbi.nlm.nih.gov/taxonomy (for example, 9606 for human)\nindicating the species which this reference set is intended to model. Note\nthat contained references may specify a different `ncbiTaxonId`, as\nassemblies may contain reference sequences which do not belong to the\nmodeled species, for example EBV in a human reference genome.", + "format": "int32", + "type": "integer" + }, + "sourceUri": { + "description": "The URI from which the references were obtained.", + "type": "string" + }, + "referenceIds": { + "description": "The IDs of the reference objects that are part of this set.\n`Reference.md5checksum` must be unique within this set.", "type": "array", "items": { - "$ref": "VariantSet" + "type": "string" } }, - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "assemblyId": { + "description": "Public id of this reference set, such as `GRCh37`.", + "type": "string" + }, + "md5checksum": { + "description": "Order-independent MD5 checksum which identifies this reference set. The\nchecksum is computed by sorting all lower case hexidecimal string\n`reference.md5checksum` (for all reference in this set) in\nascending lexicographic order, concatenating, and taking the MD5 of that\nvalue. 
The resulting value is represented in lower case hexadecimal format.", "type": "string" } }, - "id": "SearchVariantSetsResponse" + "id": "ReferenceSet", + "description": "A reference set is a set of references which typically comprise a reference\nassembly for a species, such as `GRCh38` which is representative\nof the human genome. A reference set defines a common coordinate space for\ncomparing reference-aligned experimental data. A reference set contains 1 or\nmore references.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "type": "object" }, - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. 
If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", - "type": "object", + "CigarUnit": { "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" + "operation": { + "enum": [ + "OPERATION_UNSPECIFIED", + "ALIGNMENT_MATCH", + "INSERT", + "DELETE", + "SKIP", + "CLIP_SOFT", + "CLIP_HARD", + "PAD", + "SEQUENCE_MATCH", + "SEQUENCE_MISMATCH" + ], + "type": "string", + "enumDescriptions": [ + "", + "An alignment match indicates that a sequence can be aligned to the\nreference without evidence of an INDEL. Unlike the\n`SEQUENCE_MATCH` and `SEQUENCE_MISMATCH` operators,\nthe `ALIGNMENT_MATCH` operator does not indicate whether the\nreference and read sequences are an exact match. This operator is\nequivalent to SAM's `M`.", + "The insert operator indicates that the read contains evidence of bases\nbeing inserted into the reference. This operator is equivalent to SAM's\n`I`.", + "The delete operator indicates that the read contains evidence of bases\nbeing deleted from the reference. This operator is equivalent to SAM's\n`D`.", + "The skip operator indicates that this read skips a long segment of the\nreference, but the bases have not been deleted. This operator is commonly\nused when working with RNA-seq data, where reads may skip long segments\nof the reference between exons. This operator is equivalent to SAM's\n`N`.", + "The soft clip operator indicates that bases at the start/end of a read\nhave not been considered during alignment. This may occur if the majority\nof a read maps, except for low quality bases at the start/end of a read.\nThis operator is equivalent to SAM's `S`. Bases that are soft\nclipped will still be stored in the read.", + "The hard clip operator indicates that bases at the start/end of a read\nhave been omitted from this alignment. This may occur if this linear\nalignment is part of a chimeric alignment, or if the read has been\ntrimmed (for example, during error correction or to trim poly-A tails for\nRNA-seq). This operator is equivalent to SAM's `H`.", + "The pad operator indicates that there is padding in an alignment. This\noperator is equivalent to SAM's `P`.", + "This operator indicates that this portion of the aligned sequence exactly\nmatches the reference. This operator is equivalent to SAM's `=`.", + "This operator indicates that this portion of the aligned sequence is an\nalignment match to the reference, but a sequence mismatch. This can\nindicate a SNP or a read error. This operator is equivalent to SAM's\n`X`." + ] }, - "details": { - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", - "type": "array", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - } + "referenceSequence": { + "description": "`referenceSequence` is only used at mismatches\n(`SEQUENCE_MISMATCH`) and deletions (`DELETE`).\nFilling this field replaces SAM's MD tag. If the relevant information is\nnot available, this field is unset.", + "type": "string" }, - "message": { - "description": "A developer-facing error message, which should be in English. 
Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "operationLength": { + "description": "The number of genomic bases that the operation runs for. Required.", + "format": "int64", "type": "string" } }, - "id": "Status" + "id": "CigarUnit", + "description": "A single CIGAR operation.", + "type": "object" }, - "SearchCallSetsRequest": { - "description": "The call set search request.", - "type": "object", + "AnnotationSet": { "properties": { - "variantSetIds": { - "description": "Restrict the query to call sets within the given variant sets. At least one\nID must be provided.", - "type": "array", - "items": { - "type": "string" - } + "sourceUri": { + "description": "The source URI describing the file from which this annotation set was\ngenerated, if any.", + "type": "string" }, - "pageSize": { - "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024.", - "type": "integer", - "format": "int32" + "datasetId": { + "description": "The dataset to which this annotation set belongs.", + "type": "string" }, "name": { - "description": "Only return call sets for which a substring of the name matches this\nstring.", + "description": "The display name for this annotation set.", + "type": "string" + }, + "referenceSetId": { + "description": "The ID of the reference set that defines the coordinate space for this\nset's annotations.", "type": "string" }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "info": { + "additionalProperties": { + "type": "array", + "items": { + "type": "any" + } + }, + "description": "A map of additional read alignment information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", + "type": "object" + }, + "type": { + "enum": [ + "ANNOTATION_TYPE_UNSPECIFIED", + "GENERIC", + "VARIANT", + "GENE", + "TRANSCRIPT" + ], + "description": "The type of annotations contained within this set.", + "type": "string", + "enumDescriptions": [ + "", + "A `GENERIC` annotation type should be used when no other annotation\ntype will suffice. This represents an untyped annotation of the reference\ngenome.", + "A `VARIANT` annotation type.", + "A `GENE` annotation type represents the existence of a gene at the\nassociated reference coordinates. The start coordinate is typically the\ngene's transcription start site and the end is typically the end of the\ngene's last exon.", + "A `TRANSCRIPT` annotation type represents the assertion that a\nparticular region of the reference genome may be transcribed as RNA." + ] + }, + "id": { + "description": "The server-generated annotation set ID, unique across all annotation sets.", "type": "string" } }, - "id": "SearchCallSetsRequest" + "id": "AnnotationSet", + "description": "An annotation set is a logical grouping of annotations that share consistent\ntype information and provenance. Examples of annotation sets include 'all\ngenes from refseq', and 'all variant annotations from ClinVar'.", + "type": "object" }, - "BatchCreateAnnotationsRequest": { - "type": "object", + "Transcript": { "properties": { - "annotations": { - "description": "The annotations to be created. 
At most 4096 can be specified in a single\nrequest.", + "exons": { + "description": "The \u003ca href=\"http://en.wikipedia.org/wiki/Exon\"\u003eexons\u003c/a\u003e that compose\nthis transcript. This field should be unset for genomes where transcript\nsplicing does not occur, for example prokaryotes.\n\nIntrons are regions of the transcript that are not included in the\nspliced RNA product. Though not explicitly modeled here, intron ranges can\nbe deduced; all regions of this transcript that are not exons are introns.\n\nExonic sequences do not necessarily code for a translational product\n(amino acids). Only the regions of exons bounded by the\ncodingSequence correspond\nto coding DNA sequence.\n\nExons are ordered by start position and may not overlap.", "type": "array", "items": { - "$ref": "Annotation" + "$ref": "Exon" } }, - "requestId": { - "description": "A unique request ID which enables the server to detect duplicated requests.\nIf provided, duplicated requests will result in the same response; if not\nprovided, duplicated requests may result in duplicated data. For a given\nannotation set, callers should not reuse `request_id`s when writing\ndifferent batches of annotations - behavior in this case is undefined.\nA common approach is to use a UUID. For batch jobs where worker crashes are\na possibility, consider using some unique variant of a worker or run ID.", + "codingSequence": { + "description": "The range of the coding sequence for this transcript, if any. To determine\nthe exact ranges of coding sequence, intersect this range with those of the\nexons, if any. If there are any\nexons, the\ncodingSequence must start\nand end within them.\n\nNote that in some cases, the reference genome will not exactly match the\nobserved mRNA transcript e.g. due to variance in the source genome from\nreference. In these cases,\nexon.frame will not necessarily\nmatch the expected reference reading frame and coding exon reference bases\ncannot necessarily be concatenated to produce the original transcript mRNA.", + "$ref": "CodingSequence" + }, + "geneId": { + "description": "The annotation ID of the gene from which this transcript is transcribed.", "type": "string" } }, - "id": "BatchCreateAnnotationsRequest" + "id": "Transcript", + "description": "A transcript represents the assertion that a particular region of the\nreference genome may be transcribed as RNA.", + "type": "object" }, - "ListCoverageBucketsResponse": { - "type": "object", + "Experiment": { "properties": { - "coverageBuckets": { - "description": "The coverage buckets. The list of buckets is sparse; a bucket with 0\noverlapping reads is not returned. A bucket never crosses more than one\nreference sequence. Each bucket has width `bucketWidth`, unless\nits end is the end of the reference sequence.", - "type": "array", - "items": { - "$ref": "CoverageBucket" - } + "libraryId": { + "description": "A client-supplied library identifier; a library is a collection of DNA\nfragments which have been prepared for sequencing from a sample. This\nfield is important for quality control as error or bias can be introduced\nduring sample preparation.", + "type": "string" }, - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "instrumentModel": { + "description": "The instrument model used as part of this experiment. 
This maps to\nsequencing technology in the SAM spec.", "type": "string" }, - "bucketWidth": { - "description": "The length of each coverage bucket in base pairs. Note that buckets at the\nend of a reference sequence may be shorter. This value is omitted if the\nbucket width is infinity (the default behaviour, with no range or\n`targetBucketWidth`).", - "type": "string", - "format": "int64" + "sequencingCenter": { + "description": "The sequencing center used as part of this experiment.", + "type": "string" + }, + "platformUnit": { + "description": "The platform unit used as part of this experiment, for example\nflowcell-barcode.lane for Illumina or slide for SOLiD. Corresponds to the\n@RG PU field in the SAM spec.", + "type": "string" } }, - "id": "ListCoverageBucketsResponse" + "id": "Experiment", + "type": "object" }, "ListDatasetsResponse": { "description": "The dataset list response.", "type": "object", "properties": { - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", - "type": "string" - }, "datasets": { "description": "The list of matching Datasets.", "type": "array", "items": { "$ref": "Dataset" } - } - }, - "id": "ListDatasetsResponse" - }, - "ListBasesResponse": { - "type": "object", - "properties": { - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", - "type": "string" - }, - "offset": { - "description": "The offset position (0-based) of the given `sequence` from the\nstart of this `Reference`. This value will differ for each page\nin a paginated request.", - "type": "string", - "format": "int64" - }, - "sequence": { - "description": "A substring of the bases that make up this reference.", - "type": "string" - } - }, - "id": "ListBasesResponse" - }, - "SearchAnnotationsResponse": { - "type": "object", - "properties": { - "annotations": { - "description": "The matching annotations.", - "type": "array", - "items": { - "$ref": "Annotation" - } }, "nextPageToken": { "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", "type": "string" } }, - "id": "SearchAnnotationsResponse" - }, - "SearchReadGroupSetsRequest": { - "description": "The read group set search request.", - "type": "object", - "properties": { - "pageSize": { - "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 256. The maximum value is 1024.", - "type": "integer", - "format": "int32" - }, - "datasetIds": { - "description": "Restricts this query to read group sets within the given datasets. 
At least\none ID must be provided.", - "type": "array", - "items": { - "type": "string" - } - }, - "name": { - "description": "Only return read group sets for which a substring of the name matches this\nstring.", - "type": "string" - }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", - "type": "string" - } - }, - "id": "SearchReadGroupSetsRequest" + "id": "ListDatasetsResponse" }, "TestIamPermissionsRequest": { "description": "Request message for `TestIamPermissions` method.", @@ -2666,147 +2509,181 @@ }, "id": "TestIamPermissionsRequest" }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", + "ExportReadGroupSetRequest": { + "description": "The read group set export request.", "type": "object", "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "exportUri": { + "description": "Required. A Google Cloud Storage URI for the exported BAM file.\nThe currently authenticated user must have write access to the new file.\nAn error will be returned if the URI already contains data.", + "type": "string" + }, + "referenceNames": { + "description": "The reference names to export. If this is not specified, all reference\nsequences, including unmapped reads, are exported.\nUse `*` to export only unmapped reads.", "type": "array", "items": { "type": "string" } + }, + "projectId": { + "description": "Required. The Google Cloud project ID that owns this\nexport. The caller must have WRITE access to this project.", + "type": "string" } }, - "id": "TestIamPermissionsResponse" + "id": "ExportReadGroupSetRequest" }, - "MergeVariantsRequest": { - "type": "object", + "Exon": { "properties": { - "variantSetId": { - "description": "The destination variant set.", + "start": { + "description": "The start position of the exon on this annotation's reference sequence,\n0-based inclusive. Note that this is relative to the reference start, and\n**not** the containing annotation start.", + "format": "int64", "type": "string" }, - "variants": { - "description": "The variants to be merged with existing variants.", - "type": "array", - "items": { - "$ref": "Variant" - } + "end": { + "description": "The end position of the exon on this annotation's reference sequence,\n0-based exclusive. Note that this is relative to the reference start, and\n*not* the containing annotation start.", + "format": "int64", + "type": "string" }, - "infoMergeConfig": { - "description": "A mapping between info field keys and the InfoMergeOperations to\nbe performed on them.", - "additionalProperties": { - "enum": [ - "INFO_MERGE_OPERATION_UNSPECIFIED", - "IGNORE_NEW", - "MOVE_TO_CALLS" - ], - "type": "string" - }, - "type": "object" + "frame": { + "description": "The frame of this exon. Contains a value of 0, 1, or 2, which indicates\nthe offset of the first coding base of the exon within the reading frame\nof the coding DNA sequence, if any. This field is dependent on the\nstrandedness of this annotation (see\nAnnotation.reverse_strand).\nFor forward stranded annotations, this offset is relative to the\nexon.start. For reverse\nstrand annotations, this offset is relative to the\nexon.end `- 1`.\n\nUnset if this exon does not intersect the coding sequence. 
Upon creation\nof a transcript, the frame must be populated for all or none of the\ncoding exons.", + "format": "int32", + "type": "integer" } }, - "id": "MergeVariantsRequest" + "id": "Exon", + "type": "object" }, - "ImportVariantsResponse": { - "description": "The variant data import response.", + "CallSet": { + "description": "A call set is a collection of variant calls, typically for one sample. It\nbelongs to a variant set.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", "type": "object", "properties": { - "callSetIds": { - "description": "IDs of the call sets created during the import.", + "variantSetIds": { + "description": "The IDs of the variant sets this call set belongs to. This field must\nhave exactly length one, as a call set belongs to a single variant set.\nThis field is repeated for compatibility with the\n[GA4GH 0.5.1\nAPI](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variants.avdl#L76).", "type": "array", "items": { "type": "string" } - } - }, - "id": "ImportVariantsResponse" - }, - "CoverageBucket": { - "description": "A bucket over which read coverage has been precomputed. A bucket corresponds\nto a specific range of the reference sequence.", - "type": "object", - "properties": { - "range": { - "description": "The genomic coordinate range spanned by this bucket.", - "$ref": "Range" }, - "meanCoverage": { - "description": "The average number of reads which are aligned to each individual\nreference base in this bucket.", - "type": "number", - "format": "float" - } - }, - "id": "CoverageBucket" - }, - "CallSet": { - "description": "A call set is a collection of variant calls, typically for one sample. It\nbelongs to a variant set.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "type": "object", - "properties": { "id": { "description": "The server-generated call set ID, unique across all call sets.", "type": "string" }, "created": { "description": "The date this call set was created in milliseconds from the epoch.", - "type": "string", - "format": "int64" + "format": "int64", + "type": "string" }, "sampleId": { "description": "The sample ID this call set corresponds to.", "type": "string" }, - "variantSetIds": { - "description": "The IDs of the variant sets this call set belongs to. This field must\nhave exactly length one, as a call set belongs to a single variant set.\nThis field is repeated for compatibility with the\n[GA4GH 0.5.1\nAPI](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variants.avdl#L76).", - "type": "array", - "items": { - "type": "string" - } + "name": { + "description": "The call set name.", + "type": "string" }, "info": { - "description": "A map of additional call set information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "additionalProperties": { "type": "array", "items": { "type": "any" } }, + "description": "A map of additional call set information. 
This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "type": "object" + } + }, + "id": "CallSet" + }, + "SearchAnnotationSetsResponse": { + "type": "object", + "properties": { + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "type": "string" }, - "name": { - "description": "The call set name.", + "annotationSets": { + "description": "The matching annotation sets.", + "type": "array", + "items": { + "$ref": "AnnotationSet" + } + } + }, + "id": "SearchAnnotationSetsResponse" + }, + "ImportVariantsRequest": { + "properties": { + "infoMergeConfig": { + "additionalProperties": { + "enum": [ + "INFO_MERGE_OPERATION_UNSPECIFIED", + "IGNORE_NEW", + "MOVE_TO_CALLS" + ], + "type": "string" + }, + "description": "A mapping between info field keys and the InfoMergeOperations to\nbe performed on them. This is plumbed down to the MergeVariantRequests\ngenerated by the resulting import job.", + "type": "object" + }, + "variantSetId": { + "description": "Required. The variant set to which variant data should be imported.", "type": "string" + }, + "sourceUris": { + "description": "A list of URIs referencing variant files in Google Cloud Storage. URIs can\ninclude wildcards [as described\nhere](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).\nNote that recursive wildcards ('**') are not supported.", + "type": "array", + "items": { + "type": "string" + } + }, + "normalizeReferenceNames": { + "description": "Convert reference names to the canonical representation.\nhg19 haploytypes (those reference names containing \"_hap\")\nare not modified in any way.\nAll other reference names are modified according to the following rules:\nThe reference name is capitalized.\nThe \"chr\" prefix is dropped for all autosomes and sex chromsomes.\nFor example \"chr17\" becomes \"17\" and \"chrX\" becomes \"X\".\nAll mitochondrial chromosomes (\"chrM\", \"chrMT\", etc) become \"MT\".", + "type": "boolean" + }, + "format": { + "enum": [ + "FORMAT_UNSPECIFIED", + "FORMAT_VCF", + "FORMAT_COMPLETE_GENOMICS" + ], + "description": "The format of the variant data being imported. If unspecified, defaults to\nto `VCF`.", + "type": "string", + "enumDescriptions": [ + "", + "VCF (Variant Call Format). The VCF files may be gzip compressed. gVCF is\nalso supported.", + "Complete Genomics masterVarBeta format. The masterVarBeta files may\nbe bzip2 compressed." + ] } }, - "id": "CallSet" + "id": "ImportVariantsRequest", + "description": "The variant data import request.", + "type": "object" }, - "VariantAnnotation": { - "type": "object", + "ListCoverageBucketsResponse": { "properties": { - "alternateBases": { - "description": "The alternate allele for this variant. If multiple alternate alleles\nexist at this location, create a separate variant for each one, as they\nmay represent distinct conditions.", + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", "type": "string" }, - "geneId": { - "description": "Google annotation ID of the gene affected by this variant. 
This should\nbe provided when the variant is created.", + "bucketWidth": { + "description": "The length of each coverage bucket in base pairs. Note that buckets at the\nend of a reference sequence may be shorter. This value is omitted if the\nbucket width is infinity (the default behaviour, with no range or\n`targetBucketWidth`).", + "format": "int64", "type": "string" }, - "transcriptIds": { - "description": "Google annotation IDs of the transcripts affected by this variant. These\nshould be provided when the variant is created.", - "type": "array", - "items": { - "type": "string" - } - }, - "conditions": { - "description": "The set of conditions associated with this variant.\nA condition describes the way a variant influences human health.", + "coverageBuckets": { + "description": "The coverage buckets. The list of buckets is sparse; a bucket with 0\noverlapping reads is not returned. A bucket never crosses more than one\nreference sequence. Each bucket has width `bucketWidth`, unless\nits end is the end of the reference sequence.", "type": "array", "items": { - "$ref": "ClinicalCondition" + "$ref": "CoverageBucket" } - }, + } + }, + "id": "ListCoverageBucketsResponse", + "type": "object" + }, + "VariantAnnotation": { + "type": "object", + "properties": { "effect": { - "description": "Effect of the variant on the coding sequence.", "enum": [ "EFFECT_UNSPECIFIED", "EFFECT_OTHER", @@ -2818,6 +2695,8 @@ "STOP_LOSS", "SPLICE_SITE_DISRUPTION" ], + "description": "Effect of the variant on the coding sequence.", + "type": "string", "enumDescriptions": [ "", "`EFFECT_OTHER` should be used when no other Effect\nwill suffice.", @@ -2828,11 +2707,48 @@ "`STOP_GAIN` indicates a mutation that leads to the creation\nof a stop codon at the variant site. Frameshift mutations creating\ndownstream stop codons do not count as `STOP_GAIN`.", "`STOP_LOSS` indicates a mutation that eliminates a\nstop codon at the variant site.", "`SPLICE_SITE_DISRUPTION` indicates that this variant is\nfound in a splice site for the associated transcript, and alters the\nnormal splicing pattern." + ] + }, + "transcriptIds": { + "description": "Google annotation IDs of the transcripts affected by this variant. These\nshould be provided when the variant is created.", + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "enumDescriptions": [ + "", + "`TYPE_OTHER` should be used when no other Type will suffice.\nFurther explanation of the variant type may be included in the\ninfo field.", + "`INSERTION` indicates an insertion.", + "`DELETION` indicates a deletion.", + "`SUBSTITUTION` indicates a block substitution of\ntwo or more nucleotides.", + "`SNP` indicates a single nucleotide polymorphism.", + "`STRUCTURAL` indicates a large structural variant,\nincluding chromosomal fusions, inversions, etc.", + "`CNV` indicates a variation in copy number." + ], + "enum": [ + "TYPE_UNSPECIFIED", + "TYPE_OTHER", + "INSERTION", + "DELETION", + "SUBSTITUTION", + "SNP", + "STRUCTURAL", + "CNV" ], + "description": "Type has been adapted from ClinVar's list of variant types.", + "type": "string" + }, + "alternateBases": { + "description": "The alternate allele for this variant. If multiple alternate alleles\nexist at this location, create a separate variant for each one, as they\nmay represent distinct conditions.", + "type": "string" + }, + "geneId": { + "description": "Google annotation ID of the gene affected by this variant. 
This should\nbe provided when the variant is created.", "type": "string" }, "clinicalSignificance": { - "description": "Describes the clinical significance of a variant.\nIt is adapted from the ClinVar controlled vocabulary for clinical\nsignificance described at:\nhttp://www.ncbi.nlm.nih.gov/clinvar/docs/clinsig/", "enum": [ "CLINICAL_SIGNIFICANCE_UNSPECIFIED", "CLINICAL_SIGNIFICANCE_OTHER", @@ -2849,6 +2765,8 @@ "PROTECTIVE", "MULTIPLE_REPORTED" ], + "description": "Describes the clinical significance of a variant.\nIt is adapted from the ClinVar controlled vocabulary for clinical\nsignificance described at:\nhttp://www.ncbi.nlm.nih.gov/clinvar/docs/clinsig/", + "type": "string", "enumDescriptions": [ "", "`OTHER` should be used when no other clinical significance\nvalue will suffice.", @@ -2864,399 +2782,535 @@ "", "", "`MULTIPLE_REPORTED` should be used when multiple clinical\nsignficances are reported for a variant. The original clinical\nsignificance values may be provided in the `info` field." - ], - "type": "string" + ] }, - "type": { - "description": "Type has been adapted from ClinVar's list of variant types.", - "enum": [ - "TYPE_UNSPECIFIED", - "TYPE_OTHER", - "INSERTION", - "DELETION", - "SUBSTITUTION", - "SNP", - "STRUCTURAL", - "CNV" - ], + "conditions": { + "description": "The set of conditions associated with this variant.\nA condition describes the way a variant influences human health.", + "type": "array", + "items": { + "$ref": "ClinicalCondition" + } + } + }, + "id": "VariantAnnotation" + }, + "ExportVariantSetRequest": { + "properties": { + "format": { "enumDescriptions": [ "", - "`TYPE_OTHER` should be used when no other Type will suffice.\nFurther explanation of the variant type may be included in the\ninfo field.", - "`INSERTION` indicates an insertion.", - "`DELETION` indicates a deletion.", - "`SUBSTITUTION` indicates a block substitution of\ntwo or more nucleotides.", - "`SNP` indicates a single nucleotide polymorphism.", - "`STRUCTURAL` indicates a large structural variant,\nincluding chromosomal fusions, inversions, etc.", - "`CNV` indicates a variation in copy number." + "Export the data to Google BigQuery." + ], + "enum": [ + "FORMAT_UNSPECIFIED", + "FORMAT_BIGQUERY" ], + "description": "The format for the exported data.", + "type": "string" + }, + "bigqueryDataset": { + "description": "Required. The BigQuery dataset to export data to. This dataset must already\nexist. Note that this is distinct from the Genomics concept of \"dataset\".", + "type": "string" + }, + "bigqueryTable": { + "description": "Required. The BigQuery table to export data to.\nIf the table doesn't exist, it will be created. If it already exists, it\nwill be overwritten.", + "type": "string" + }, + "callSetIds": { + "description": "If provided, only variant call information from the specified call sets\nwill be exported. By default all variant calls are exported.", + "type": "array", + "items": { + "type": "string" + } + }, + "projectId": { + "description": "Required. The Google Cloud project ID that owns the destination\nBigQuery dataset. The caller must have WRITE access to this project. 
This\nproject will also own the resulting export job.", "type": "string" } }, - "id": "VariantAnnotation" + "id": "ExportVariantSetRequest", + "description": "The variant data export request.", + "type": "object" }, - "ImportVariantsRequest": { - "description": "The variant data import request.", + "SearchAnnotationsRequest": { "type": "object", "properties": { - "variantSetId": { - "description": "Required. The variant set to which variant data should be imported.", + "referenceId": { + "description": "The ID of the reference to query.", + "type": "string" + }, + "end": { + "description": "The end position of the range on the reference, 0-based exclusive. If\nreferenceId or\nreferenceName\nmust be specified, Defaults to the length of the reference.", + "format": "int64", + "type": "string" + }, + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 256. The maximum value is 2048.", + "format": "int32", + "type": "integer" + }, + "start": { + "description": "The start position of the range on the reference, 0-based inclusive. If\nspecified,\nreferenceId or\nreferenceName\nmust be specified. Defaults to 0.", + "format": "int64", + "type": "string" + }, + "annotationSetIds": { + "description": "Required. The annotation sets to search within. The caller must have\n`READ` access to these annotation sets.\nAll queried annotation sets must have the same type.", + "type": "array", + "items": { + "type": "string" + } + }, + "referenceName": { + "description": "The name of the reference to query, within the reference set associated\nwith this query.", + "type": "string" + } + }, + "id": "SearchAnnotationsRequest" + }, + "OperationEvent": { + "properties": { + "startTime": { + "description": "Optional time of when event started.", + "format": "google-datetime", + "type": "string" + }, + "description": { + "description": "Required description of event.", + "type": "string" + }, + "endTime": { + "description": "Optional time of when event finished. An event can have a start time and no\nfinish time. If an event has a finish time, there must be a start time.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "OperationEvent", + "description": "An event that occurred during an Operation.", + "type": "object" + }, + "CodingSequence": { + "type": "object", + "properties": { + "start": { + "description": "The start of the coding sequence on this annotation's reference sequence,\n0-based inclusive. Note that this position is relative to the reference\nstart, and *not* the containing annotation start.", + "format": "int64", + "type": "string" + }, + "end": { + "description": "The end of the coding sequence on this annotation's reference sequence,\n0-based exclusive. Note that this position is relative to the reference\nstart, and *not* the containing annotation start.", + "format": "int64", + "type": "string" + } + }, + "id": "CodingSequence" + }, + "SearchReferencesResponse": { + "properties": { + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. 
This field will be empty if there aren't any additional results.", + "type": "string" + }, + "references": { + "description": "The matching references.", + "type": "array", + "items": { + "$ref": "Reference" + } + } + }, + "id": "SearchReferencesResponse", + "type": "object" + }, + "TestIamPermissionsResponse": { + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse", + "description": "Response message for `TestIamPermissions` method.", + "type": "object" + }, + "GetIamPolicyRequest": { + "description": "Request message for `GetIamPolicy` method.", + "type": "object", + "properties": {}, + "id": "GetIamPolicyRequest" + }, + "SearchAnnotationSetsRequest": { + "type": "object", + "properties": { + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", "type": "string" }, - "sourceUris": { - "description": "A list of URIs referencing variant files in Google Cloud Storage. URIs can\ninclude wildcards [as described\nhere](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).\nNote that recursive wildcards ('**') are not supported.", + "pageSize": { + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 128. The maximum value is 1024.", + "format": "int32", + "type": "integer" + }, + "datasetIds": { + "description": "Required. The dataset IDs to search within. Caller must have `READ` access\nto these datasets.", "type": "array", "items": { "type": "string" } }, - "format": { - "description": "The format of the variant data being imported. If unspecified, defaults to\nto `VCF`.", - "enum": [ - "FORMAT_UNSPECIFIED", - "FORMAT_VCF", - "FORMAT_COMPLETE_GENOMICS" - ], + "types": { + "description": "If specified, only annotation sets that have any of these types are\nreturned.", + "type": "array", + "items": { + "enum": [ + "ANNOTATION_TYPE_UNSPECIFIED", + "GENERIC", + "VARIANT", + "GENE", + "TRANSCRIPT" + ], + "type": "string" + }, "enumDescriptions": [ "", - "VCF (Variant Call Format). The VCF files may be gzip compressed. gVCF is\nalso supported.", - "Complete Genomics masterVarBeta format. The masterVarBeta files may\nbe bzip2 compressed." - ], + "A `GENERIC` annotation type should be used when no other annotation\ntype will suffice. This represents an untyped annotation of the reference\ngenome.", + "A `VARIANT` annotation type.", + "A `GENE` annotation type represents the existence of a gene at the\nassociated reference coordinates. The start coordinate is typically the\ngene's transcription start site and the end is typically the end of the\ngene's last exon.", + "A `TRANSCRIPT` annotation type represents the assertion that a\nparticular region of the reference genome may be transcribed as RNA." 
+ ] + }, + "name": { + "description": "Only return annotations sets for which a substring of the name matches this\nstring (case insensitive).", "type": "string" }, - "normalizeReferenceNames": { - "description": "Convert reference names to the canonical representation.\nhg19 haploytypes (those reference names containing \"_hap\")\nare not modified in any way.\nAll other reference names are modified according to the following rules:\nThe reference name is capitalized.\nThe \"chr\" prefix is dropped for all autosomes and sex chromsomes.\nFor example \"chr17\" becomes \"17\" and \"chrX\" becomes \"X\".\nAll mitochondrial chromosomes (\"chrM\", \"chrMT\", etc) become \"MT\".", - "type": "boolean" + "referenceSetId": { + "description": "If specified, only annotation sets associated with the given reference set\nare returned.", + "type": "string" + } + }, + "id": "SearchAnnotationSetsRequest" + }, + "SearchReadGroupSetsResponse": { + "properties": { + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "type": "string" }, - "infoMergeConfig": { - "description": "A mapping between info field keys and the InfoMergeOperations to\nbe performed on them. This is plumbed down to the MergeVariantRequests\ngenerated by the resulting import job.", - "additionalProperties": { - "enum": [ - "INFO_MERGE_OPERATION_UNSPECIFIED", - "IGNORE_NEW", - "MOVE_TO_CALLS" - ], - "type": "string" - }, - "type": "object" + "readGroupSets": { + "description": "The list of matching read group sets.", + "type": "array", + "items": { + "$ref": "ReadGroupSet" + } } }, - "id": "ImportVariantsRequest" + "id": "SearchReadGroupSetsResponse", + "description": "The read group set search response.", + "type": "object" }, - "SearchCallSetsResponse": { - "description": "The call set search response.", + "LinearAlignment": { + "description": "A linear alignment can be represented by one CIGAR string. Describes the\nmapped position and local alignment of the read to the reference.", "type": "object", "properties": { - "callSets": { - "description": "The list of matching call sets.", + "position": { + "$ref": "Position", + "description": "The position of this alignment." + }, + "cigar": { + "description": "Represents the local alignment of this sequence (alignment matches, indels,\netc) against the reference.", "type": "array", "items": { - "$ref": "CallSet" + "$ref": "CigarUnit" } }, - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", - "type": "string" + "mappingQuality": { + "description": "The mapping quality of this alignment. 
Represents how likely\nthe read maps to this position as opposed to other locations.\n\nSpecifically, this is -10 log10 Pr(mapping position is wrong), rounded to\nthe nearest integer.", + "format": "int32", + "type": "integer" } }, - "id": "SearchCallSetsResponse" + "id": "LinearAlignment" }, - "SearchReferenceSetsRequest": { + "SearchReferencesRequest": { "type": "object", "properties": { - "md5checksums": { - "description": "If present, return reference sets for which the\nmd5checksum matches exactly.", + "accessions": { + "description": "If present, return references for which a prefix of any of\nsourceAccessions match\nany of these strings. Accession numbers typically have a main number and a\nversion, for example `GCF_000001405.26`.", "type": "array", "items": { "type": "string" } }, + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string" + }, + "referenceSetId": { + "description": "If present, return only references which belong to this reference set.", + "type": "string" + }, "pageSize": { "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024. The maximum value is 4096.", - "type": "integer", - "format": "int32" + "format": "int32", + "type": "integer" }, - "accessions": { - "description": "If present, return reference sets for which a prefix of any of\nsourceAccessions\nmatch any of these strings. Accession numbers typically have a main number\nand a version, for example `NC_000001.11`.", + "md5checksums": { + "description": "If present, return references for which the\nmd5checksum matches exactly.", "type": "array", "items": { "type": "string" } - }, - "assemblyId": { - "description": "If present, return reference sets for which a substring of their\n`assemblyId` matches this string (case insensitive).", - "type": "string" - }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", - "type": "string" } }, - "id": "SearchReferenceSetsRequest" + "id": "SearchReferencesRequest" }, - "Variant": { - "description": "A variant represents a change in DNA sequence relative to a reference\nsequence. For example, a variant could represent a SNP or an insertion.\nVariants belong to a variant set.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)\n\nEach of the calls on a variant represent a determination of genotype with\nrespect to that variant. For example, a call might assign probability of 0.32\nto the occurrence of a SNP named rs1234 in a sample named NA12345. 
A call\nbelongs to a call set, which contains related calls typically from one\nsample.", + "Dataset": { + "description": "A Dataset is a collection of genomic data.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", "type": "object", "properties": { - "alternateBases": { - "description": "The bases that appear instead of the reference bases.", - "type": "array", - "items": { - "type": "string" - } + "name": { + "description": "The dataset name.", + "type": "string" + }, + "projectId": { + "description": "The Google Cloud project ID that this dataset belongs to.", + "type": "string" }, "id": { - "description": "The server-generated variant ID, unique across all variants.", + "description": "The server-generated dataset ID, unique across all datasets.", "type": "string" }, - "filter": { - "description": "A list of filters (normally quality filters) this variant has failed.\n`PASS` indicates this variant has passed all filters.", + "createTime": { + "description": "The time this dataset was created, in seconds from the epoch.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "Dataset" + }, + "ImportVariantsResponse": { + "properties": { + "callSetIds": { + "description": "IDs of the call sets created during the import.", "type": "array", "items": { "type": "string" } - }, - "referenceName": { - "description": "The reference on which this variant occurs.\n(such as `chr20` or `X`)", + } + }, + "id": "ImportVariantsResponse", + "description": "The variant data import response.", + "type": "object" + }, + "ReadGroup": { + "properties": { + "name": { + "description": "The read group name. This corresponds to the @RG ID field in the SAM spec.", "type": "string" }, - "created": { - "description": "The date this variant was created, in milliseconds from the epoch.", - "type": "string", - "format": "int64" + "referenceSetId": { + "description": "The reference set the reads in this read group are aligned to.", + "type": "string" }, "info": { - "description": "A map of additional variant information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "additionalProperties": { "type": "array", "items": { "type": "any" } }, + "description": "A map of additional read group information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "type": "object" }, - "calls": { - "description": "The variant calls for this particular variant. Each one represents the\ndetermination of genotype with respect to this variant.", - "type": "array", - "items": { - "$ref": "VariantCall" - } - }, - "variantSetId": { - "description": "The ID of the variant set this variant belongs to.", + "id": { + "description": "The server-generated read group ID, unique for all read groups.\nNote: This is different than the @RG ID field in the SAM spec. For that\nvalue, see name.", "type": "string" }, - "names": { - "description": "Names for the variant, for example a RefSNP ID.", + "predictedInsertSize": { + "description": "The predicted insert size of this read group. The insert size is the length\nthe sequenced DNA fragment from end-to-end, not including the adapters.", + "format": "int32", + "type": "integer" + }, + "programs": { + "description": "The programs used to generate this read group. Programs are always\nidentical for all read groups within a read group set. 
For this reason,\nonly the first read group in a returned set will have this field\npopulated.", "type": "array", "items": { - "type": "string" + "$ref": "Program" } }, - "quality": { - "description": "A measure of how likely this variant is to be real.\nA higher value is better.", - "type": "number", - "format": "double" + "description": { + "description": "A free-form text description of this read group.", + "type": "string" }, - "start": { - "description": "The position at which this variant occurs (0-based).\nThis corresponds to the first base of the string of reference bases.", - "type": "string", - "format": "int64" + "sampleId": { + "description": "A client-supplied sample identifier for the reads in this read group.", + "type": "string" }, - "referenceBases": { - "description": "The reference bases for this variant. They start at the given\nposition.", + "datasetId": { + "description": "The dataset to which this read group belongs.", "type": "string" }, - "end": { - "description": "The end position (0-based) of this variant. This corresponds to the first\nbase after the last base in the reference allele. So, the length of\nthe reference allele is (end - start). This is useful for variants\nthat don't explicitly give alternate bases, for example large deletions.", - "type": "string", - "format": "int64" + "experiment": { + "$ref": "Experiment", + "description": "The experiment used to generate this read group." } }, - "id": "Variant" + "id": "ReadGroup", + "description": "A read group is all the data that's processed the same way by the sequencer.", + "type": "object" }, - "Reference": { - "description": "A reference is a canonical assembled DNA sequence, intended to act as a\nreference coordinate space for other genomic annotations. A single reference\nmight represent the human chromosome 1 or mitochandrial DNA, for instance. A\nreference belongs to one or more reference sets.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "type": "object", + "ReadGroupSet": { "properties": { - "id": { - "description": "The server-generated reference ID, unique across all references.", - "type": "string" - }, - "ncbiTaxonId": { - "description": "ID from http://www.ncbi.nlm.nih.gov/taxonomy. For example, 9606 for human.", - "type": "integer", - "format": "int32" - }, - "sourceUri": { - "description": "The URI from which the sequence was obtained. Typically specifies a FASTA\nformat file.", - "type": "string" - }, - "sourceAccessions": { - "description": "All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally\nwith a version number, for example `GCF_000001405.26`.", + "readGroups": { + "description": "The read groups in this set. There are typically 1-10 read groups in a read\ngroup set.", "type": "array", "items": { - "type": "string" + "$ref": "ReadGroup" } }, - "length": { - "description": "The length of this reference's sequence.", - "type": "string", - "format": "int64" + "filename": { + "description": "The filename of the original source file for this read group set, if any.", + "type": "string" }, "name": { - "description": "The name of this reference, for example `22`.", + "description": "The read group set name. 
By default this will be initialized to the sample\nname of the sequenced data contained in this set.", + "type": "string" + }, + "referenceSetId": { + "description": "The reference set to which the reads in this read group set are aligned.", + "type": "string" + }, + "info": { + "additionalProperties": { + "type": "array", + "items": { + "type": "any" + } + }, + "description": "A map of additional read group set information.", + "type": "object" + }, + "id": { + "description": "The server-generated read group set ID, unique for all read group sets.", "type": "string" }, - "md5checksum": { - "description": "MD5 of the upper-case sequence excluding all whitespace characters (this\nis equivalent to SQ:M5 in SAM). This value is represented in lower case\nhexadecimal format.", + "datasetId": { + "description": "The dataset to which this read group set belongs.", "type": "string" } }, - "id": "Reference" + "id": "ReadGroupSet", + "description": "A read group set is a logical collection of read groups, which are\ncollections of reads produced by a sequencer. A read group set typically\nmodels reads corresponding to one sample, sequenced one way, and aligned one\nway.\n\n* A read group set belongs to one dataset.\n* A read group belongs to one read group set.\n* A read belongs to one read group.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", + "type": "object" }, - "SearchAnnotationSetsResponse": { + "SearchVariantSetsResponse": { + "description": "The search variant sets response.", "type": "object", "properties": { "nextPageToken": { "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", "type": "string" }, - "annotationSets": { - "description": "The matching annotation sets.", + "variantSets": { + "description": "The variant sets belonging to the requested dataset.", "type": "array", "items": { - "$ref": "AnnotationSet" + "$ref": "VariantSet" } } }, - "id": "SearchAnnotationSetsResponse" + "id": "SearchVariantSetsResponse" }, - "CigarUnit": { - "description": "A single CIGAR operation.", + "Empty": { + "properties": {}, + "id": "Empty", + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object" + }, + "Entry": { + "properties": { + "status": { + "$ref": "Status", + "description": "The creation status." + }, + "annotation": { + "description": "The created annotation, if creation was successful.", + "$ref": "Annotation" + } + }, + "id": "Entry", + "type": "object" + }, + "Position": { + "description": "An abstraction for referring to a genomic position, in relation to some\nalready known reference. 
For now, represents a genomic position as a\nreference name, a base number on that reference (0-based), and a\ndetermination of forward or reverse strand.", "type": "object", "properties": { - "operation": { - "enum": [ - "OPERATION_UNSPECIFIED", - "ALIGNMENT_MATCH", - "INSERT", - "DELETE", - "SKIP", - "CLIP_SOFT", - "CLIP_HARD", - "PAD", - "SEQUENCE_MATCH", - "SEQUENCE_MISMATCH" - ], - "enumDescriptions": [ - "", - "An alignment match indicates that a sequence can be aligned to the\nreference without evidence of an INDEL. Unlike the\n`SEQUENCE_MATCH` and `SEQUENCE_MISMATCH` operators,\nthe `ALIGNMENT_MATCH` operator does not indicate whether the\nreference and read sequences are an exact match. This operator is\nequivalent to SAM's `M`.", - "The insert operator indicates that the read contains evidence of bases\nbeing inserted into the reference. This operator is equivalent to SAM's\n`I`.", - "The delete operator indicates that the read contains evidence of bases\nbeing deleted from the reference. This operator is equivalent to SAM's\n`D`.", - "The skip operator indicates that this read skips a long segment of the\nreference, but the bases have not been deleted. This operator is commonly\nused when working with RNA-seq data, where reads may skip long segments\nof the reference between exons. This operator is equivalent to SAM's\n`N`.", - "The soft clip operator indicates that bases at the start/end of a read\nhave not been considered during alignment. This may occur if the majority\nof a read maps, except for low quality bases at the start/end of a read.\nThis operator is equivalent to SAM's `S`. Bases that are soft\nclipped will still be stored in the read.", - "The hard clip operator indicates that bases at the start/end of a read\nhave been omitted from this alignment. This may occur if this linear\nalignment is part of a chimeric alignment, or if the read has been\ntrimmed (for example, during error correction or to trim poly-A tails for\nRNA-seq). This operator is equivalent to SAM's `H`.", - "The pad operator indicates that there is padding in an alignment. This\noperator is equivalent to SAM's `P`.", - "This operator indicates that this portion of the aligned sequence exactly\nmatches the reference. This operator is equivalent to SAM's `=`.", - "This operator indicates that this portion of the aligned sequence is an\nalignment match to the reference, but a sequence mismatch. This can\nindicate a SNP or a read error. This operator is equivalent to SAM's\n`X`." - ], + "position": { + "description": "The 0-based offset from the start of the forward strand for that reference.", + "format": "int64", "type": "string" }, - "referenceSequence": { - "description": "`referenceSequence` is only used at mismatches\n(`SEQUENCE_MISMATCH`) and deletions (`DELETE`).\nFilling this field replaces SAM's MD tag. If the relevant information is\nnot available, this field is unset.", + "referenceName": { + "description": "The name of the reference in whatever reference set is being used.", "type": "string" }, - "operationLength": { - "description": "The number of genomic bases that the operation runs for. 
Required.", - "type": "string", - "format": "int64" + "reverseStrand": { + "description": "Whether this position is on the reverse strand, as opposed to the forward\nstrand.", + "type": "boolean" } }, - "id": "CigarUnit" + "id": "Position" }, - "OperationMetadata": { - "description": "Metadata describing an Operation.", + "SearchReferenceSetsResponse": { "type": "object", "properties": { - "clientId": { - "description": "This field is deprecated. Use `labels` instead. Optionally provided by the\ncaller when submitting the request that creates the operation.", - "type": "string" - }, - "request": { - "description": "The original request that started the operation. Note that this will be in\ncurrent version of the API. If the operation was started with v1beta2 API\nand a GetOperation is performed on v1 API, a v1 request will be returned.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "events": { - "description": "Optional event messages that were generated during the job's execution.\nThis also contains any warnings that were generated during import\nor export.", + "referenceSets": { + "description": "The matching references sets.", "type": "array", "items": { - "$ref": "OperationEvent" + "$ref": "ReferenceSet" } }, - "endTime": { - "description": "The time at which the job stopped running.", - "type": "string", - "format": "google-datetime" - }, - "labels": { - "description": "Optionally provided by the caller when submitting the request that creates\nthe operation.", - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - "createTime": { - "description": "The time at which the job was submitted to the Genomics service.", - "type": "string", - "format": "google-datetime" - }, - "runtimeMetadata": { - "description": "Runtime metadata on this Operation.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "startTime": { - "description": "The time at which the job began to run.", - "type": "string", - "format": "google-datetime" - }, - "projectId": { - "description": "The Google Cloud Project in which the job is scoped.", + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", "type": "string" } }, - "id": "OperationMetadata" + "id": "SearchReferenceSetsResponse" }, - "Dataset": { - "description": "A Dataset is a collection of genomic data.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "type": "object", + "SearchCallSetsRequest": { "properties": { - "createTime": { - "description": "The time this dataset was created, in seconds from the epoch.", - "type": "string", - "format": "google-datetime" + "variantSetIds": { + "description": "Restrict the query to call sets within the given variant sets. 
At least one\nID must be provided.", + "type": "array", + "items": { + "type": "string" + } }, - "id": { - "description": "The server-generated dataset ID, unique across all datasets.", + "name": { + "description": "Only return call sets for which a substring of the name matches this\nstring.", "type": "string" }, - "projectId": { - "description": "The Google Cloud project ID that this dataset belongs to.", + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", "type": "string" }, - "name": { - "description": "The dataset name.", - "type": "string" + "pageSize": { + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 1024.", + "format": "int32", + "type": "integer" } }, - "id": "Dataset" + "id": "SearchCallSetsRequest", + "description": "The call set search request.", + "type": "object" }, "ImportReadGroupSetsRequest": { "description": "The read group set import request.", @@ -3267,17 +3321,17 @@ "type": "string" }, "partitionStrategy": { - "description": "The partition strategy describes how read groups are partitioned into read\ngroup sets.", - "enum": [ - "PARTITION_STRATEGY_UNSPECIFIED", - "PER_FILE_PER_SAMPLE", - "MERGE_ALL" - ], "enumDescriptions": [ "", "In most cases, this strategy yields one read group set per file. This is\nthe default behavior.\n\nAllocate one read group set per file per sample. For BAM files, read\ngroups are considered to share a sample if they have identical sample\nnames. Furthermore, all reads for each file which do not belong to a read\ngroup, if any, will be grouped into a single read group set per-file.", "Includes all read groups in all imported files into a single read group\nset. Requires that the headers for all imported files are equivalent. All\nreads which do not belong to a read group, if any, will be grouped into a\nseparate read group set." ], + "enum": [ + "PARTITION_STRATEGY_UNSPECIFIED", + "PER_FILE_PER_SAMPLE", + "MERGE_ALL" + ], + "description": "The partition strategy describes how read groups are partitioned into read\ngroup sets.", "type": "string" }, "datasetId": { @@ -3294,162 +3348,85 @@ }, "id": "ImportReadGroupSetsRequest" }, - "Range": { - "description": "A 0-based half-open genomic coordinate range for search requests.", + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", "type": "object", "properties": { - "referenceName": { - "description": "The reference sequence name, for example `chr1`,\n`1`, or `chrX`.", + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", "type": "string" }, - "end": { - "description": "The end position of the range on the reference, 0-based exclusive.", - "type": "string", - "format": "int64" + "version": { + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32", + "type": "integer" }, - "start": { - "description": "The start position of the range on the reference, 0-based inclusive.", - "type": "string", - "format": "int64" - } - }, - "id": "Range" - }, - "Binding": { - "description": "Associates `members` with a `role`.", - "type": "object", - "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. 
For example, `google.com` or `example.com`.\n\n", + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", "type": "array", "items": { - "type": "string" + "$ref": "Binding" } - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" - } - }, - "id": "Binding" - }, - "RuntimeMetadata": { - "description": "Runtime metadata that will be populated in the\nruntimeMetadata\nfield of the Operation associated with a RunPipeline execution.", - "type": "object", - "properties": { - "computeEngine": { - "description": "Execution information specific to Google Compute Engine.", - "$ref": "ComputeEngine" } }, - "id": "RuntimeMetadata" + "id": "Policy" }, - "SearchReferencesResponse": { - "type": "object", + "Annotation": { "properties": { - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "end": { + "description": "The end position of the range on the reference, 0-based exclusive.", + "format": "int64", "type": "string" }, - "references": { - "description": "The matching references.", - "type": "array", - "items": { - "$ref": "Reference" - } - } - }, - "id": "SearchReferencesResponse" - }, - "ReferenceSet": { - "description": "A reference set is a set of references which typically comprise a reference\nassembly for a species, such as `GRCh38` which is representative\nof the human genome. A reference set defines a common coordinate space for\ncomparing reference-aligned experimental data. A reference set contains 1 or\nmore references.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", - "type": "object", - "properties": { - "id": { - "description": "The server-generated reference set ID, unique across all reference sets.", - "type": "string" + "transcript": { + "$ref": "Transcript", + "description": "A transcript value represents the assertion that a particular region of\nthe reference genome may be transcribed as RNA. An alternative splicing\npattern would be represented as a separate transcript object. This field\nis only set for annotations of type `TRANSCRIPT`." }, - "description": { - "description": "Free text description of this reference set.", + "start": { + "description": "The start position of the range on the reference, 0-based inclusive.", + "format": "int64", "type": "string" }, - "ncbiTaxonId": { - "description": "ID from http://www.ncbi.nlm.nih.gov/taxonomy (for example, 9606 for human)\nindicating the species which this reference set is intended to model. 
Note\nthat contained references may specify a different `ncbiTaxonId`, as\nassemblies may contain reference sequences which do not belong to the\nmodeled species, for example EBV in a human reference genome.", - "type": "integer", - "format": "int32" - }, - "referenceIds": { - "description": "The IDs of the reference objects that are part of this set.\n`Reference.md5checksum` must be unique within this set.", - "type": "array", - "items": { - "type": "string" - } - }, - "sourceUri": { - "description": "The URI from which the references were obtained.", + "annotationSetId": { + "description": "The annotation set to which this annotation belongs.", + "type": "string" + }, + "name": { + "description": "The display name of this annotation.", "type": "string" }, - "sourceAccessions": { - "description": "All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally\nwith a version number, for example `NC_000001.11`.", - "type": "array", - "items": { - "type": "string" - } + "variant": { + "$ref": "VariantAnnotation", + "description": "A variant annotation, which describes the effect of a variant on the\ngenome, the coding sequence, and/or higher level consequences at the\norganism level e.g. pathogenicity. This field is only set for annotations\nof type `VARIANT`." }, - "assemblyId": { - "description": "Public id of this reference set, such as `GRCh37`.", + "referenceId": { + "description": "The ID of the Google Genomics reference associated with this range.", "type": "string" }, - "md5checksum": { - "description": "Order-independent MD5 checksum which identifies this reference set. The\nchecksum is computed by sorting all lower case hexidecimal string\n`reference.md5checksum` (for all reference in this set) in\nascending lexicographic order, concatenating, and taking the MD5 of that\nvalue. The resulting value is represented in lower case hexadecimal format.", - "type": "string" - } - }, - "id": "ReferenceSet" - }, - "AnnotationSet": { - "description": "An annotation set is a logical grouping of annotations that share consistent\ntype information and provenance. Examples of annotation sets include 'all\ngenes from refseq', and 'all variant annotations from ClinVar'.", - "type": "object", - "properties": { "id": { - "description": "The server-generated annotation set ID, unique across all annotation sets.", + "description": "The server-generated annotation ID, unique across all annotations.", + "type": "string" + }, + "reverseStrand": { + "description": "Whether this range refers to the reverse strand, as opposed to the forward\nstrand. Note that regardless of this field, the start/end position of the\nrange always refer to the forward strand.", + "type": "boolean" + }, + "referenceName": { + "description": "The display name corresponding to the reference specified by\n`referenceId`, for example `chr1`, `1`, or `chrX`.", "type": "string" }, "info": { - "description": "A map of additional read alignment information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "additionalProperties": { "type": "array", "items": { "type": "any" } }, + "description": "A map of additional read alignment information. 
This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", "type": "object" }, - "sourceUri": { - "description": "The source URI describing the file from which this annotation set was\ngenerated, if any.", - "type": "string" - }, - "datasetId": { - "description": "The dataset to which this annotation set belongs.", - "type": "string" - }, - "name": { - "description": "The display name for this annotation set.", - "type": "string" - }, - "referenceSetId": { - "description": "The ID of the reference set that defines the coordinate space for this\nset's annotations.", - "type": "string" - }, "type": { - "description": "The type of annotations contained within this set.", - "enum": [ - "ANNOTATION_TYPE_UNSPECIFIED", - "GENERIC", - "VARIANT", - "GENE", - "TRANSCRIPT" - ], "enumDescriptions": [ "", "A `GENERIC` annotation type should be used when no other annotation\ntype will suffice. This represents an untyped annotation of the reference\ngenome.", @@ -3457,104 +3434,183 @@ "A `GENE` annotation type represents the existence of a gene at the\nassociated reference coordinates. The start coordinate is typically the\ngene's transcription start site and the end is typically the end of the\ngene's last exon.", "A `TRANSCRIPT` annotation type represents the assertion that a\nparticular region of the reference genome may be transcribed as RNA." ], + "enum": [ + "ANNOTATION_TYPE_UNSPECIFIED", + "GENERIC", + "VARIANT", + "GENE", + "TRANSCRIPT" + ], + "description": "The data type for this annotation. Must match the containing annotation\nset's type.", "type": "string" } }, - "id": "AnnotationSet" + "id": "Annotation", + "description": "An annotation describes a region of reference genome. The value of an\nannotation may be one of several canonical types, supplemented by arbitrary\ninfo tags. An annotation is not inherently associated with a specific\nsample or individual (though a client could choose to use annotations in\nthis way). Example canonical annotation types are `GENE` and\n`VARIANT`.", + "type": "object" }, - "ImportReadGroupSetsResponse": { - "description": "The read group set import response.", + "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", "type": "object", + "properties": {}, + "id": "CancelOperationRequest" + }, + "SearchReadsRequest": { "properties": { + "pageToken": { + "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 256. The maximum value is 2048.", + "format": "int32", + "type": "integer" + }, + "start": { + "description": "The start position of the range on the reference, 0-based inclusive. If\nspecified, `referenceName` must also be specified.", + "format": "int64", + "type": "string" + }, + "referenceName": { + "description": "The reference sequence name, for example `chr1`, `1`, or `chrX`. If set to\n`*`, only unmapped reads are returned. If unspecified, all reads (mapped\nand unmapped) are returned.", + "type": "string" + }, "readGroupSetIds": { - "description": "IDs of the read group sets that were created.", + "description": "The IDs of the read groups sets within which to search for reads. 
All\nspecified read group sets must be aligned against a common set of reference\nsequences; this defines the genomic coordinates for the query. Must specify\none of `readGroupSetIds` or `readGroupIds`.", + "type": "array", + "items": { + "type": "string" + } + }, + "readGroupIds": { + "description": "The IDs of the read groups within which to search for reads. All specified\nread groups must belong to the same read group sets. Must specify one of\n`readGroupSetIds` or `readGroupIds`.", "type": "array", "items": { "type": "string" } + }, + "end": { + "description": "The end position of the range on the reference, 0-based exclusive. If\nspecified, `referenceName` must also be specified.", + "format": "int64", + "type": "string" } }, - "id": "ImportReadGroupSetsResponse" + "id": "SearchReadsRequest", + "description": "The read search request.", + "type": "object" }, - "ExternalId": { - "type": "object", + "Operation": { "properties": { - "sourceName": { - "description": "The name of the source of this data.", - "type": "string" + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "If importing ReadGroupSets, an ImportReadGroupSetsResponse is returned. If importing Variants, an ImportVariantsResponse is returned. For pipelines and exports, an empty response is returned.", + "type": "object" }, - "id": { - "description": "The id used by the source of this data.", + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. For example: `operations/CJHU7Oi_ChDrveSpBRjfuL-qzoWAgEw`", "type": "string" + }, + "error": { + "description": "The error result of the operation in case of failure or cancellation.", + "$ref": "Status" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "An OperationMetadata object. This will always be returned with the Operation.", + "type": "object" + }, + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" } }, - "id": "ExternalId" + "id": "Operation", + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "type": "object" }, - "SearchReadGroupSetsResponse": { - "description": "The read group set search response.", + "RuntimeMetadata": { + "description": "Runtime metadata that will be populated in the\nruntimeMetadata\nfield of the Operation associated with a RunPipeline execution.", "type": "object", "properties": { - "readGroupSets": { - "description": "The list of matching read group sets.", + "computeEngine": { + "description": "Execution information specific to Google Compute Engine.", + "$ref": "ComputeEngine" + } + }, + "id": "RuntimeMetadata" + }, + "ImportReadGroupSetsResponse": { + "properties": { + "readGroupSetIds": { + "description": "IDs of the read group sets that were created.", "type": "array", "items": { - "$ref": "ReadGroupSet" + "type": "string" } - }, - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. 
This field will be empty if there aren't any additional results.", - "type": "string" } }, - "id": "SearchReadGroupSetsResponse" + "id": "ImportReadGroupSetsResponse", + "description": "The read group set import response.", + "type": "object" }, - "OperationEvent": { - "description": "An event that occurred during an Operation.", - "type": "object", + "VariantCall": { "properties": { - "description": { - "description": "Required description of event.", + "phaseset": { + "description": "If this field is present, this variant call's genotype ordering implies\nthe phase of the bases and is consistent with any other variant calls in\nthe same reference sequence which have the same phaseset value.\nWhen importing data from VCF, if the genotype data was phased but no\nphase set was specified this field will be set to `*`.", "type": "string" }, - "startTime": { - "description": "Optional time of when event started.", - "type": "string", - "format": "google-datetime" + "info": { + "additionalProperties": { + "type": "array", + "items": { + "type": "any" + } + }, + "description": "A map of additional variant call information. This must be of the form\nmap\u003cstring, string[]\u003e (string key mapping to a list of string values).", + "type": "object" }, - "endTime": { - "description": "Optional time of when event finished. An event can have a start time and no\nfinish time. If an event has a finish time, there must be a start time.", - "type": "string", - "format": "google-datetime" - } - }, - "id": "OperationEvent" - }, - "SearchReadsResponse": { - "description": "The read search response.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", + "callSetName": { + "description": "The name of the call set this variant call belongs to.", "type": "string" }, - "alignments": { - "description": "The list of matching alignments sorted by mapped genomic coordinate,\nif any, ascending in position within the same reference. Unmapped reads,\nwhich have no position, are returned contiguously and are sorted in\nascending lexicographic order by fragment name.", + "genotypeLikelihood": { + "description": "The genotype likelihoods for this variant call. Each array entry\nrepresents how likely a specific genotype is for this call. The value\nordering is defined by the GL tag in the VCF spec.\nIf Phred-scaled genotype likelihood scores (PL) are available and\nlog10(P) genotype likelihood scores (GL) are not, PL scores are converted\nto GL scores. If both are available, PL scores are stored in `info`.", "type": "array", "items": { - "$ref": "Read" + "format": "double", + "type": "number" + } + }, + "callSetId": { + "description": "The ID of the call set this variant call belongs to.", + "type": "string" + }, + "genotype": { + "description": "The genotype of this variant call. Each value represents either the value\nof the `referenceBases` field or a 1-based index into\n`alternateBases`. If a variant had a `referenceBases`\nvalue of `T` and an `alternateBases`\nvalue of `[\"A\", \"C\"]`, and the `genotype` was\n`[2, 1]`, that would mean the call\nrepresented the heterozygous value `CA` for this variant.\nIf the `genotype` was instead `[0, 1]`, the\nrepresented value would be `TA`. 
Ordering of the\ngenotype values is important if the `phaseset` is present.\nIf a genotype is not called (that is, a `.` is present in the\nGT string) -1 is returned.", + "type": "array", + "items": { + "format": "int32", + "type": "integer" } } }, - "id": "SearchReadsResponse" + "id": "VariantCall", + "description": "A call represents the determination of genotype with respect to a particular\nvariant. It may include associated information such as quality and phasing.\nFor example, a call might assign a probability of 0.32 to the occurrence of\na SNP named rs1234 in a call set with the name NA12345.", + "type": "object" }, - "SearchReferenceSetsResponse": { - "type": "object", + "SearchVariantsResponse": { "properties": { - "referenceSets": { - "description": "The matching references sets.", + "variants": { + "description": "The list of matching Variants.", "type": "array", "items": { - "$ref": "ReferenceSet" + "$ref": "Variant" } }, "nextPageToken": { @@ -3562,103 +3618,119 @@ "type": "string" } }, - "id": "SearchReferenceSetsResponse" + "id": "SearchVariantsResponse", + "description": "The variant search response.", + "type": "object" }, - "Program": { + "ListBasesResponse": { "type": "object", "properties": { - "prevProgramId": { - "description": "The ID of the program run before this one.", + "sequence": { + "description": "A substring of the bases that make up this reference.", "type": "string" }, - "commandLine": { - "description": "The command line used to run this program.", + "offset": { + "description": "The offset position (0-based) of the given `sequence` from the\nstart of this `Reference`. This value will differ for each page\nin a paginated request.", + "format": "int64", "type": "string" }, - "id": { - "description": "The user specified locally unique ID of the program. Used along with\n`prevProgramId` to define an ordering between programs.", + "nextPageToken": { + "description": "The continuation token, which is used to page through large result sets.\nProvide this value in a subsequent request to return the next page of\nresults. This field will be empty if there aren't any additional results.", "type": "string" + } + }, + "id": "ListBasesResponse" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. 
For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } }, - "name": { - "description": "The display name of the program. This is typically the colloquial name of\nthe tool used, for example 'bwa' or 'picard'.", - "type": "string" + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" }, - "version": { - "description": "The version of the program run.", + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", "type": "string" } }, - "id": "Program" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" + "id": "Status" }, - "SearchAnnotationSetsRequest": { - "type": "object", + "Binding": { "properties": { - "datasetIds": { - "description": "Required. The dataset IDs to search within. Caller must have `READ` access\nto these datasets.", + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", "type": "array", "items": { "type": "string" } }, - "pageSize": { - "description": "The maximum number of results to return in a single page. If unspecified,\ndefaults to 128. The maximum value is 1024.", - "type": "integer", - "format": "int32" - }, - "name": { - "description": "Only return annotations sets for which a substring of the name matches this\nstring (case insensitive).", + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", "type": "string" - }, - "referenceSetId": { - "description": "If specified, only annotation sets associated with the given reference set\nare returned.", + } + }, + "id": "Binding", + "description": "Associates `members` with a `role`.", + "type": "object" + }, + "UndeleteDatasetRequest": { + "properties": {}, + "id": "UndeleteDatasetRequest", + "type": "object" + }, + "Range": { + "properties": { + "start": { + "description": "The start position of the range on the reference, 0-based inclusive.", + "format": "int64", "type": "string" }, - "types": { - "description": "If specified, only annotation sets that have any of these types are\nreturned.", - "enumDescriptions": [ - "", - "A `GENERIC` annotation type should be used when no other annotation\ntype will suffice. This represents an untyped annotation of the reference\ngenome.", - "A `VARIANT` annotation type.", - "A `GENE` annotation type represents the existence of a gene at the\nassociated reference coordinates. The start coordinate is typically the\ngene's transcription start site and the end is typically the end of the\ngene's last exon.", - "A `TRANSCRIPT` annotation type represents the assertion that a\nparticular region of the reference genome may be transcribed as RNA." - ], - "type": "array", - "items": { - "enum": [ - "ANNOTATION_TYPE_UNSPECIFIED", - "GENERIC", - "VARIANT", - "GENE", - "TRANSCRIPT" - ], - "type": "string" - } + "end": { + "description": "The end position of the range on the reference, 0-based exclusive.", + "format": "int64", + "type": "string" }, - "pageToken": { - "description": "The continuation token, which is used to page through large result sets.\nTo get the next page of results, set this parameter to the value of\n`nextPageToken` from the previous response.", + "referenceName": { + "description": "The reference sequence name, for example `chr1`,\n`1`, or `chrX`.", "type": "string" } }, - "id": "SearchAnnotationSetsRequest" + "id": "Range", + "description": "A 0-based half-open genomic coordinate range for search requests.", + "type": "object" }, "VariantSet": { "description": "A variant set is a collection of call sets and variants. It contains summary\nstatistics of those contents. 
A variant set belongs to a dataset.\n\nFor more genomics resource definitions, see [Fundamentals of Google\nGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)", "type": "object", "properties": { - "id": { - "description": "The server-generated variant set ID, unique across all variant sets.", + "name": { + "description": "User-specified, mutable name.", "type": "string" }, - "description": { - "description": "A textual description of this variant set.", + "referenceSetId": { + "description": "The reference set to which the variant set is mapped. The reference set\ndescribes the alignment provenance of the variant set, while the\n`referenceBounds` describe the shape of the actual variant data. The\nreference set's reference names are a superset of those found in the\n`referenceBounds`.\n\nFor example, given a variant set that is mapped to the GRCh38 reference set\nand contains a single variant on reference 'X', `referenceBounds` would\ncontain only an entry for 'X', while the associated reference set\nenumerates all possible references: '1', '2', 'X', 'Y', 'MT', etc.", "type": "string" }, + "metadata": { + "description": "The metadata associated with this variant set.", + "type": "array", + "items": { + "$ref": "VariantSetMetadata" + } + }, "referenceBounds": { "description": "A list of all references used by the variants in a variant set\nwith associated coordinate upper bounds for each one.", "type": "array", @@ -3666,152 +3738,80 @@ "$ref": "ReferenceBound" } }, - "datasetId": { - "description": "The dataset to which this variant set belongs.", + "id": { + "description": "The server-generated variant set ID, unique across all variant sets.", "type": "string" }, - "metadata": { - "description": "The metadata associated with this variant set.", - "type": "array", - "items": { - "$ref": "VariantSetMetadata" - } - }, - "name": { - "description": "User-specified, mutable name.", + "description": { + "description": "A textual description of this variant set.", "type": "string" }, - "referenceSetId": { - "description": "The reference set to which the variant set is mapped. The reference set\ndescribes the alignment provenance of the variant set, while the\n`referenceBounds` describe the shape of the actual variant data. The\nreference set's reference names are a superset of those found in the\n`referenceBounds`.\n\nFor example, given a variant set that is mapped to the GRCh38 reference set\nand contains a single variant on reference 'X', `referenceBounds` would\ncontain only an entry for 'X', while the associated reference set\nenumerates all possible references: '1', '2', 'X', 'Y', 'MT', etc.", + "datasetId": { + "description": "The dataset to which this variant set belongs.", "type": "string" } }, "id": "VariantSet" }, - "Position": { - "description": "An abstraction for referring to a genomic position, in relation to some\nalready known reference. 
For now, represents a genomic position as a\nreference name, a base number on that reference (0-based), and a\ndetermination of forward or reverse strand.", + "BatchCreateAnnotationsResponse": { + "properties": { + "entries": { + "description": "The resulting per-annotation entries, ordered consistently with the\noriginal request.", + "type": "array", + "items": { + "$ref": "Entry" + } + } + }, + "id": "BatchCreateAnnotationsResponse", + "type": "object" + }, + "ReferenceBound": { + "description": "ReferenceBound records an upper bound for the starting coordinate of\nvariants in a particular reference.", "type": "object", "properties": { - "referenceName": { - "description": "The name of the reference in whatever reference set is being used.", + "upperBound": { + "description": "An upper bound (inclusive) on the starting coordinate of any\nvariant in the reference sequence.", + "format": "int64", "type": "string" }, - "position": { - "description": "The 0-based offset from the start of the forward strand for that reference.", - "type": "string", - "format": "int64" - }, - "reverseStrand": { - "description": "Whether this position is on the reverse strand, as opposed to the forward\nstrand.", - "type": "boolean" + "referenceName": { + "description": "The name of the reference associated with this reference bound.", + "type": "string" } }, - "id": "Position" + "id": "ReferenceBound" } }, - "revision": "20170125", - "basePath": "", + "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "discoveryVersion": "v1", + "version": "v1", "baseUrl": "https://genomics.googleapis.com/", - "name": "genomics", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/bigquery": { + "description": "View and manage your data in Google BigQuery" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/genomics.readonly": { + "description": "View Genomics data" + }, + "https://www.googleapis.com/auth/genomics": { + "description": "View and manage Genomics data" + } + } } }, - "documentationLink": "https://cloud.google.com/genomics", - "ownerDomain": "google.com", - "batchPath": "batch", "servicePath": "", - "ownerName": "Google", - "version": "v1", - "rootUrl": "https://genomics.googleapis.com/", + "description": "Upload, process, query, and search Genomics data in the cloud.", "kind": "discovery#restDescription" } diff --git a/vendor/google.golang.org/api/genomics/v1/genomics-gen.go b/vendor/google.golang.org/api/genomics/v1/genomics-gen.go index 35780f16a..e85c53dce 100644 --- a/vendor/google.golang.org/api/genomics/v1/genomics-gen.go +++ b/vendor/google.golang.org/api/genomics/v1/genomics-gen.go @@ -83,9 +83,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Annotations *AnnotationsService @@ -117,6 +118,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + 
func NewAnnotationsService(s *Service) *AnnotationsService { rs := &AnnotationsService{s: s} return rs @@ -4477,6 +4482,7 @@ func (c *AnnotationsBatchCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchcreateannotationsrequest) if err != nil { @@ -4623,6 +4629,7 @@ func (c *AnnotationsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotation) if err != nil { @@ -4746,6 +4753,7 @@ func (c *AnnotationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/annotations/{annotationId}") @@ -4883,6 +4891,7 @@ func (c *AnnotationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5024,6 +5033,7 @@ func (c *AnnotationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchannotationsrequest) if err != nil { @@ -5183,6 +5193,7 @@ func (c *AnnotationsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotation) if err != nil { @@ -5333,6 +5344,7 @@ func (c *AnnotationsetsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotationset) if err != nil { @@ -5456,6 +5468,7 @@ func (c *AnnotationsetsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/annotationsets/{annotationSetId}") @@ -5593,6 +5606,7 @@ func (c *AnnotationsetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5730,6 +5744,7 @@ func (c *AnnotationsetsSearchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchannotationsetsrequest) if err != nil { @@ -5891,6 +5906,7 @@ func (c *AnnotationsetsUpdateCall) 
doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotationset) if err != nil { @@ -6037,6 +6053,7 @@ func (c *CallsetsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.callset) if err != nil { @@ -6165,6 +6182,7 @@ func (c *CallsetsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/callsets/{callSetId}") @@ -6308,6 +6326,7 @@ func (c *CallsetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6459,6 +6478,7 @@ func (c *CallsetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.callset) if err != nil { @@ -6610,6 +6630,7 @@ func (c *CallsetsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchcallsetsrequest) if err != nil { @@ -6760,6 +6781,7 @@ func (c *DatasetsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { @@ -6895,6 +6917,7 @@ func (c *DatasetsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/datasets/{datasetId}") @@ -7038,6 +7061,7 @@ func (c *DatasetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7182,6 +7206,7 @@ func (c *DatasetsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { @@ -7357,6 +7382,7 @@ func (c *DatasetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7534,6 
+7560,7 @@ func (c *DatasetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { @@ -7688,6 +7715,7 @@ func (c *DatasetsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -7836,6 +7864,7 @@ func (c *DatasetsTestIamPermissionsCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -7983,6 +8012,7 @@ func (c *DatasetsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeletedatasetrequest) if err != nil { @@ -8122,6 +8152,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { @@ -8271,6 +8302,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8454,6 +8486,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8627,6 +8660,7 @@ func (c *ReadgroupsetsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/readgroupsets/{readGroupSetId}") @@ -8769,6 +8803,7 @@ func (c *ReadgroupsetsExportCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.exportreadgroupsetrequest) if err != nil { @@ -8921,6 +8956,7 @@ func (c *ReadgroupsetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9075,6 +9111,7 @@ func (c *ReadgroupsetsImportCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body 
io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.importreadgroupsetsrequest) if err != nil { @@ -9222,6 +9259,7 @@ func (c *ReadgroupsetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.readgroupset) if err != nil { @@ -9373,6 +9411,7 @@ func (c *ReadgroupsetsSearchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchreadgroupsetsrequest) if err != nil { @@ -9610,6 +9649,7 @@ func (c *ReadgroupsetsCoveragebucketsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9833,6 +9873,7 @@ func (c *ReadsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchreadsrequest) if err != nil { @@ -9999,6 +10040,7 @@ func (c *ReferencesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10140,6 +10182,7 @@ func (c *ReferencesSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchreferencesrequest) if err != nil { @@ -10342,6 +10385,7 @@ func (c *ReferencesBasesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10538,6 +10582,7 @@ func (c *ReferencesetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10680,6 +10725,7 @@ func (c *ReferencesetsSearchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchreferencesetsrequest) if err != nil { @@ -10830,6 +10876,7 @@ func (c *VariantsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variant) if err != nil { @@ -10958,6 +11005,7 @@ func (c *VariantsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/variants/{variantId}") @@ -11101,6 +11149,7 @@ func (c *VariantsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11259,6 +11308,7 @@ func (c *VariantsImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.importvariantsrequest) if err != nil { @@ -11484,6 +11534,7 @@ func (c *VariantsMergeCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.mergevariantsrequest) if err != nil { @@ -11628,6 +11679,7 @@ func (c *VariantsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variant) if err != nil { @@ -11779,6 +11831,7 @@ func (c *VariantsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchvariantsrequest) if err != nil { @@ -11935,6 +11988,7 @@ func (c *VariantsetsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variantset) if err != nil { @@ -12065,6 +12119,7 @@ func (c *VariantsetsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/variantsets/{variantSetId}") @@ -12199,6 +12254,7 @@ func (c *VariantsetsExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.exportvariantsetrequest) if err != nil { @@ -12351,6 +12407,7 @@ func (c *VariantsetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12504,6 +12561,7 @@ func (c *VariantsetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variantset) if err != nil { @@ -12656,6 +12714,7 @@ func 
(c *VariantsetsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchvariantsetsrequest) if err != nil { diff --git a/vendor/google.golang.org/api/genomics/v1alpha2/genomics-api.json b/vendor/google.golang.org/api/genomics/v1alpha2/genomics-api.json index e151afc5f..5f62b1518 100644 --- a/vendor/google.golang.org/api/genomics/v1alpha2/genomics-api.json +++ b/vendor/google.golang.org/api/genomics/v1alpha2/genomics-api.json @@ -1,507 +1,152 @@ { - "id": "genomics:v1alpha2", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/genomics": { - "description": "View and manage Genomics data" - }, - "https://www.googleapis.com/auth/compute": { - "description": "View and manage your Google Compute Engine resources" + "schemas": { + "LoggingOptions": { + "id": "LoggingOptions", + "description": "The logging options for the pipeline run.", + "type": "object", + "properties": { + "gcsPath": { + "description": "The location in Google Cloud Storage to which the pipeline logs\nwill be copied. Can be specified as a fully qualified directory\npath, in which case logs will be output with a unique identifier\nas the filename in that directory, or as a fully specified path,\nwhich must end in `.log`, in which case that path will be\nused, and the user must ensure that logs are not\noverwritten. Stdout and stderr logs from the run are also\ngenerated and output as `-stdout.log` and `-stderr.log`.", + "type": "string" } } - } - }, - "description": "Upload, process, query, and search Genomics data in the cloud.", - "protocol": "rest", - "title": "Genomics API", - "resources": { - "pipelines": { - "methods": { - "getControllerConfig": { - "id": "genomics.pipelines.getControllerConfig", - "response": { - "$ref": "ControllerConfig" - }, - "parameterOrder": [], - "description": "Gets controller configuration information. Should only be called\nby VMs created by the Pipelines Service and not by end users.", - "flatPath": "v1alpha2/pipelines:getControllerConfig", - "httpMethod": "GET", - "parameters": { - "operationId": { - "description": "The operation to retrieve controller configuration for.", - "location": "query", - "type": "string" - }, - "validationToken": { - "location": "query", - "type": "string", - "format": "uint64" - } - }, - "path": "v1alpha2/pipelines:getControllerConfig", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + }, + "RunPipelineRequest": { + "description": "The request to run a pipeline. If `pipelineId` is specified, it\nrefers to a saved pipeline created with CreatePipeline and set as\nthe `pipelineId` of the returned Pipeline object. If\n`ephemeralPipeline` is specified, that pipeline is run once\nwith the given args and not saved. It is an error to specify both\n`pipelineId` and `ephemeralPipeline`. `pipelineArgs`\nmust be specified.", + "type": "object", + "properties": { + "pipelineArgs": { + "description": "The arguments to use when running this pipeline.", + "$ref": "RunPipelineArgs" }, - "run": { - "id": "genomics.pipelines.run", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [], - "description": "Runs a pipeline. 
If `pipelineId` is specified in the request, then\nrun a saved pipeline. If `ephemeralPipeline` is specified, then run\nthat pipeline once without saving a copy.\n\nThe caller must have READ permission to the project where the pipeline\nis stored and WRITE permission to the project where the pipeline will be\nrun, as VMs will be created and storage will be used.\n\nIf a pipeline operation is still running after 6 days, it will be canceled.", - "request": { - "$ref": "RunPipelineRequest" - }, - "flatPath": "v1alpha2/pipelines:run", - "httpMethod": "POST", - "parameters": {}, - "path": "v1alpha2/pipelines:run", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/genomics" - ] + "pipelineId": { + "description": "The already created pipeline to run.", + "type": "string" }, - "setOperationStatus": { - "id": "genomics.pipelines.setOperationStatus", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [], - "description": "Sets status of a given operation. Any new timestamps (as determined by\ndescription) are appended to TimestampEvents. Should only be called by VMs\ncreated by the Pipelines Service and not by end users.", - "request": { - "$ref": "SetOperationStatusRequest" - }, - "flatPath": "v1alpha2/pipelines:setOperationStatus", - "httpMethod": "PUT", - "parameters": {}, - "path": "v1alpha2/pipelines:setOperationStatus", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "ephemeralPipeline": { + "$ref": "Pipeline", + "description": "A new pipeline object to run once and then delete." + } + }, + "id": "RunPipelineRequest" + }, + "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", + "type": "object", + "properties": {}, + "id": "CancelOperationRequest" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "type": "object", + "properties": { + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." }, - "list": { - "id": "genomics.pipelines.list", - "response": { - "$ref": "ListPipelinesResponse" - }, - "parameterOrder": [], - "description": "Lists pipelines.\n\nCaller must have READ permission to the project.", - "flatPath": "v1alpha2/pipelines", - "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "Number of pipelines to return at once. Defaults to 256, and max\nis 2048.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "projectId": { - "description": "Required. The name of the project to search for pipelines. Caller\nmust have READ access to this project.", - "location": "query", - "type": "string" - }, - "namePrefix": { - "description": "Pipelines with names that match this prefix should be\nreturned. If unspecified, all pipelines in the project, up to\n`pageSize`, will be returned.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Token to use to indicate where to start getting results.\nIf unspecified, returns the first page of results.", - "location": "query", - "type": "string" - } + "metadata": { + "additionalProperties": { + "description": "Properties of the object. 
Contains field @type with type URL.", + "type": "any" }, - "path": "v1alpha2/pipelines", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "description": "An OperationMetadata object. This will always be returned with the Operation.", + "type": "object" }, - "get": { - "id": "genomics.pipelines.get", - "response": { - "$ref": "Pipeline" - }, - "parameterOrder": [ - "pipelineId" - ], - "description": "Retrieves a pipeline based on ID.\n\nCaller must have READ permission to the project.", - "flatPath": "v1alpha2/pipelines/{pipelineId}", - "httpMethod": "GET", - "parameters": { - "pipelineId": { - "description": "Caller must have READ access to the project in which this pipeline\nis defined.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1alpha2/pipelines/{pipelineId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "done": { + "type": "boolean", + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable." }, - "create": { - "id": "genomics.pipelines.create", - "response": { - "$ref": "Pipeline" - }, - "parameterOrder": [], - "description": "Creates a pipeline that can be run later. Create takes a Pipeline that\nhas all fields other than `pipelineId` populated, and then returns\nthe same pipeline with `pipelineId` populated. This id can be used\nto run the pipeline.\n\nCaller must have WRITE permission to the project.", - "request": { - "$ref": "Pipeline" + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" }, - "flatPath": "v1alpha2/pipelines", - "httpMethod": "POST", - "parameters": {}, - "path": "v1alpha2/pipelines", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "description": "If importing ReadGroupSets, an ImportReadGroupSetsResponse is returned. If importing Variants, an ImportVariantsResponse is returned. For pipelines and exports, an empty response is returned.", + "type": "object" }, - "delete": { - "id": "genomics.pipelines.delete", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "pipelineId" - ], - "description": "Deletes a pipeline based on ID.\n\nCaller must have WRITE permission to the project.", - "flatPath": "v1alpha2/pipelines/{pipelineId}", - "httpMethod": "DELETE", - "parameters": { - "pipelineId": { - "description": "Caller must have WRITE access to the project in which this pipeline\nis defined.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1alpha2/pipelines/{pipelineId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. For example: `operations/CJHU7Oi_ChDrveSpBRjfuL-qzoWAgEw`", + "type": "string" } - } + }, + "id": "Operation" }, - "operations": { - "methods": { - "get": { - "id": "genomics.operations.get", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "name" - ], - "description": "Gets the latest state of a long-running operation. 
Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "flatPath": "v1alpha2/operations/{operationsId}", - "httpMethod": "GET", - "parameters": { - "name": { - "description": "The name of the operation resource.", - "required": true, - "pattern": "^operations/.+$", - "location": "path", - "type": "string" - } - }, - "path": "v1alpha2/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "list": { - "id": "genomics.operations.list", - "response": { - "$ref": "ListOperationsResponse" - }, - "parameterOrder": [ - "name" - ], - "description": "Lists operations that match the specified filter in the request.", - "flatPath": "v1alpha2/operations", - "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "The maximum number of results to return. If unspecified, defaults to\n256. The maximum value is 2048.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "filter": { - "description": "A string for filtering Operations.\nThe following filter fields are supported:\n\n* projectId: Required. Corresponds to\n OperationMetadata.projectId.\n* createTime: The time this job was created, in seconds from the\n [epoch](http://en.wikipedia.org/wiki/Unix_time). Can use `\u003e=` and/or `\u003c=`\n operators.\n* status: Can be `RUNNING`, `SUCCESS`, `FAILURE`, or `CANCELED`. Only\n one status may be specified.\n* labels.key where key is a label key.\n\nExamples:\n\n* `projectId = my-project AND createTime \u003e= 1432140000`\n* `projectId = my-project AND createTime \u003e= 1432140000 AND createTime \u003c= 1432150000 AND status = RUNNING`\n* `projectId = my-project AND labels.color = *`\n* `projectId = my-project AND labels.color = red`", - "location": "query", - "type": "string" - }, - "name": { - "description": "The name of the operation collection.", - "required": true, - "pattern": "^operations$", - "location": "path", - "type": "string" - }, - "pageToken": { - "description": "The standard list page token.", - "location": "query", - "type": "string" - } - }, - "path": "v1alpha2/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] - }, - "cancel": { - "id": "genomics.operations.cancel", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. 
Clients may use Operations.GetOperation or Operations.ListOperations to check whether the cancellation succeeded or the operation completed despite cancellation.", - "request": { - "$ref": "CancelOperationRequest" - }, - "flatPath": "v1alpha2/operations/{operationsId}:cancel", - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The name of the operation resource to be cancelled.", - "required": true, - "pattern": "^operations/.+$", - "location": "path", - "type": "string" - } - }, - "path": "v1alpha2/{+name}:cancel", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/genomics" - ] + "RuntimeMetadata": { + "description": "Runtime metadata that will be populated in the\nruntimeMetadata\nfield of the Operation associated with a RunPipeline execution.", + "type": "object", + "properties": { + "computeEngine": { + "$ref": "ComputeEngine", + "description": "Execution information specific to Google Compute Engine." } - } - } - }, - "schemas": { - "SetOperationStatusRequest": { - "description": "Request to set operation status. Should only be used by VMs\ncreated by the Pipelines Service and not by end users.", + }, + "id": "RuntimeMetadata" + }, + "ImportReadGroupSetsResponse": { + "description": "The read group set import response.", "type": "object", "properties": { - "operationId": { - "type": "string" - }, - "validationToken": { - "type": "string", - "format": "uint64" - }, - "timestampEvents": { + "readGroupSetIds": { + "description": "IDs of the read group sets that were created.", "type": "array", "items": { - "$ref": "TimestampEvent" + "type": "string" } - }, - "errorCode": { - "enum": [ - "OK", - "CANCELLED", - "UNKNOWN", - "INVALID_ARGUMENT", - "DEADLINE_EXCEEDED", - "NOT_FOUND", - "ALREADY_EXISTS", - "PERMISSION_DENIED", - "UNAUTHENTICATED", - "RESOURCE_EXHAUSTED", - "FAILED_PRECONDITION", - "ABORTED", - "OUT_OF_RANGE", - "UNIMPLEMENTED", - "INTERNAL", - "UNAVAILABLE", - "DATA_LOSS" - ], - "enumDescriptions": [ - "Not an error; returned on success\n\nHTTP Mapping: 200 OK", - "The operation was cancelled, typically by the caller.\n\nHTTP Mapping: 499 Client Closed Request", - "Unknown error. For example, this error may be returned when\na `Status` value received from another address space belongs to\nan error space that is not known in this address space. Also\nerrors raised by APIs that do not return enough error information\nmay be converted to this error.\n\nHTTP Mapping: 500 Internal Server Error", - "The client specified an invalid argument. Note that this differs\nfrom `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments\nthat are problematic regardless of the state of the system\n(e.g., a malformed file name).\n\nHTTP Mapping: 400 Bad Request", - "The deadline expired before the operation could complete. For operations\nthat change the state of the system, this error may be returned\neven if the operation has completed successfully. For example, a\nsuccessful response from a server could have been delayed long\nenough for the deadline to expire.\n\nHTTP Mapping: 504 Gateway Timeout", - "Some requested entity (e.g., file or directory) was not found.\n\nNote to server developers: if a request is denied for an entire class\nof users, such as gradual feature rollout or undocumented whitelist,\n`NOT_FOUND` may be used. 
If a request is denied for some users within\na class of users, such as user-based access control, `PERMISSION_DENIED`\nmust be used.\n\nHTTP Mapping: 404 Not Found", - "The entity that a client attempted to create (e.g., file or directory)\nalready exists.\n\nHTTP Mapping: 409 Conflict", - "The caller does not have permission to execute the specified\noperation. `PERMISSION_DENIED` must not be used for rejections\ncaused by exhausting some resource (use `RESOURCE_EXHAUSTED`\ninstead for those errors). `PERMISSION_DENIED` must not be\nused if the caller can not be identified (use `UNAUTHENTICATED`\ninstead for those errors). This error code does not imply the\nrequest is valid or the requested entity exists or satisfies\nother pre-conditions.\n\nHTTP Mapping: 403 Forbidden", - "The request does not have valid authentication credentials for the\noperation.\n\nHTTP Mapping: 401 Unauthorized", - "Some resource has been exhausted, perhaps a per-user quota, or\nperhaps the entire file system is out of space.\n\nHTTP Mapping: 429 Too Many Requests", - "The operation was rejected because the system is not in a state\nrequired for the operation's execution. For example, the directory\nto be deleted is non-empty, an rmdir operation is applied to\na non-directory, etc.\n\nService implementors can use the following guidelines to decide\nbetween `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:\n (a) Use `UNAVAILABLE` if the client can retry just the failing call.\n (b) Use `ABORTED` if the client should retry at a higher level\n (e.g., restarting a read-modify-write sequence).\n (c) Use `FAILED_PRECONDITION` if the client should not retry until\n the system state has been explicitly fixed. E.g., if an \"rmdir\"\n fails because the directory is non-empty, `FAILED_PRECONDITION`\n should be returned since the client should not retry unless\n the files are deleted from the directory.\n\nHTTP Mapping: 400 Bad Request", - "The operation was aborted, typically due to a concurrency issue such as\na sequencer check failure or transaction abort.\n\nSee the guidelines above for deciding between `FAILED_PRECONDITION`,\n`ABORTED`, and `UNAVAILABLE`.\n\nHTTP Mapping: 409 Conflict", - "The operation was attempted past the valid range. E.g., seeking or\nreading past end-of-file.\n\nUnlike `INVALID_ARGUMENT`, this error indicates a problem that may\nbe fixed if the system state changes. For example, a 32-bit file\nsystem will generate `INVALID_ARGUMENT` if asked to read at an\noffset that is not in the range [0,2^32-1], but it will generate\n`OUT_OF_RANGE` if asked to read from an offset past the current\nfile size.\n\nThere is a fair bit of overlap between `FAILED_PRECONDITION` and\n`OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific\nerror) when it applies so that callers who are iterating through\na space can easily look for an `OUT_OF_RANGE` error to detect when\nthey are done.\n\nHTTP Mapping: 400 Bad Request", - "The operation is not implemented or is not supported/enabled in this\nservice.\n\nHTTP Mapping: 501 Not Implemented", - "Internal errors. This means that some invariants expected by the\nunderlying system have been broken. This error code is reserved\nfor serious errors.\n\nHTTP Mapping: 500 Internal Server Error", - "The service is currently unavailable. 
This is most likely a\ntransient condition, which can be corrected by retrying with\na backoff.\n\nSee the guidelines above for deciding between `FAILED_PRECONDITION`,\n`ABORTED`, and `UNAVAILABLE`.\n\nHTTP Mapping: 503 Service Unavailable", - "Unrecoverable data loss or corruption.\n\nHTTP Mapping: 500 Internal Server Error" - ], - "type": "string" - }, - "errorMessage": { - "type": "string" } }, - "id": "SetOperationStatusRequest" + "id": "ImportReadGroupSetsResponse" }, "Status": { "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", "type": "object", "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" }, "details": { "description": "A list of messages that carry the error details. 
There will be a\ncommon set of message types for APIs to use.", "type": "array", "items": { + "type": "object", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" - }, - "type": "object" + } } }, - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" } }, "id": "Status" }, - "OperationEvent": { - "description": "An event that occurred during an Operation.", - "type": "object", - "properties": { - "description": { - "description": "Required description of event.", - "type": "string" - }, - "startTime": { - "description": "Optional time of when event started.", - "type": "string", - "format": "google-datetime" - }, - "endTime": { - "description": "Optional time of when event finished. An event can have a start time and no\nfinish time. If an event has a finish time, there must be a start time.", - "type": "string", - "format": "google-datetime" - } - }, - "id": "OperationEvent" - }, - "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", - "type": "object", - "properties": { - "error": { - "description": "The error result of the operation in case of failure or cancellation.", - "$ref": "Status" - }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" - }, - "metadata": { - "description": "An OperationMetadata object. This will always be returned with the Operation.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "response": { - "description": "If importing ReadGroupSets, an ImportReadGroupSetsResponse is returned. If importing Variants, an ImportVariantsResponse is returned. For pipelines and exports, an empty response is returned.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that originally returns it. For example: `operations/CJHU7Oi_ChDrveSpBRjfuL-qzoWAgEw`", - "type": "string" - } - }, - "id": "Operation" - }, - "DockerExecutor": { - "description": "The Docker execuctor specification.", - "type": "object", - "properties": { - "imageName": { - "description": "Required. Image name from either Docker Hub or Google Container Registry.\nUsers that run pipelines must have READ access to the image.", - "type": "string" - }, - "cmd": { - "description": "Required. The command or newline delimited script to run. 
The command\nstring will be executed within a bash shell.\n\nIf the command exits with a non-zero exit code, output parameter\nde-localization will be skipped and the pipeline operation's\n`error` field will be populated.\n\nMaximum command string length is 16384.", - "type": "string" - } - }, - "id": "DockerExecutor" - }, - "ComputeEngine": { - "description": "Describes a Compute Engine resource that is being managed by a running\npipeline.", + "ServiceAccount": { + "description": "A Google Cloud Service Account.", "type": "object", "properties": { - "instanceName": { - "description": "The instance on which the operation is running.", - "type": "string" - }, - "machineType": { - "description": "The machine type of the instance.", - "type": "string" - }, - "zone": { - "description": "The availability zone in which the instance resides.", + "email": { + "description": "Email address of the service account. Defaults to `default`,\nwhich uses the compute service account associated with the project.", "type": "string" }, - "diskNames": { - "description": "The names of the disks that were created for this pipeline.", + "scopes": { + "description": "List of scopes to be enabled for this service account on the VM.\nThe following scopes are automatically included:\n\n* https://www.googleapis.com/auth/compute\n* https://www.googleapis.com/auth/devstorage.full_control\n* https://www.googleapis.com/auth/genomics\n* https://www.googleapis.com/auth/logging.write\n* https://www.googleapis.com/auth/monitoring.write", "type": "array", "items": { "type": "string" } } }, - "id": "ComputeEngine" + "id": "ServiceAccount" }, "PipelineResources": { - "description": "The system resources for the pipeline run.", - "type": "object", "properties": { - "minimumCpuCores": { - "description": "The minimum number of cores to use. Defaults to 1.", - "type": "integer", - "format": "int32" - }, - "disks": { - "description": "Disks to attach.", - "type": "array", - "items": { - "$ref": "Disk" - } + "bootDiskSizeGb": { + "description": "The size of the boot disk. Defaults to 10 (GB).", + "format": "int32", + "type": "integer" }, "preemptible": { "description": "Whether to use preemptible VMs. Defaults to `false`. In order to use this,\nmust be true for both create time and run time. Cannot be true at run time\nif false at create time.", @@ -509,8 +154,8 @@ }, "minimumRamGb": { "description": "The minimum amount of RAM to use. Defaults to 3.75 (GB)", - "type": "number", - "format": "double" + "format": "double", + "type": "number" }, "zones": { "description": "List of Google Compute Engine availability zones to which resource\ncreation will restricted. If empty, any zone may be chosen.", @@ -519,181 +164,194 @@ "type": "string" } }, + "minimumCpuCores": { + "description": "The minimum number of cores to use. Defaults to 1.", + "format": "int32", + "type": "integer" + }, "noAddress": { "description": "Whether to assign an external IP to the instance. This is an experimental\nfeature that may go away. Defaults to false.\nCorresponds to `--no_address` flag for [gcloud compute instances create]\n(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create).\nIn order to use this, must be true for both create time and run time.\nCannot be true at run time if false at create time. If you need to ssh into\na private IP VM for debugging, you can ssh to a public VM and then ssh into\nthe private VM's Internal IP. 
If noAddress is set, this pipeline run may\nonly load docker images from Google Container Registry and not Docker Hub.\n** Note: To use this option, your project must be in Google Access for\nPrivate IPs Early Access Program.**", "type": "boolean" }, - "bootDiskSizeGb": { - "description": "The size of the boot disk. Defaults to 10 (GB).", - "type": "integer", - "format": "int32" - } - }, - "id": "PipelineResources" - }, - "RepeatedString": { - "type": "object", - "properties": { - "values": { + "disks": { + "description": "Disks to attach.", "type": "array", "items": { - "type": "string" + "$ref": "Disk" } } }, - "id": "RepeatedString" + "id": "PipelineResources", + "description": "The system resources for the pipeline run.", + "type": "object" }, - "LoggingOptions": { - "description": "The logging options for the pipeline run.", + "Pipeline": { + "description": "The pipeline object. Represents a transformation from a set of input\nparameters to a set of output parameters. The transformation is defined\nas a docker image and command to run within that image. Each pipeline\nis run on a Google Compute Engine VM. A pipeline can be created with the\n`create` method and then later run with the `run` method, or a pipeline can\nbe defined and run all at once with the `run` method.", "type": "object", "properties": { - "gcsPath": { - "description": "The location in Google Cloud Storage to which the pipeline logs\nwill be copied. Can be specified as a fully qualified directory\npath, in which case logs will be output with a unique identifier\nas the filename in that directory, or as a fully specified path,\nwhich must end in `.log`, in which case that path will be\nused, and the user must ensure that logs are not\noverwritten. Stdout and stderr logs from the run are also\ngenerated and output as `-stdout.log` and `-stderr.log`.", + "outputParameters": { + "description": "Output parameters of the pipeline.", + "type": "array", + "items": { + "$ref": "PipelineParameter" + } + }, + "docker": { + "description": "Specifies the docker run information.", + "$ref": "DockerExecutor" + }, + "description": { + "description": "User-specified description.", "type": "string" - } - }, - "id": "LoggingOptions" - }, - "ListPipelinesResponse": { - "description": "The response of ListPipelines. Contains at most `pageSize`\npipelines. If it contains `pageSize` pipelines, and more pipelines\nexist, then `nextPageToken` will be populated and should be\nused as the `pageToken` argument to a subsequent ListPipelines\nrequest.", - "type": "object", - "properties": { - "pipelines": { - "description": "The matched pipelines.", + }, + "inputParameters": { + "description": "Input parameters of the pipeline.", "type": "array", "items": { - "$ref": "Pipeline" + "$ref": "PipelineParameter" } }, - "nextPageToken": { - "description": "The token to use to get the next page of results.", + "resources": { + "$ref": "PipelineResources", + "description": "Required. Specifies resource requirements for the pipeline run.\nRequired fields:\n\n*\nminimumCpuCores\n\n*\nminimumRamGb" + }, + "name": { + "description": "Required. A user specified pipeline name that does not have to be unique.\nThis name can be used for filtering Pipelines in ListPipelines.", + "type": "string" + }, + "projectId": { + "description": "Required. The project in which to create the pipeline. 
The caller must have\nWRITE access.", + "type": "string" + }, + "pipelineId": { + "description": "Unique pipeline id that is generated by the service when CreatePipeline\nis called. Cannot be specified in the Pipeline used in the\nCreatePipelineRequest, and will be populated in the response to\nCreatePipeline and all subsequent Get and List calls. Indicates that the\nservice has registered this pipeline.", "type": "string" } }, - "id": "ListPipelinesResponse" + "id": "Pipeline" }, - "RunPipelineArgs": { - "description": "The pipeline run arguments.", + "ControllerConfig": { + "id": "ControllerConfig", + "description": "Stores the information that the controller will fetch from the\nserver in order to run. Should only be used by VMs created by the\nPipelines Service and not by end users.", "type": "object", "properties": { - "clientId": { - "description": "This field is deprecated. Use `labels` instead. Client-specified pipeline\noperation identifier.", - "type": "string" - }, - "resources": { - "description": "Specifies resource requirements/overrides for the pipeline run.", - "$ref": "PipelineResources" - }, - "outputs": { - "description": "Pipeline output arguments; keys are defined in the pipeline\ndocumentation. All output parameters of without default values\nmust be specified. If parameters with defaults are specified\nhere, the defaults will be overridden.", + "gcsSources": { "additionalProperties": { - "type": "string" + "$ref": "RepeatedString" }, "type": "object" }, - "serviceAccount": { - "description": "The Google Cloud Service Account that will be used to access data and\nservices. By default, the compute service account associated with\n`projectId` is used.", - "$ref": "ServiceAccount" + "gcsSinks": { + "additionalProperties": { + "$ref": "RepeatedString" + }, + "type": "object" }, - "labels": { - "description": "Labels to apply to this pipeline run. Labels will also be applied to\ncompute resources (VM, disks) created by this pipeline run. When listing\noperations, operations can filtered by labels.\nLabel keys may not be empty; label values may be empty. Non-empty labels\nmust be 1-63 characters long, and comply with [RFC1035]\n(https://www.ietf.org/rfc/rfc1035.txt).\nSpecifically, the name must be 1-63 characters long and match the regular\nexpression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first\ncharacter must be a lowercase letter, and all following characters must be\na dash, lowercase letter, or digit, except the last character, which cannot\nbe a dash.", + "disks": { "additionalProperties": { "type": "string" }, "type": "object" }, - "logging": { - "description": "Required. Logging options. Used by the service to communicate results\nto the user.", - "$ref": "LoggingOptions" + "machineType": { + "type": "string" }, - "inputs": { - "description": "Pipeline input arguments; keys are defined in the pipeline documentation.\nAll input parameters that do not have default values must be specified.\nIf parameters with defaults are specified here, the defaults will be\noverridden.", + "cmd": { + "type": "string" + }, + "vars": { "additionalProperties": { "type": "string" }, "type": "object" }, - "keepVmAliveOnFailureDuration": { - "description": "How long to keep the VM up after a failure (for example docker command\nfailed, copying input or output files failed, etc). While the VM is up, one\ncan ssh into the VM to debug. 
Default is 0; maximum allowed value is 1 day.", - "type": "string", - "format": "google-duration" + "image": { + "type": "string" }, - "projectId": { - "description": "Required. The project in which to run the pipeline. The caller must have\nWRITER access to all Google Cloud services and resources (e.g. Google\nCompute Engine) will be used.", + "gcsLogPath": { "type": "string" } - }, - "id": "RunPipelineArgs" + } }, - "TimestampEvent": { - "description": "Stores the list of events and times they occured for major events in job\nexecution.", + "OperationEvent": { "type": "object", "properties": { - "description": { - "description": "String indicating the type of event", + "endTime": { + "description": "Optional time of when event finished. An event can have a start time and no\nfinish time. If an event has a finish time, there must be a start time.", + "format": "google-datetime", "type": "string" }, - "timestamp": { - "description": "The time this event occured.", - "type": "string", - "format": "google-datetime" + "startTime": { + "description": "Optional time of when event started.", + "format": "google-datetime", + "type": "string" + }, + "description": { + "description": "Required description of event.", + "type": "string" } }, - "id": "TimestampEvent" - }, - "CancelOperationRequest": { - "description": "The request message for Operations.CancelOperation.", - "type": "object", - "properties": {}, - "id": "CancelOperationRequest" + "id": "OperationEvent", + "description": "An event that occurred during an Operation." }, "ListOperationsResponse": { "description": "The response message for Operations.ListOperations.", "type": "object", "properties": { - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - }, "operations": { - "description": "A list of operations that matches the specified filter in the request.", "type": "array", "items": { "$ref": "Operation" - } + }, + "description": "A list of operations that matches the specified filter in the request." + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" } }, "id": "ListOperationsResponse" }, - "ImportReadGroupSetsResponse": { - "description": "The read group set import response.", + "RepeatedString": { + "id": "RepeatedString", "type": "object", "properties": { - "readGroupSetIds": { - "description": "IDs of the read group sets that were created.", + "values": { "type": "array", "items": { "type": "string" } } - }, - "id": "ImportReadGroupSetsResponse" + } }, "OperationMetadata": { "description": "Metadata describing an Operation.", "type": "object", "properties": { + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optionally provided by the caller when submitting the request that creates\nthe operation.", + "type": "object" + }, + "createTime": { + "description": "The time at which the job was submitted to the Genomics service.", + "format": "google-datetime", + "type": "string" + }, + "projectId": { + "description": "The Google Cloud Project in which the job is scoped.", + "type": "string" + }, "clientId": { "description": "This field is deprecated. Use `labels` instead. Optionally provided by the\ncaller when submitting the request that creates the operation.", "type": "string" }, - "request": { - "description": "The original request that started the operation. Note that this will be in\ncurrent version of the API. 
If the operation was started with v1beta2 API\nand a GetOperation is performed on v1 API, a v1 request will be returned.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "endTime": { + "description": "The time at which the job stopped running.", + "format": "google-datetime", + "type": "string" }, "events": { "description": "Optional event messages that were generated during the job's execution.\nThis also contains any warnings that were generated during import\nor export.", @@ -702,385 +360,727 @@ "$ref": "OperationEvent" } }, - "endTime": { - "description": "The time at which the job stopped running.", - "type": "string", - "format": "google-datetime" + "startTime": { + "description": "The time at which the job began to run.", + "format": "google-datetime", + "type": "string" }, - "labels": { - "description": "Optionally provided by the caller when submitting the request that creates\nthe operation.", + "request": { "additionalProperties": { - "type": "string" + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" }, + "description": "The original request that started the operation. Note that this will be in\ncurrent version of the API. If the operation was started with v1beta2 API\nand a GetOperation is performed on v1 API, a v1 request will be returned.", "type": "object" }, - "createTime": { - "description": "The time at which the job was submitted to the Genomics service.", - "type": "string", - "format": "google-datetime" - }, "runtimeMetadata": { - "description": "Runtime metadata on this Operation.", + "type": "object", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "type": "object" - }, - "startTime": { - "description": "The time at which the job began to run.", - "type": "string", - "format": "google-datetime" - }, - "projectId": { - "description": "The Google Cloud Project in which the job is scoped.", - "type": "string" + "description": "Runtime metadata on this Operation." } }, "id": "OperationMetadata" }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" - }, - "RunPipelineRequest": { - "description": "The request to run a pipeline. If `pipelineId` is specified, it\nrefers to a saved pipeline created with CreatePipeline and set as\nthe `pipelineId` of the returned Pipeline object. If\n`ephemeralPipeline` is specified, that pipeline is run once\nwith the given args and not saved. It is an error to specify both\n`pipelineId` and `ephemeralPipeline`. `pipelineArgs`\nmust be specified.", + "RunPipelineArgs": { + "description": "The pipeline run arguments.", "type": "object", "properties": { - "pipelineId": { - "description": "The already created pipeline to run.", + "serviceAccount": { + "$ref": "ServiceAccount", + "description": "The Google Cloud Service Account that will be used to access data and\nservices. By default, the compute service account associated with\n`projectId` is used." 
+ }, + "inputs": { + "description": "Pipeline input arguments; keys are defined in the pipeline documentation.\nAll input parameters that do not have default values must be specified.\nIf parameters with defaults are specified here, the defaults will be\noverridden.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this pipeline run. Labels will also be applied to\ncompute resources (VM, disks) created by this pipeline run. When listing\noperations, operations can filtered by labels.\nLabel keys may not be empty; label values may be empty. Non-empty labels\nmust be 1-63 characters long, and comply with [RFC1035]\n(https://www.ietf.org/rfc/rfc1035.txt).\nSpecifically, the name must be 1-63 characters long and match the regular\nexpression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first\ncharacter must be a lowercase letter, and all following characters must be\na dash, lowercase letter, or digit, except the last character, which cannot\nbe a dash.", + "type": "object" + }, + "logging": { + "$ref": "LoggingOptions", + "description": "Required. Logging options. Used by the service to communicate results\nto the user." + }, + "keepVmAliveOnFailureDuration": { + "description": "How long to keep the VM up after a failure (for example docker command\nfailed, copying input or output files failed, etc). While the VM is up, one\ncan ssh into the VM to debug. Default is 0; maximum allowed value is 1 day.", + "format": "google-duration", "type": "string" }, - "ephemeralPipeline": { - "description": "A new pipeline object to run once and then delete.", - "$ref": "Pipeline" + "resources": { + "description": "Specifies resource requirements/overrides for the pipeline run.", + "$ref": "PipelineResources" }, - "pipelineArgs": { - "description": "The arguments to use when running this pipeline.", - "$ref": "RunPipelineArgs" + "outputs": { + "additionalProperties": { + "type": "string" + }, + "description": "Pipeline output arguments; keys are defined in the pipeline\ndocumentation. All output parameters of without default values\nmust be specified. If parameters with defaults are specified\nhere, the defaults will be overridden.", + "type": "object" + }, + "projectId": { + "description": "Required. The project in which to run the pipeline. The caller must have\nWRITER access to all Google Cloud services and resources (e.g. Google\nCompute Engine) will be used.", + "type": "string" + }, + "clientId": { + "description": "This field is deprecated. Use `labels` instead. Client-specified pipeline\noperation identifier.", + "type": "string" } }, - "id": "RunPipelineRequest" + "id": "RunPipelineArgs" }, - "PipelineParameter": { - "description": "Parameters facilitate setting and delivering data into the\npipeline's execution environment. They are defined at create time,\nwith optional defaults, and can be overridden at run time.\n\nIf `localCopy` is unset, then the parameter specifies a string that\nis passed as-is into the pipeline, as the value of the environment\nvariable with the given name. A default value can be optionally\nspecified at create time. The default can be overridden at run time\nusing the inputs map. If no default is given, a value must be\nsupplied at runtime.\n\nIf `localCopy` is defined, then the parameter specifies a data\nsource or sink, both in Google Cloud Storage and on the Docker container\nwhere the pipeline computation is run. 
The service account associated with\nthe Pipeline (by\ndefault the project's Compute Engine service account) must have access to the\nGoogle Cloud Storage paths.\n\nAt run time, the Google Cloud Storage paths can be overridden if a default\nwas provided at create time, or must be set otherwise. The pipeline runner\nshould add a key/value pair to either the inputs or outputs map. The\nindicated data copies will be carried out before/after pipeline execution,\njust as if the corresponding arguments were provided to `gsutil cp`.\n\nFor example: Given the following `PipelineParameter`, specified\nin the `inputParameters` list:\n\n```\n{name: \"input_file\", localCopy: {path: \"file.txt\", disk: \"pd1\"}}\n```\n\nwhere `disk` is defined in the `PipelineResources` object as:\n\n```\n{name: \"pd1\", mountPoint: \"/mnt/disk/\"}\n```\n\nWe create a disk named `pd1`, mount it on the host VM, and map\n`/mnt/pd1` to `/mnt/disk` in the docker container. At\nruntime, an entry for `input_file` would be required in the inputs\nmap, such as:\n\n```\n inputs[\"input_file\"] = \"gs://my-bucket/bar.txt\"\n```\n\nThis would generate the following gsutil call:\n\n```\n gsutil cp gs://my-bucket/bar.txt /mnt/pd1/file.txt\n```\n\nThe file `/mnt/pd1/file.txt` maps to `/mnt/disk/file.txt` in the\nDocker container. Acceptable paths are:\n\n\u003ctable\u003e\n \u003cthead\u003e\n \u003ctr\u003e\u003cth\u003eGoogle Cloud storage path\u003c/th\u003e\u003cth\u003eLocal path\u003c/th\u003e\u003c/tr\u003e\n \u003c/thead\u003e\n \u003ctbody\u003e\n \u003ctr\u003e\u003ctd\u003efile\u003c/td\u003e\u003ctd\u003efile\u003c/td\u003e\u003c/tr\u003e\n \u003ctr\u003e\u003ctd\u003eglob\u003c/td\u003e\u003ctd\u003edirectory\u003c/td\u003e\u003c/tr\u003e\n \u003c/tbody\u003e\n\u003c/table\u003e\n\nFor outputs, the direction of the copy is reversed:\n\n```\n gsutil cp /mnt/disk/file.txt gs://my-bucket/bar.txt\n```\n\nAcceptable paths are:\n\n\u003ctable\u003e\n \u003cthead\u003e\n \u003ctr\u003e\u003cth\u003eLocal path\u003c/th\u003e\u003cth\u003eGoogle Cloud Storage path\u003c/th\u003e\u003c/tr\u003e\n \u003c/thead\u003e\n \u003ctbody\u003e\n \u003ctr\u003e\u003ctd\u003efile\u003c/td\u003e\u003ctd\u003efile\u003c/td\u003e\u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003efile\u003c/td\u003e\n \u003ctd\u003edirectory - directory must already exist\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003eglob\u003c/td\u003e\n \u003ctd\u003edirectory - directory will be created if it doesn't exist\u003c/td\u003e\u003c/tr\u003e\n \u003c/tbody\u003e\n\u003c/table\u003e\n\nOne restriction due to docker limitations, is that for outputs that are found\non the boot disk, the local path cannot be a glob and must be a file.", + "ListPipelinesResponse": { "type": "object", "properties": { - "description": { - "description": "Human-readable description.", + "nextPageToken": { + "description": "The token to use to get the next page of results.", "type": "string" }, - "defaultValue": { - "description": "The default value for this parameter. Can be overridden at runtime.\nIf `localCopy` is present, then this must be a Google Cloud Storage path\nbeginning with `gs://`.", + "pipelines": { + "description": "The matched pipelines.", + "type": "array", + "items": { + "$ref": "Pipeline" + } + } + }, + "id": "ListPipelinesResponse", + "description": "The response of ListPipelines. Contains at most `pageSize`\npipelines. 
If it contains `pageSize` pipelines, and more pipelines\nexist, then `nextPageToken` will be populated and should be\nused as the `pageToken` argument to a subsequent ListPipelines\nrequest." + }, + "SetOperationStatusRequest": { + "type": "object", + "properties": { + "errorCode": { + "type": "string", + "enumDescriptions": [ + "Not an error; returned on success\n\nHTTP Mapping: 200 OK", + "The operation was cancelled, typically by the caller.\n\nHTTP Mapping: 499 Client Closed Request", + "Unknown error. For example, this error may be returned when\na `Status` value received from another address space belongs to\nan error space that is not known in this address space. Also\nerrors raised by APIs that do not return enough error information\nmay be converted to this error.\n\nHTTP Mapping: 500 Internal Server Error", + "The client specified an invalid argument. Note that this differs\nfrom `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments\nthat are problematic regardless of the state of the system\n(e.g., a malformed file name).\n\nHTTP Mapping: 400 Bad Request", + "The deadline expired before the operation could complete. For operations\nthat change the state of the system, this error may be returned\neven if the operation has completed successfully. For example, a\nsuccessful response from a server could have been delayed long\nenough for the deadline to expire.\n\nHTTP Mapping: 504 Gateway Timeout", + "Some requested entity (e.g., file or directory) was not found.\n\nNote to server developers: if a request is denied for an entire class\nof users, such as gradual feature rollout or undocumented whitelist,\n`NOT_FOUND` may be used. If a request is denied for some users within\na class of users, such as user-based access control, `PERMISSION_DENIED`\nmust be used.\n\nHTTP Mapping: 404 Not Found", + "The entity that a client attempted to create (e.g., file or directory)\nalready exists.\n\nHTTP Mapping: 409 Conflict", + "The caller does not have permission to execute the specified\noperation. `PERMISSION_DENIED` must not be used for rejections\ncaused by exhausting some resource (use `RESOURCE_EXHAUSTED`\ninstead for those errors). `PERMISSION_DENIED` must not be\nused if the caller can not be identified (use `UNAUTHENTICATED`\ninstead for those errors). This error code does not imply the\nrequest is valid or the requested entity exists or satisfies\nother pre-conditions.\n\nHTTP Mapping: 403 Forbidden", + "The request does not have valid authentication credentials for the\noperation.\n\nHTTP Mapping: 401 Unauthorized", + "Some resource has been exhausted, perhaps a per-user quota, or\nperhaps the entire file system is out of space.\n\nHTTP Mapping: 429 Too Many Requests", + "The operation was rejected because the system is not in a state\nrequired for the operation's execution. For example, the directory\nto be deleted is non-empty, an rmdir operation is applied to\na non-directory, etc.\n\nService implementors can use the following guidelines to decide\nbetween `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:\n (a) Use `UNAVAILABLE` if the client can retry just the failing call.\n (b) Use `ABORTED` if the client should retry at a higher level\n (e.g., restarting a read-modify-write sequence).\n (c) Use `FAILED_PRECONDITION` if the client should not retry until\n the system state has been explicitly fixed. 
E.g., if an \"rmdir\"\n fails because the directory is non-empty, `FAILED_PRECONDITION`\n should be returned since the client should not retry unless\n the files are deleted from the directory.\n\nHTTP Mapping: 400 Bad Request", + "The operation was aborted, typically due to a concurrency issue such as\na sequencer check failure or transaction abort.\n\nSee the guidelines above for deciding between `FAILED_PRECONDITION`,\n`ABORTED`, and `UNAVAILABLE`.\n\nHTTP Mapping: 409 Conflict", + "The operation was attempted past the valid range. E.g., seeking or\nreading past end-of-file.\n\nUnlike `INVALID_ARGUMENT`, this error indicates a problem that may\nbe fixed if the system state changes. For example, a 32-bit file\nsystem will generate `INVALID_ARGUMENT` if asked to read at an\noffset that is not in the range [0,2^32-1], but it will generate\n`OUT_OF_RANGE` if asked to read from an offset past the current\nfile size.\n\nThere is a fair bit of overlap between `FAILED_PRECONDITION` and\n`OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific\nerror) when it applies so that callers who are iterating through\na space can easily look for an `OUT_OF_RANGE` error to detect when\nthey are done.\n\nHTTP Mapping: 400 Bad Request", + "The operation is not implemented or is not supported/enabled in this\nservice.\n\nHTTP Mapping: 501 Not Implemented", + "Internal errors. This means that some invariants expected by the\nunderlying system have been broken. This error code is reserved\nfor serious errors.\n\nHTTP Mapping: 500 Internal Server Error", + "The service is currently unavailable. This is most likely a\ntransient condition, which can be corrected by retrying with\na backoff.\n\nSee the guidelines above for deciding between `FAILED_PRECONDITION`,\n`ABORTED`, and `UNAVAILABLE`.\n\nHTTP Mapping: 503 Service Unavailable", + "Unrecoverable data loss or corruption.\n\nHTTP Mapping: 500 Internal Server Error" + ], + "enum": [ + "OK", + "CANCELLED", + "UNKNOWN", + "INVALID_ARGUMENT", + "DEADLINE_EXCEEDED", + "NOT_FOUND", + "ALREADY_EXISTS", + "PERMISSION_DENIED", + "UNAUTHENTICATED", + "RESOURCE_EXHAUSTED", + "FAILED_PRECONDITION", + "ABORTED", + "OUT_OF_RANGE", + "UNIMPLEMENTED", + "INTERNAL", + "UNAVAILABLE", + "DATA_LOSS" + ] + }, + "timestampEvents": { + "type": "array", + "items": { + "$ref": "TimestampEvent" + } + }, + "operationId": { "type": "string" }, - "localCopy": { - "description": "If present, this parameter is marked for copying to and from the VM.\n`LocalCopy` indicates where on the VM the file should be. The value\ngiven to this parameter (either at runtime or using `defaultValue`)\nmust be the remote path where the file should be.", - "$ref": "LocalCopy" + "errorMessage": { + "type": "string" }, - "name": { - "description": "Required. Name of the parameter - the pipeline runner uses this string\nas the key to the input and output maps in RunPipeline.", + "validationToken": { + "format": "uint64", "type": "string" } }, - "id": "PipelineParameter" + "id": "SetOperationStatusRequest", + "description": "Request to set operation status. Should only be used by VMs\ncreated by the Pipelines Service and not by end users." 
}, - "Disk": { - "description": "A Google Compute Engine disk resource specification.", + "ComputeEngine": { + "description": "Describes a Compute Engine resource that is being managed by a running\npipeline.", "type": "object", "properties": { - "mountPoint": { - "description": "Required at create time and cannot be overridden at run time.\nSpecifies the path in the docker container where files on\nthis disk should be located. For example, if `mountPoint`\nis `/mnt/disk`, and the parameter has `localPath`\n`inputs/file.txt`, the docker container can access the data at\n`/mnt/disk/inputs/file.txt`.", + "machineType": { + "description": "The machine type of the instance.", "type": "string" }, - "source": { - "description": "The full or partial URL of the persistent disk to attach. See\nhttps://cloud.google.com/compute/docs/reference/latest/instances#resource\nand\nhttps://cloud.google.com/compute/docs/disks/persistent-disks#snapshots\nfor more details.", + "diskNames": { + "description": "The names of the disks that were created for this pipeline.", + "type": "array", + "items": { + "type": "string" + } + }, + "instanceName": { + "description": "The instance on which the operation is running.", + "type": "string" + }, + "zone": { + "description": "The availability zone in which the instance resides.", + "type": "string" + } + }, + "id": "ComputeEngine" + }, + "ImportVariantsResponse": { + "id": "ImportVariantsResponse", + "description": "The variant data import response.", + "type": "object", + "properties": { + "callSetIds": { + "description": "IDs of the call sets created during the import.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "TimestampEvent": { + "description": "Stores the list of events and times they occured for major events in job\nexecution.", + "type": "object", + "properties": { + "timestamp": { + "description": "The time this event occured.", + "format": "google-datetime", "type": "string" }, + "description": { + "description": "String indicating the type of event", + "type": "string" + } + }, + "id": "TimestampEvent" + }, + "LocalCopy": { + "id": "LocalCopy", + "description": "LocalCopy defines how a remote file should be copied to and from the VM.", + "type": "object", + "properties": { + "disk": { + "description": "Required. The name of the disk where this parameter is\nlocated. Can be the name of one of the disks specified in the\nResources field, or \"boot\", which represents the Docker\ninstance's boot disk and has a mount point of `/`.", + "type": "string" + }, + "path": { + "description": "Required. The path within the user's docker container where\nthis input should be localized to and from, relative to the specified\ndisk's mount point. For example: file.txt,", + "type": "string" + } + } + }, + "DockerExecutor": { + "description": "The Docker execuctor specification.", + "type": "object", + "properties": { + "imageName": { + "description": "Required. Image name from either Docker Hub or Google Container Registry.\nUsers that run pipelines must have READ access to the image.", + "type": "string" + }, + "cmd": { + "description": "Required. The command or newline delimited script to run. 
The command\nstring will be executed within a bash shell.\n\nIf the command exits with a non-zero exit code, output parameter\nde-localization will be skipped and the pipeline operation's\n`error` field will be populated.\n\nMaximum command string length is 16384.", + "type": "string" + } + }, + "id": "DockerExecutor" + }, + "Disk": { + "properties": { "autoDelete": { "description": "Deprecated. Disks created by the Pipelines API will be deleted at the end\nof the pipeline run, regardless of what this field is set to.", "type": "boolean" }, "sizeGb": { "description": "The size of the disk. Defaults to 500 (GB).\nThis field is not applicable for local SSD.", - "type": "integer", - "format": "int32" + "format": "int32", + "type": "integer" + }, + "mountPoint": { + "description": "Required at create time and cannot be overridden at run time.\nSpecifies the path in the docker container where files on\nthis disk should be located. For example, if `mountPoint`\nis `/mnt/disk`, and the parameter has `localPath`\n`inputs/file.txt`, the docker container can access the data at\n`/mnt/disk/inputs/file.txt`.", + "type": "string" + }, + "source": { + "description": "The full or partial URL of the persistent disk to attach. See\nhttps://cloud.google.com/compute/docs/reference/latest/instances#resource\nand\nhttps://cloud.google.com/compute/docs/disks/persistent-disks#snapshots\nfor more details.", + "type": "string" }, "name": { "description": "Required. The name of the disk that can be used in the pipeline\nparameters. Must be 1 - 63 characters.\nThe name \"boot\" is reserved for system use.", "type": "string" }, "type": { - "description": "Required. The type of the disk to create.", - "enum": [ - "TYPE_UNSPECIFIED", - "PERSISTENT_HDD", - "PERSISTENT_SSD", - "LOCAL_SSD" - ], "enumDescriptions": [ "Default disk type. Use one of the other options below.", "Specifies a Google Compute Engine persistent hard disk. See\nhttps://cloud.google.com/compute/docs/disks/#pdspecs for details.", "Specifies a Google Compute Engine persistent solid-state disk. See\nhttps://cloud.google.com/compute/docs/disks/#pdspecs for details.", "Specifies a Google Compute Engine local SSD.\nSee https://cloud.google.com/compute/docs/disks/local-ssd for details." ], + "enum": [ + "TYPE_UNSPECIFIED", + "PERSISTENT_HDD", + "PERSISTENT_SSD", + "LOCAL_SSD" + ], + "description": "Required. The type of the disk to create.", "type": "string" } }, - "id": "Disk" + "id": "Disk", + "description": "A Google Compute Engine disk resource specification.", + "type": "object" }, - "LocalCopy": { - "description": "LocalCopy defines how a remote file should be copied to and from the VM.", + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "PipelineParameter": { + "description": "Parameters facilitate setting and delivering data into the\npipeline's execution environment. They are defined at create time,\nwith optional defaults, and can be overridden at run time.\n\nIf `localCopy` is unset, then the parameter specifies a string that\nis passed as-is into the pipeline, as the value of the environment\nvariable with the given name. 
A default value can be optionally\nspecified at create time. The default can be overridden at run time\nusing the inputs map. If no default is given, a value must be\nsupplied at runtime.\n\nIf `localCopy` is defined, then the parameter specifies a data\nsource or sink, both in Google Cloud Storage and on the Docker container\nwhere the pipeline computation is run. The service account associated with\nthe Pipeline (by\ndefault the project's Compute Engine service account) must have access to the\nGoogle Cloud Storage paths.\n\nAt run time, the Google Cloud Storage paths can be overridden if a default\nwas provided at create time, or must be set otherwise. The pipeline runner\nshould add a key/value pair to either the inputs or outputs map. The\nindicated data copies will be carried out before/after pipeline execution,\njust as if the corresponding arguments were provided to `gsutil cp`.\n\nFor example: Given the following `PipelineParameter`, specified\nin the `inputParameters` list:\n\n```\n{name: \"input_file\", localCopy: {path: \"file.txt\", disk: \"pd1\"}}\n```\n\nwhere `disk` is defined in the `PipelineResources` object as:\n\n```\n{name: \"pd1\", mountPoint: \"/mnt/disk/\"}\n```\n\nWe create a disk named `pd1`, mount it on the host VM, and map\n`/mnt/pd1` to `/mnt/disk` in the docker container. At\nruntime, an entry for `input_file` would be required in the inputs\nmap, such as:\n\n```\n inputs[\"input_file\"] = \"gs://my-bucket/bar.txt\"\n```\n\nThis would generate the following gsutil call:\n\n```\n gsutil cp gs://my-bucket/bar.txt /mnt/pd1/file.txt\n```\n\nThe file `/mnt/pd1/file.txt` maps to `/mnt/disk/file.txt` in the\nDocker container. Acceptable paths are:\n\n\u003ctable\u003e\n \u003cthead\u003e\n \u003ctr\u003e\u003cth\u003eGoogle Cloud storage path\u003c/th\u003e\u003cth\u003eLocal path\u003c/th\u003e\u003c/tr\u003e\n \u003c/thead\u003e\n \u003ctbody\u003e\n \u003ctr\u003e\u003ctd\u003efile\u003c/td\u003e\u003ctd\u003efile\u003c/td\u003e\u003c/tr\u003e\n \u003ctr\u003e\u003ctd\u003eglob\u003c/td\u003e\u003ctd\u003edirectory\u003c/td\u003e\u003c/tr\u003e\n \u003c/tbody\u003e\n\u003c/table\u003e\n\nFor outputs, the direction of the copy is reversed:\n\n```\n gsutil cp /mnt/disk/file.txt gs://my-bucket/bar.txt\n```\n\nAcceptable paths are:\n\n\u003ctable\u003e\n \u003cthead\u003e\n \u003ctr\u003e\u003cth\u003eLocal path\u003c/th\u003e\u003cth\u003eGoogle Cloud Storage path\u003c/th\u003e\u003c/tr\u003e\n \u003c/thead\u003e\n \u003ctbody\u003e\n \u003ctr\u003e\u003ctd\u003efile\u003c/td\u003e\u003ctd\u003efile\u003c/td\u003e\u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003efile\u003c/td\u003e\n \u003ctd\u003edirectory - directory must already exist\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003eglob\u003c/td\u003e\n \u003ctd\u003edirectory - directory will be created if it doesn't exist\u003c/td\u003e\u003c/tr\u003e\n \u003c/tbody\u003e\n\u003c/table\u003e\n\nOne restriction due to docker limitations, is that for outputs that are found\non the boot disk, the local path cannot be a glob and must be a file.", "type": "object", "properties": { - "path": { - "description": "Required. The path within the user's docker container where\nthis input should be localized to and from, relative to the specified\ndisk's mount point. For example: file.txt,", + "name": { + "description": "Required. Name of the parameter - the pipeline runner uses this string\nas the key to the input and output maps in RunPipeline.", "type": "string" }, - "disk": { - "description": "Required. 
The name of the disk where this parameter is\nlocated. Can be the name of one of the disks specified in the\nResources field, or \"boot\", which represents the Docker\ninstance's boot disk and has a mount point of `/`.", + "description": { + "description": "Human-readable description.", + "type": "string" + }, + "localCopy": { + "$ref": "LocalCopy", + "description": "If present, this parameter is marked for copying to and from the VM.\n`LocalCopy` indicates where on the VM the file should be. The value\ngiven to this parameter (either at runtime or using `defaultValue`)\nmust be the remote path where the file should be." + }, + "defaultValue": { + "description": "The default value for this parameter. Can be overridden at runtime.\nIf `localCopy` is present, then this must be a Google Cloud Storage path\nbeginning with `gs://`.", "type": "string" } }, - "id": "LocalCopy" - }, - "RuntimeMetadata": { - "description": "Runtime metadata that will be populated in the\nruntimeMetadata\nfield of the Operation associated with a RunPipeline execution.", - "type": "object", - "properties": { - "computeEngine": { - "description": "Execution information specific to Google Compute Engine.", - "$ref": "ComputeEngine" + "id": "PipelineParameter" + } + }, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "protocol": "rest", + "version": "v1alpha2", + "baseUrl": "https://genomics.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/genomics": { + "description": "View and manage Genomics data" + }, + "https://www.googleapis.com/auth/compute": { + "description": "View and manage your Google Compute Engine resources" } - }, - "id": "RuntimeMetadata" - }, - "Pipeline": { - "description": "The pipeline object. Represents a transformation from a set of input\nparameters to a set of output parameters. The transformation is defined\nas a docker image and command to run within that image. Each pipeline\nis run on a Google Compute Engine VM. 
A pipeline can be created with the\n`create` method and then later run with the `run` method, or a pipeline can\nbe defined and run all at once with the `run` method.", - "type": "object", - "properties": { - "description": { - "description": "User-specified description.", - "type": "string" + } + } + }, + "kind": "discovery#restDescription", + "description": "Upload, process, query, and search Genomics data in the cloud.", + "servicePath": "", + "rootUrl": "https://genomics.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "genomics", + "batchPath": "batch", + "revision": "20170209", + "id": "genomics:v1alpha2", + "documentationLink": "https://cloud.google.com/genomics", + "title": "Genomics API", + "ownerName": "Google", + "discoveryVersion": "v1", + "resources": { + "pipelines": { + "methods": { + "get": { + "path": "v1alpha2/pipelines/{pipelineId}", + "id": "genomics.pipelines.get", + "description": "Retrieves a pipeline based on ID.\n\nCaller must have READ permission to the project.", + "parameterOrder": [ + "pipelineId" + ], + "response": { + "$ref": "Pipeline" + }, + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "pipelineId": { + "required": true, + "type": "string", + "location": "path", + "description": "Caller must have READ access to the project in which this pipeline\nis defined." + } + }, + "flatPath": "v1alpha2/pipelines/{pipelineId}" }, - "inputParameters": { - "description": "Input parameters of the pipeline.", - "type": "array", - "items": { - "$ref": "PipelineParameter" - } + "setOperationStatus": { + "description": "Sets status of a given operation. Any new timestamps (as determined by\ndescription) are appended to TimestampEvents. Should only be called by VMs\ncreated by the Pipelines Service and not by end users.", + "request": { + "$ref": "SetOperationStatusRequest" + }, + "response": { + "$ref": "Empty" + }, + "parameterOrder": [], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": {}, + "flatPath": "v1alpha2/pipelines:setOperationStatus", + "path": "v1alpha2/pipelines:setOperationStatus", + "id": "genomics.pipelines.setOperationStatus" }, - "resources": { - "description": "Required. Specifies resource requirements for the pipeline run.\nRequired fields:\n\n*\nminimumCpuCores\n\n*\nminimumRamGb", - "$ref": "PipelineResources" + "delete": { + "flatPath": "v1alpha2/pipelines/{pipelineId}", + "path": "v1alpha2/pipelines/{pipelineId}", + "id": "genomics.pipelines.delete", + "description": "Deletes a pipeline based on ID.\n\nCaller must have WRITE permission to the project.", + "parameterOrder": [ + "pipelineId" + ], + "response": { + "$ref": "Empty" + }, + "httpMethod": "DELETE", + "parameters": { + "pipelineId": { + "description": "Caller must have WRITE access to the project in which this pipeline\nis defined.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ] }, - "pipelineId": { - "description": "Unique pipeline id that is generated by the service when CreatePipeline\nis called. Cannot be specified in the Pipeline used in the\nCreatePipelineRequest, and will be populated in the response to\nCreatePipeline and all subsequent Get and List calls. 
Indicates that the\nservice has registered this pipeline.", - "type": "string" + "getControllerConfig": { + "response": { + "$ref": "ControllerConfig" + }, + "parameterOrder": [], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "operationId": { + "description": "The operation to retrieve controller configuration for.", + "type": "string", + "location": "query" + }, + "validationToken": { + "location": "query", + "format": "uint64", + "type": "string" + } + }, + "flatPath": "v1alpha2/pipelines:getControllerConfig", + "path": "v1alpha2/pipelines:getControllerConfig", + "id": "genomics.pipelines.getControllerConfig", + "description": "Gets controller configuration information. Should only be called\nby VMs created by the Pipelines Service and not by end users." }, - "outputParameters": { - "description": "Output parameters of the pipeline.", - "type": "array", - "items": { - "$ref": "PipelineParameter" + "list": { + "path": "v1alpha2/pipelines", + "id": "genomics.pipelines.list", + "description": "Lists pipelines.\n\nCaller must have READ permission to the project.", + "response": { + "$ref": "ListPipelinesResponse" + }, + "parameterOrder": [], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "pageSize": { + "location": "query", + "description": "Number of pipelines to return at once. Defaults to 256, and max\nis 2048.", + "format": "int32", + "type": "integer" + }, + "projectId": { + "location": "query", + "description": "Required. The name of the project to search for pipelines. Caller\nmust have READ access to this project.", + "type": "string" + }, + "namePrefix": { + "description": "Pipelines with names that match this prefix should be\nreturned. If unspecified, all pipelines in the project, up to\n`pageSize`, will be returned.", + "type": "string", + "location": "query" + }, + "pageToken": { + "description": "Token to use to indicate where to start getting results.\nIf unspecified, returns the first page of results.", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1alpha2/pipelines" + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "Pipeline" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": {}, + "flatPath": "v1alpha2/pipelines", + "id": "genomics.pipelines.create", + "path": "v1alpha2/pipelines", + "description": "Creates a pipeline that can be run later. Create takes a Pipeline that\nhas all fields other than `pipelineId` populated, and then returns\nthe same pipeline with `pipelineId` populated. This id can be used\nto run the pipeline.\n\nCaller must have WRITE permission to the project.", + "request": { + "$ref": "Pipeline" + } + }, + "run": { + "request": { + "$ref": "RunPipelineRequest" + }, + "description": "Runs a pipeline. If `pipelineId` is specified in the request, then\nrun a saved pipeline. 
If `ephemeralPipeline` is specified, then run\nthat pipeline once without saving a copy.\n\nThe caller must have READ permission to the project where the pipeline\nis stored and WRITE permission to the project where the pipeline will be\nrun, as VMs will be created and storage will be used.\n\nIf a pipeline operation is still running after 6 days, it will be canceled.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/genomics" + ], + "flatPath": "v1alpha2/pipelines:run", + "path": "v1alpha2/pipelines:run", + "id": "genomics.pipelines.run" + } + } + }, + "operations": { + "methods": { + "cancel": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "name": { + "pattern": "^operations/.+$", + "location": "path", + "description": "The name of the operation resource to be cancelled.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1alpha2/operations/{operationsId}:cancel", + "path": "v1alpha2/{+name}:cancel", + "id": "genomics.operations.cancel", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients may use Operations.GetOperation or Operations.ListOperations to check whether the cancellation succeeded or the operation completed despite cancellation.", + "request": { + "$ref": "CancelOperationRequest" } }, - "docker": { - "description": "Specifies the docker run information.", - "$ref": "DockerExecutor" - }, - "name": { - "description": "Required. A user specified pipeline name that does not have to be unique.\nThis name can be used for filtering Pipelines in ListPipelines.", - "type": "string" - }, - "projectId": { - "description": "Required. The project in which to create the pipeline. The caller must have\nWRITE access.", - "type": "string" - } - }, - "id": "Pipeline" - }, - "ControllerConfig": { - "description": "Stores the information that the controller will fetch from the\nserver in order to run. Should only be used by VMs created by the\nPipelines Service and not by end users.", - "type": "object", - "properties": { - "disks": { - "additionalProperties": { - "type": "string" + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListOperationsResponse" }, - "type": "object" - }, - "vars": { - "additionalProperties": { - "type": "string" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "parameters": { + "filter": { + "description": "A string for filtering Operations.\nThe following filter fields are supported:\n\n* projectId: Required. Corresponds to\n OperationMetadata.projectId.\n* createTime: The time this job was created, in seconds from the\n [epoch](http://en.wikipedia.org/wiki/Unix_time). Can use `\u003e=` and/or `\u003c=`\n operators.\n* status: Can be `RUNNING`, `SUCCESS`, `FAILURE`, or `CANCELED`. 
Only\n one status may be specified.\n* labels.key where key is a label key.\n\nExamples:\n\n* `projectId = my-project AND createTime \u003e= 1432140000`\n* `projectId = my-project AND createTime \u003e= 1432140000 AND createTime \u003c= 1432150000 AND status = RUNNING`\n* `projectId = my-project AND labels.color = *`\n* `projectId = my-project AND labels.color = red`", + "type": "string", + "location": "query" + }, + "name": { + "required": true, + "type": "string", + "pattern": "^operations$", + "location": "path", + "description": "The name of the operation collection." + }, + "pageToken": { + "location": "query", + "description": "The standard list page token.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "The maximum number of results to return. If unspecified, defaults to\n256. The maximum value is 2048.", + "format": "int32", + "type": "integer" + } }, - "type": "object" - }, - "cmd": { - "type": "string" + "flatPath": "v1alpha2/operations", + "id": "genomics.operations.list", + "path": "v1alpha2/{+name}", + "description": "Lists operations that match the specified filter in the request." }, - "gcsSinks": { - "additionalProperties": { - "$ref": "RepeatedString" + "get": { + "path": "v1alpha2/{+name}", + "id": "genomics.operations.get", + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "response": { + "$ref": "Operation" }, - "type": "object" - }, - "machineType": { - "type": "string" - }, - "gcsSources": { - "additionalProperties": { - "$ref": "RepeatedString" + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "pattern": "^operations/.+$", + "location": "path", + "description": "The name of the operation resource.", + "required": true, + "type": "string" + } }, - "type": "object" - }, - "image": { - "type": "string" - }, - "gcsLogPath": { - "type": "string" - } - }, - "id": "ControllerConfig" - }, - "ServiceAccount": { - "description": "A Google Cloud Service Account.", - "type": "object", - "properties": { - "email": { - "description": "Email address of the service account. 
Defaults to `default`,\nwhich uses the compute service account associated with the project.", - "type": "string" - }, - "scopes": { - "description": "List of scopes to be enabled for this service account on the VM.\nThe following scopes are automatically included:\n\n* https://www.googleapis.com/auth/compute\n* https://www.googleapis.com/auth/devstorage.full_control\n* https://www.googleapis.com/auth/genomics\n* https://www.googleapis.com/auth/logging.write\n* https://www.googleapis.com/auth/monitoring.write", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "ServiceAccount" - }, - "ImportVariantsResponse": { - "description": "The variant data import response.", - "type": "object", - "properties": { - "callSetIds": { - "description": "IDs of the call sets created during the import.", - "type": "array", - "items": { - "type": "string" - } + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/genomics" + ], + "flatPath": "v1alpha2/operations/{operationsId}" } - }, - "id": "ImportVariantsResponse" + } } }, - "revision": "20170125", - "basePath": "", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "discoveryVersion": "v1", - "baseUrl": "https://genomics.googleapis.com/", - "name": "genomics", "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "uploadType": { "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\")." }, "fields": { + "location": "query", "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "callback": { + "description": "JSONP", "type": "string", "location": "query" }, - "alt": { - "description": "Data format for response.", + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "alt": { + "default": "json", "enum": [ "json", "media", "proto" ], - "default": "json", + "type": "string", "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "type": "string" + "location": "query", + "description": "Data format for response." }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" + "key": { + "location": "query", + "description": "API key. 
Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" }, "bearer_token": { "description": "OAuth bearer token.", "type": "string", "location": "query" }, + "oauth_token": { + "type": "string", + "location": "query", + "description": "OAuth 2.0 token for the current user." + }, "upload_protocol": { + "location": "query", "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" } - }, - "documentationLink": "https://cloud.google.com/genomics", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1alpha2", - "rootUrl": "https://genomics.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/genomics/v1alpha2/genomics-gen.go b/vendor/google.golang.org/api/genomics/v1alpha2/genomics-gen.go index ada8f3f75..5a178a7e6 100644 --- a/vendor/google.golang.org/api/genomics/v1alpha2/genomics-gen.go +++ b/vendor/google.golang.org/api/genomics/v1alpha2/genomics-gen.go @@ -68,9 +68,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Operations *OperationsService @@ -84,6 +85,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -1670,6 +1675,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { @@ -1819,6 +1825,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", 
c.ifNoneMatch_) } @@ -2002,6 +2009,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2175,6 +2183,7 @@ func (c *PipelinesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pipeline) if err != nil { @@ -2298,6 +2307,7 @@ func (c *PipelinesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha2/pipelines/{pipelineId}") @@ -2436,6 +2446,7 @@ func (c *PipelinesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2588,6 +2599,7 @@ func (c *PipelinesGetControllerConfigCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2760,6 +2772,7 @@ func (c *PipelinesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2932,6 +2945,7 @@ func (c *PipelinesRunCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runpipelinerequest) if err != nil { @@ -3058,6 +3072,7 @@ func (c *PipelinesSetOperationStatusCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setoperationstatusrequest) if err != nil { diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go new file mode 100644 index 000000000..cb5e67c77 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/header.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "runtime" + "strings" +) + +// GoogleClientHeader returns the value to use for the x-goog-api-client +// header, which is used internally by Google. 
+func GoogleClientHeader(generatorVersion, clientElement string) string { + elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)} + if clientElement != "" { + elts = append(elts, clientElement) + } + elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion)) + return strings.Join(elts, " ") +} diff --git a/vendor/google.golang.org/api/gensupport/header_test.go b/vendor/google.golang.org/api/gensupport/header_test.go new file mode 100644 index 000000000..8fdb8633e --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/header_test.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "runtime" + "strings" + "testing" +) + +func TestGoogleClientHeader(t *testing.T) { + const genVersion = "20170101" + gv := strings.Replace(runtime.Version(), " ", "_", -1) + got := GoogleClientHeader(genVersion, "gccl/xyz") + want := fmt.Sprintf("gl-go/%s gccl/xyz gdcl/%s", gv, genVersion) + if got != want { + t.Errorf("got %q, want %q", got, want) + } + + got = GoogleClientHeader(genVersion, "") + want = fmt.Sprintf("gl-go/%s gdcl/%s", gv, genVersion) + if got != want { + t.Errorf("got %q, want %q", got, want) + } +} diff --git a/vendor/google.golang.org/api/gmail/v1/gmail-api.json b/vendor/google.golang.org/api/gmail/v1/gmail-api.json index db0a3305c..a2e8d7d9c 100644 --- a/vendor/google.golang.org/api/gmail/v1/gmail-api.json +++ b/vendor/google.golang.org/api/gmail/v1/gmail-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/8k2U6SLZqLD09rqAHvstdGUpdIg\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/bv4LA8S_yqDUVUFrxfui1vcVGzk\"", "discoveryVersion": "v1", "id": "gmail:v1", "name": "gmail", "version": "v1", - "revision": "20170110", + "revision": "20170206", "title": "Gmail API", "description": "Access Gmail mailboxes including sending user email.", "ownerDomain": "google.com", @@ -669,6 +669,19 @@ } } }, + "ListSmimeInfoResponse": { + "id": "ListSmimeInfoResponse", + "type": "object", + "properties": { + "smimeInfo": { + "type": "array", + "description": "List of SmimeInfo.", + "items": { + "$ref": "SmimeInfo" + } + } + } + }, "ListThreadsResponse": { "id": "ListThreadsResponse", "type": "object", @@ -981,6 +994,43 @@ } } }, + "SmimeInfo": { + "id": "SmimeInfo", + "type": "object", + "description": "An S/MIME email config.", + "properties": { + "encryptedKeyPassword": { + "type": "string", + "description": "Encrypted key password, when key is encrypted." + }, + "expiration": { + "type": "string", + "description": "When the certificate expires (in milliseconds since epoch).", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The immutable ID for the SmimeInfo." + }, + "isDefault": { + "type": "boolean", + "description": "Whether this SmimeInfo is the default one for this user's send-as address." + }, + "issuerCn": { + "type": "string", + "description": "The S/MIME certificate issuer's common name." + }, + "pem": { + "type": "string", + "description": "PEM formatted X509 concatenated certificate string (standard base64 encoding). Format used for returning key, which includes public key as well as certificate chain (not private key)." + }, + "pkcs12": { + "type": "string", + "description": "PKCS#12 format containing a single private/public key pair and certificate chain. 
This format is only accepted from client for creating a new SmimeInfo and is never returned, because the private key is not intended to be exported. PKCS#12 may be encrypted, in which case encryptedKeyPassword should be set appropriately.", + "format": "byte" + } + } + }, "SmtpMsa": { "id": "SmtpMsa", "type": "object", @@ -1080,7 +1130,7 @@ }, "restrictToDomain": { "type": "boolean", - "description": "Flag that determines whether responses are sent to recipients who are outside of the user's domain. This feature is only available for Google Apps users." + "description": "Flag that determines whether responses are sent to recipients who are outside of the user's domain. This feature is only available for G Suite users." }, "startTime": { "type": "string", @@ -1513,6 +1563,24 @@ "httpMethod": "GET", "description": "Lists the history of all changes to the given mailbox. History results are returned in chronological order (increasing historyId).", "parameters": { + "historyTypes": { + "type": "string", + "description": "History types to be returned by the function", + "enum": [ + "labelAdded", + "labelRemoved", + "messageAdded", + "messageDeleted" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "repeated": true, + "location": "query" + }, "labelId": { "type": "string", "description": "Only return messages with a label matching the ID.", @@ -1902,7 +1970,7 @@ "parameters": { "deleted": { "type": "boolean", - "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Apps Vault to a Vault administrator. Only used for Google Apps for Work accounts.", + "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Vault to a Vault administrator. Only used for G Suite accounts.", "default": "false", "location": "query" }, @@ -1980,7 +2048,7 @@ "parameters": { "deleted": { "type": "boolean", - "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Apps Vault to a Vault administrator. Only used for Google Apps for Work accounts.", + "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Vault to a Vault administrator. Only used for G Suite accounts.", "default": "false", "location": "query" }, @@ -2964,6 +3032,196 @@ "https://www.googleapis.com/auth/gmail.settings.sharing" ] } + }, + "resources": { + "smimeInfo": { + "methods": { + "delete": { + "id": "gmail.users.settings.sendAs.smimeInfo.delete", + "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}", + "httpMethod": "DELETE", + "description": "Deletes the specified S/MIME config for the specified send-as alias.", + "parameters": { + "id": { + "type": "string", + "description": "The immutable ID for the SmimeInfo.", + "required": true, + "location": "path" + }, + "sendAsEmail": { + "type": "string", + "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + "required": true, + "location": "path" + }, + "userId": { + "type": "string", + "description": "The user's email address. 
The special value me can be used to indicate the authenticated user.", + "default": "me", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "userId", + "sendAsEmail", + "id" + ], + "scopes": [ + "https://www.googleapis.com/auth/gmail.settings.basic", + "https://www.googleapis.com/auth/gmail.settings.sharing" + ] + }, + "get": { + "id": "gmail.users.settings.sendAs.smimeInfo.get", + "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}", + "httpMethod": "GET", + "description": "Gets the specified S/MIME config for the specified send-as alias.", + "parameters": { + "id": { + "type": "string", + "description": "The immutable ID for the SmimeInfo.", + "required": true, + "location": "path" + }, + "sendAsEmail": { + "type": "string", + "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + "required": true, + "location": "path" + }, + "userId": { + "type": "string", + "description": "The user's email address. The special value me can be used to indicate the authenticated user.", + "default": "me", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "userId", + "sendAsEmail", + "id" + ], + "response": { + "$ref": "SmimeInfo" + }, + "scopes": [ + "https://mail.google.com/", + "https://www.googleapis.com/auth/gmail.modify", + "https://www.googleapis.com/auth/gmail.readonly", + "https://www.googleapis.com/auth/gmail.settings.basic", + "https://www.googleapis.com/auth/gmail.settings.sharing" + ] + }, + "insert": { + "id": "gmail.users.settings.sendAs.smimeInfo.insert", + "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo", + "httpMethod": "POST", + "description": "Insert (upload) the given S/MIME config for the specified send-as alias. Note that pkcs12 format is required for the key.", + "parameters": { + "sendAsEmail": { + "type": "string", + "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + "required": true, + "location": "path" + }, + "userId": { + "type": "string", + "description": "The user's email address. The special value me can be used to indicate the authenticated user.", + "default": "me", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "userId", + "sendAsEmail" + ], + "request": { + "$ref": "SmimeInfo" + }, + "response": { + "$ref": "SmimeInfo" + }, + "scopes": [ + "https://www.googleapis.com/auth/gmail.settings.basic", + "https://www.googleapis.com/auth/gmail.settings.sharing" + ] + }, + "list": { + "id": "gmail.users.settings.sendAs.smimeInfo.list", + "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo", + "httpMethod": "GET", + "description": "Lists S/MIME configs for the specified send-as alias.", + "parameters": { + "sendAsEmail": { + "type": "string", + "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + "required": true, + "location": "path" + }, + "userId": { + "type": "string", + "description": "The user's email address. 
The special value me can be used to indicate the authenticated user.", + "default": "me", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "userId", + "sendAsEmail" + ], + "response": { + "$ref": "ListSmimeInfoResponse" + }, + "scopes": [ + "https://mail.google.com/", + "https://www.googleapis.com/auth/gmail.modify", + "https://www.googleapis.com/auth/gmail.readonly", + "https://www.googleapis.com/auth/gmail.settings.basic", + "https://www.googleapis.com/auth/gmail.settings.sharing" + ] + }, + "setDefault": { + "id": "gmail.users.settings.sendAs.smimeInfo.setDefault", + "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}/setDefault", + "httpMethod": "POST", + "description": "Sets the default S/MIME config for the specified send-as alias.", + "parameters": { + "id": { + "type": "string", + "description": "The immutable ID for the SmimeInfo.", + "required": true, + "location": "path" + }, + "sendAsEmail": { + "type": "string", + "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + "required": true, + "location": "path" + }, + "userId": { + "type": "string", + "description": "The user's email address. The special value me can be used to indicate the authenticated user.", + "default": "me", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "userId", + "sendAsEmail", + "id" + ], + "scopes": [ + "https://www.googleapis.com/auth/gmail.settings.basic", + "https://www.googleapis.com/auth/gmail.settings.sharing" + ] + } + } + } } } } diff --git a/vendor/google.golang.org/api/gmail/v1/gmail-gen.go b/vendor/google.golang.org/api/gmail/v1/gmail-gen.go index 395e6167f..840271809 100644 --- a/vendor/google.golang.org/api/gmail/v1/gmail-gen.go +++ b/vendor/google.golang.org/api/gmail/v1/gmail-gen.go @@ -90,9 +90,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Users *UsersService } @@ -104,6 +105,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewUsersService(s *Service) *UsersService { rs := &UsersService{s: s} rs.Drafts = NewUsersDraftsService(s) @@ -217,11 +222,23 @@ type UsersSettingsForwardingAddressesService struct { func NewUsersSettingsSendAsService(s *Service) *UsersSettingsSendAsService { rs := &UsersSettingsSendAsService{s: s} + rs.SmimeInfo = NewUsersSettingsSendAsSmimeInfoService(s) return rs } type UsersSettingsSendAsService struct { s *Service + + SmimeInfo *UsersSettingsSendAsSmimeInfoService +} + +func NewUsersSettingsSendAsSmimeInfoService(s *Service) *UsersSettingsSendAsSmimeInfoService { + rs := &UsersSettingsSendAsSmimeInfoService{s: s} + return rs +} + +type UsersSettingsSendAsSmimeInfoService struct { + s *Service } func NewUsersThreadsService(s *Service) *UsersThreadsService { @@ -1099,6 +1116,37 @@ func (s *ListSendAsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ListSmimeInfoResponse struct { + // SmimeInfo: List of SmimeInfo. 
+ SmimeInfo []*SmimeInfo `json:"smimeInfo,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "SmimeInfo") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SmimeInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListSmimeInfoResponse) MarshalJSON() ([]byte, error) { + type noMethod ListSmimeInfoResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ListThreadsResponse struct { // NextPageToken: Page token to retrieve the next page of results in the // list. @@ -1573,6 +1621,66 @@ func (s *SendAs) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SmimeInfo: An S/MIME email config. +type SmimeInfo struct { + // EncryptedKeyPassword: Encrypted key password, when key is encrypted. + EncryptedKeyPassword string `json:"encryptedKeyPassword,omitempty"` + + // Expiration: When the certificate expires (in milliseconds since + // epoch). + Expiration int64 `json:"expiration,omitempty,string"` + + // Id: The immutable ID for the SmimeInfo. + Id string `json:"id,omitempty"` + + // IsDefault: Whether this SmimeInfo is the default one for this user's + // send-as address. + IsDefault bool `json:"isDefault,omitempty"` + + // IssuerCn: The S/MIME certificate issuer's common name. + IssuerCn string `json:"issuerCn,omitempty"` + + // Pem: PEM formatted X509 concatenated certificate string (standard + // base64 encoding). Format used for returning key, which includes + // public key as well as certificate chain (not private key). + Pem string `json:"pem,omitempty"` + + // Pkcs12: PKCS#12 format containing a single private/public key pair + // and certificate chain. This format is only accepted from client for + // creating a new SmimeInfo and is never returned, because the private + // key is not intended to be exported. PKCS#12 may be encrypted, in + // which case encryptedKeyPassword should be set appropriately. + Pkcs12 string `json:"pkcs12,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "EncryptedKeyPassword") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"EncryptedKeyPassword") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SmimeInfo) MarshalJSON() ([]byte, error) { + type noMethod SmimeInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SmtpMsa: Configuration for communication with an SMTP service. type SmtpMsa struct { // Host: The hostname of the SMTP service. Required. @@ -1700,7 +1808,7 @@ type VacationSettings struct { // RestrictToDomain: Flag that determines whether responses are sent to // recipients who are outside of the user's domain. This feature is only - // available for Google Apps users. + // available for G Suite users. RestrictToDomain bool `json:"restrictToDomain,omitempty"` // StartTime: An optional start time for sending auto-replies (epoch @@ -1884,6 +1992,7 @@ func (c *UsersGetProfileCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2015,6 +2124,7 @@ func (c *UsersStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/stop") @@ -2117,6 +2227,7 @@ func (c *UsersWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.watchrequest) if err != nil { @@ -2307,6 +2418,7 @@ func (c *UsersDraftsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.draft) if err != nil { @@ -2509,6 +2621,7 @@ func (c *UsersDraftsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/drafts/{id}") @@ -2641,6 +2754,7 @@ func (c *UsersDraftsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2838,6 +2952,7 @@ func (c *UsersDraftsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3068,6 +3183,7 @@ func (c *UsersDraftsSendCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.draft) if err != nil { @@ -3324,6 +3440,7 @@ func (c *UsersDraftsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.draft) if err != nil { @@ -3502,6 +3619,19 @@ func (r *UsersHistoryService) List(userId string) *UsersHistoryListCall { return c } +// HistoryTypes sets the optional parameter "historyTypes": History +// types to be returned by the function +// +// Possible values: +// "labelAdded" +// "labelRemoved" +// "messageAdded" +// "messageDeleted" +func (c *UsersHistoryListCall) HistoryTypes(historyTypes ...string) *UsersHistoryListCall { + c.urlParams_.SetMulti("historyTypes", append([]string{}, historyTypes...)) + return c +} + // LabelId sets the optional parameter "labelId": Only return messages // with a label matching the ID. func (c *UsersHistoryListCall) LabelId(labelId string) *UsersHistoryListCall { @@ -3581,6 +3711,7 @@ func (c *UsersHistoryListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3641,6 +3772,24 @@ func (c *UsersHistoryListCall) Do(opts ...googleapi.CallOption) (*ListHistoryRes // "userId" // ], // "parameters": { + // "historyTypes": { + // "description": "History types to be returned by the function", + // "enum": [ + // "labelAdded", + // "labelRemoved", + // "messageAdded", + // "messageDeleted" + // ], + // "enumDescriptions": [ + // "", + // "", + // "", + // "" + // ], + // "location": "query", + // "repeated": true, + // "type": "string" + // }, // "labelId": { // "description": "Only return messages with a label matching the ID.", // "location": "query", @@ -3757,6 +3906,7 @@ func (c *UsersLabelsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.label) if err != nil { @@ -3894,6 +4044,7 @@ func (c *UsersLabelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/labels/{id}") @@ -4013,6 +4164,7 @@ func (c *UsersLabelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4163,6 +4315,7 @@ func (c *UsersLabelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4299,6 +4452,7 @@ func (c *UsersLabelsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.label) if err != nil { @@ -4445,6 +4599,7 @@ func (c *UsersLabelsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.label) if err != nil { @@ -4591,6 +4746,7 @@ func (c *UsersMessagesBatchDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchdeletemessagesrequest) if err != nil { @@ -4697,6 +4853,7 @@ func (c *UsersMessagesBatchModifyCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchmodifymessagesrequest) if err != nil { @@ -4805,6 +4962,7 @@ func (c *UsersMessagesDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/messages/{id}") @@ -4942,6 +5100,7 @@ func (c *UsersMessagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5083,9 +5242,8 @@ func (r *UsersMessagesService) Import(userId string, message *Message) *UsersMes } // Deleted sets the optional parameter "deleted": Mark the email as -// permanently deleted (not TRASH) and only visible in Google Apps Vault -// to a Vault administrator. Only used for Google Apps for Work -// accounts. +// permanently deleted (not TRASH) and only visible in Google Vault to a +// Vault administrator. Only used for G Suite accounts. func (c *UsersMessagesImportCall) Deleted(deleted bool) *UsersMessagesImportCall { c.urlParams_.Set("deleted", fmt.Sprint(deleted)) return c @@ -5197,6 +5355,7 @@ func (c *UsersMessagesImportCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.message) if err != nil { @@ -5325,7 +5484,7 @@ func (c *UsersMessagesImportCall) Do(opts ...googleapi.CallOption) (*Message, er // "parameters": { // "deleted": { // "default": "false", - // "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Apps Vault to a Vault administrator. Only used for Google Apps for Work accounts.", + // "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Vault to a Vault administrator. 
Only used for G Suite accounts.", // "location": "query", // "type": "boolean" // }, @@ -5407,9 +5566,8 @@ func (r *UsersMessagesService) Insert(userId string, message *Message) *UsersMes } // Deleted sets the optional parameter "deleted": Mark the email as -// permanently deleted (not TRASH) and only visible in Google Apps Vault -// to a Vault administrator. Only used for Google Apps for Work -// accounts. +// permanently deleted (not TRASH) and only visible in Google Vault to a +// Vault administrator. Only used for G Suite accounts. func (c *UsersMessagesInsertCall) Deleted(deleted bool) *UsersMessagesInsertCall { c.urlParams_.Set("deleted", fmt.Sprint(deleted)) return c @@ -5505,6 +5663,7 @@ func (c *UsersMessagesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.message) if err != nil { @@ -5633,7 +5792,7 @@ func (c *UsersMessagesInsertCall) Do(opts ...googleapi.CallOption) (*Message, er // "parameters": { // "deleted": { // "default": "false", - // "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Apps Vault to a Vault administrator. Only used for Google Apps for Work accounts.", + // "description": "Mark the email as permanently deleted (not TRASH) and only visible in Google Vault to a Vault administrator. Only used for G Suite accounts.", // "location": "query", // "type": "boolean" // }, @@ -5773,6 +5932,7 @@ func (c *UsersMessagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5957,6 +6117,7 @@ func (c *UsersMessagesModifyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifymessagerequest) if err != nil { @@ -6154,6 +6315,7 @@ func (c *UsersMessagesSendCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.message) if err != nil { @@ -6356,6 +6518,7 @@ func (c *UsersMessagesTrashCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/messages/{id}/trash") @@ -6491,6 +6654,7 @@ func (c *UsersMessagesUntrashCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/messages/{id}/untrash") @@ -6639,6 +6803,7 @@ func (c *UsersMessagesAttachmentsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6796,6 +6961,7 @@ func (c *UsersSettingsGetAutoForwardingCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6937,6 +7103,7 @@ func (c *UsersSettingsGetImapCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7078,6 +7245,7 @@ func (c *UsersSettingsGetPopCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7219,6 +7387,7 @@ func (c *UsersSettingsGetVacationCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7353,6 +7522,7 @@ func (c *UsersSettingsUpdateAutoForwardingCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoforwarding) if err != nil { @@ -7487,6 +7657,7 @@ func (c *UsersSettingsUpdateImapCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.imapsettings) if err != nil { @@ -7621,6 +7792,7 @@ func (c *UsersSettingsUpdatePopCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.popsettings) if err != nil { @@ -7755,6 +7927,7 @@ func (c *UsersSettingsUpdateVacationCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.vacationsettings) if err != nil { @@ -7889,6 +8062,7 @@ func (c *UsersSettingsFiltersCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.filter) if err != nil { @@ -8023,6 +8197,7 @@ func (c *UsersSettingsFiltersDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/filters/{id}") @@ -8140,6 +8315,7 @@ func (c *UsersSettingsFiltersGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != 
"" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8289,6 +8465,7 @@ func (c *UsersSettingsFiltersListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8424,6 +8601,7 @@ func (c *UsersSettingsForwardingAddressesCreateCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingaddress) if err != nil { @@ -8559,6 +8737,7 @@ func (c *UsersSettingsForwardingAddressesDeleteCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/forwardingAddresses/{forwardingEmail}") @@ -8676,6 +8855,7 @@ func (c *UsersSettingsForwardingAddressesGetCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8825,6 +9005,7 @@ func (c *UsersSettingsForwardingAddressesListCall) doRequest(alt string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8964,6 +9145,7 @@ func (c *UsersSettingsSendAsCreateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sendas) if err != nil { @@ -9099,6 +9281,7 @@ func (c *UsersSettingsSendAsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/sendAs/{sendAsEmail}") @@ -9217,6 +9400,7 @@ func (c *UsersSettingsSendAsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9368,6 +9552,7 @@ func (c *UsersSettingsSendAsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9504,6 +9689,7 @@ func (c *UsersSettingsSendAsPatchCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sendas) if err != nil { @@ -9650,6 +9836,7 @@ func (c *UsersSettingsSendAsUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", 
c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sendas) if err != nil { @@ -9794,6 +9981,7 @@ func (c *UsersSettingsSendAsVerifyCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/sendAs/{sendAsEmail}/verify") @@ -9850,6 +10038,708 @@ func (c *UsersSettingsSendAsVerifyCall) Do(opts ...googleapi.CallOption) error { } +// method id "gmail.users.settings.sendAs.smimeInfo.delete": + +type UsersSettingsSendAsSmimeInfoDeleteCall struct { + s *Service + userId string + sendAsEmail string + id string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified S/MIME config for the specified send-as +// alias. +func (r *UsersSettingsSendAsSmimeInfoService) Delete(userId string, sendAsEmail string, id string) *UsersSettingsSendAsSmimeInfoDeleteCall { + c := &UsersSettingsSendAsSmimeInfoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userId = userId + c.sendAsEmail = sendAsEmail + c.id = id + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersSettingsSendAsSmimeInfoDeleteCall) Fields(s ...googleapi.Field) *UsersSettingsSendAsSmimeInfoDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersSettingsSendAsSmimeInfoDeleteCall) Context(ctx context.Context) *UsersSettingsSendAsSmimeInfoDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersSettingsSendAsSmimeInfoDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersSettingsSendAsSmimeInfoDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userId": c.userId, + "sendAsEmail": c.sendAsEmail, + "id": c.id, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "gmail.users.settings.sendAs.smimeInfo.delete" call. +func (c *UsersSettingsSendAsSmimeInfoDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the specified S/MIME config for the specified send-as alias.", + // "httpMethod": "DELETE", + // "id": "gmail.users.settings.sendAs.smimeInfo.delete", + // "parameterOrder": [ + // "userId", + // "sendAsEmail", + // "id" + // ], + // "parameters": { + // "id": { + // "description": "The immutable ID for the SmimeInfo.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sendAsEmail": { + // "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userId": { + // "default": "me", + // "description": "The user's email address. The special value me can be used to indicate the authenticated user.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}", + // "scopes": [ + // "https://www.googleapis.com/auth/gmail.settings.basic", + // "https://www.googleapis.com/auth/gmail.settings.sharing" + // ] + // } + +} + +// method id "gmail.users.settings.sendAs.smimeInfo.get": + +type UsersSettingsSendAsSmimeInfoGetCall struct { + s *Service + userId string + sendAsEmail string + id string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the specified S/MIME config for the specified send-as +// alias. +func (r *UsersSettingsSendAsSmimeInfoService) Get(userId string, sendAsEmail string, id string) *UsersSettingsSendAsSmimeInfoGetCall { + c := &UsersSettingsSendAsSmimeInfoGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userId = userId + c.sendAsEmail = sendAsEmail + c.id = id + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersSettingsSendAsSmimeInfoGetCall) Fields(s ...googleapi.Field) *UsersSettingsSendAsSmimeInfoGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *UsersSettingsSendAsSmimeInfoGetCall) IfNoneMatch(entityTag string) *UsersSettingsSendAsSmimeInfoGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersSettingsSendAsSmimeInfoGetCall) Context(ctx context.Context) *UsersSettingsSendAsSmimeInfoGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *UsersSettingsSendAsSmimeInfoGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersSettingsSendAsSmimeInfoGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userId": c.userId, + "sendAsEmail": c.sendAsEmail, + "id": c.id, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "gmail.users.settings.sendAs.smimeInfo.get" call. +// Exactly one of *SmimeInfo or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SmimeInfo.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UsersSettingsSendAsSmimeInfoGetCall) Do(opts ...googleapi.CallOption) (*SmimeInfo, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SmimeInfo{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the specified S/MIME config for the specified send-as alias.", + // "httpMethod": "GET", + // "id": "gmail.users.settings.sendAs.smimeInfo.get", + // "parameterOrder": [ + // "userId", + // "sendAsEmail", + // "id" + // ], + // "parameters": { + // "id": { + // "description": "The immutable ID for the SmimeInfo.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sendAsEmail": { + // "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userId": { + // "default": "me", + // "description": "The user's email address. 
The special value me can be used to indicate the authenticated user.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}", + // "response": { + // "$ref": "SmimeInfo" + // }, + // "scopes": [ + // "https://mail.google.com/", + // "https://www.googleapis.com/auth/gmail.modify", + // "https://www.googleapis.com/auth/gmail.readonly", + // "https://www.googleapis.com/auth/gmail.settings.basic", + // "https://www.googleapis.com/auth/gmail.settings.sharing" + // ] + // } + +} + +// method id "gmail.users.settings.sendAs.smimeInfo.insert": + +type UsersSettingsSendAsSmimeInfoInsertCall struct { + s *Service + userId string + sendAsEmail string + smimeinfo *SmimeInfo + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Insert (upload) the given S/MIME config for the specified +// send-as alias. Note that pkcs12 format is required for the key. +func (r *UsersSettingsSendAsSmimeInfoService) Insert(userId string, sendAsEmail string, smimeinfo *SmimeInfo) *UsersSettingsSendAsSmimeInfoInsertCall { + c := &UsersSettingsSendAsSmimeInfoInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userId = userId + c.sendAsEmail = sendAsEmail + c.smimeinfo = smimeinfo + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersSettingsSendAsSmimeInfoInsertCall) Fields(s ...googleapi.Field) *UsersSettingsSendAsSmimeInfoInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersSettingsSendAsSmimeInfoInsertCall) Context(ctx context.Context) *UsersSettingsSendAsSmimeInfoInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersSettingsSendAsSmimeInfoInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersSettingsSendAsSmimeInfoInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.smimeinfo) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userId": c.userId, + "sendAsEmail": c.sendAsEmail, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "gmail.users.settings.sendAs.smimeInfo.insert" call. +// Exactly one of *SmimeInfo or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SmimeInfo.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UsersSettingsSendAsSmimeInfoInsertCall) Do(opts ...googleapi.CallOption) (*SmimeInfo, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SmimeInfo{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Insert (upload) the given S/MIME config for the specified send-as alias. Note that pkcs12 format is required for the key.", + // "httpMethod": "POST", + // "id": "gmail.users.settings.sendAs.smimeInfo.insert", + // "parameterOrder": [ + // "userId", + // "sendAsEmail" + // ], + // "parameters": { + // "sendAsEmail": { + // "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userId": { + // "default": "me", + // "description": "The user's email address. The special value me can be used to indicate the authenticated user.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo", + // "request": { + // "$ref": "SmimeInfo" + // }, + // "response": { + // "$ref": "SmimeInfo" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/gmail.settings.basic", + // "https://www.googleapis.com/auth/gmail.settings.sharing" + // ] + // } + +} + +// method id "gmail.users.settings.sendAs.smimeInfo.list": + +type UsersSettingsSendAsSmimeInfoListCall struct { + s *Service + userId string + sendAsEmail string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists S/MIME configs for the specified send-as alias. +func (r *UsersSettingsSendAsSmimeInfoService) List(userId string, sendAsEmail string) *UsersSettingsSendAsSmimeInfoListCall { + c := &UsersSettingsSendAsSmimeInfoListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userId = userId + c.sendAsEmail = sendAsEmail + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersSettingsSendAsSmimeInfoListCall) Fields(s ...googleapi.Field) *UsersSettingsSendAsSmimeInfoListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *UsersSettingsSendAsSmimeInfoListCall) IfNoneMatch(entityTag string) *UsersSettingsSendAsSmimeInfoListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersSettingsSendAsSmimeInfoListCall) Context(ctx context.Context) *UsersSettingsSendAsSmimeInfoListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersSettingsSendAsSmimeInfoListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersSettingsSendAsSmimeInfoListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userId": c.userId, + "sendAsEmail": c.sendAsEmail, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "gmail.users.settings.sendAs.smimeInfo.list" call. +// Exactly one of *ListSmimeInfoResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListSmimeInfoResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UsersSettingsSendAsSmimeInfoListCall) Do(opts ...googleapi.CallOption) (*ListSmimeInfoResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListSmimeInfoResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists S/MIME configs for the specified send-as alias.", + // "httpMethod": "GET", + // "id": "gmail.users.settings.sendAs.smimeInfo.list", + // "parameterOrder": [ + // "userId", + // "sendAsEmail" + // ], + // "parameters": { + // "sendAsEmail": { + // "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userId": { + // "default": "me", + // "description": "The user's email address. 
The special value me can be used to indicate the authenticated user.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo", + // "response": { + // "$ref": "ListSmimeInfoResponse" + // }, + // "scopes": [ + // "https://mail.google.com/", + // "https://www.googleapis.com/auth/gmail.modify", + // "https://www.googleapis.com/auth/gmail.readonly", + // "https://www.googleapis.com/auth/gmail.settings.basic", + // "https://www.googleapis.com/auth/gmail.settings.sharing" + // ] + // } + +} + +// method id "gmail.users.settings.sendAs.smimeInfo.setDefault": + +type UsersSettingsSendAsSmimeInfoSetDefaultCall struct { + s *Service + userId string + sendAsEmail string + id string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetDefault: Sets the default S/MIME config for the specified send-as +// alias. +func (r *UsersSettingsSendAsSmimeInfoService) SetDefault(userId string, sendAsEmail string, id string) *UsersSettingsSendAsSmimeInfoSetDefaultCall { + c := &UsersSettingsSendAsSmimeInfoSetDefaultCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userId = userId + c.sendAsEmail = sendAsEmail + c.id = id + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersSettingsSendAsSmimeInfoSetDefaultCall) Fields(s ...googleapi.Field) *UsersSettingsSendAsSmimeInfoSetDefaultCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersSettingsSendAsSmimeInfoSetDefaultCall) Context(ctx context.Context) *UsersSettingsSendAsSmimeInfoSetDefaultCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersSettingsSendAsSmimeInfoSetDefaultCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersSettingsSendAsSmimeInfoSetDefaultCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}/setDefault") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userId": c.userId, + "sendAsEmail": c.sendAsEmail, + "id": c.id, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "gmail.users.settings.sendAs.smimeInfo.setDefault" call. +func (c *UsersSettingsSendAsSmimeInfoSetDefaultCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Sets the default S/MIME config for the specified send-as alias.", + // "httpMethod": "POST", + // "id": "gmail.users.settings.sendAs.smimeInfo.setDefault", + // "parameterOrder": [ + // "userId", + // "sendAsEmail", + // "id" + // ], + // "parameters": { + // "id": { + // "description": "The immutable ID for the SmimeInfo.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sendAsEmail": { + // "description": "The email address that appears in the \"From:\" header for mail sent using this alias.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userId": { + // "default": "me", + // "description": "The user's email address. The special value me can be used to indicate the authenticated user.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{userId}/settings/sendAs/{sendAsEmail}/smimeInfo/{id}/setDefault", + // "scopes": [ + // "https://www.googleapis.com/auth/gmail.settings.basic", + // "https://www.googleapis.com/auth/gmail.settings.sharing" + // ] + // } + +} + // method id "gmail.users.threads.delete": type UsersThreadsDeleteCall struct { @@ -9901,6 +10791,7 @@ func (c *UsersThreadsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/threads/{id}") @@ -10037,6 +10928,7 @@ func (c *UsersThreadsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10246,6 +11138,7 @@ func (c *UsersThreadsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10431,6 +11324,7 @@ func (c *UsersThreadsModifyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifythreadrequest) if err != nil { @@ -10574,6 +11468,7 @@ func (c *UsersThreadsTrashCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/threads/{id}/trash") @@ -10709,6 +11604,7 @@ func (c *UsersThreadsUntrashCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{userId}/threads/{id}/untrash") diff --git a/vendor/google.golang.org/api/google-api-go-generator/gen.go 
b/vendor/google.golang.org/api/google-api-go-generator/gen.go index 94f73c0b0..2ec81e03d 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/gen.go +++ b/vendor/google.golang.org/api/google-api-go-generator/gen.go @@ -27,7 +27,10 @@ import ( "google.golang.org/api/google-api-go-generator/internal/disco" ) -const googleDiscoveryURL = "https://www.googleapis.com/discovery/v1/apis" +const ( + googleDiscoveryURL = "https://www.googleapis.com/discovery/v1/apis" + generatorVersion = "20170210" +) var ( apiToGenerate = flag.String("api", "*", "The API ID to generate, like 'tasks:v1'. A value of '*' means all.") @@ -389,7 +392,7 @@ func (a *API) Target() string { // (typically "Service"). func (a *API) ServiceType() string { switch a.Name { - case "appengine", "content", "servicemanagement": + case "appengine", "content", "servicemanagement", "serviceuser": return "APIService" default: return "Service" @@ -590,6 +593,7 @@ func (a *API) GenerateCode() ([]byte, error) { pn(" client *http.Client") pn(" BasePath string // API endpoint base URL") pn(" UserAgent string // optional additional User-Agent fragment") + pn(" GoogleClientHeaderElement string // client header fragment, for Google use only") for _, res := range a.doc.Resources { pn("\n\t%s\t*%s", resourceGoField(res), resourceGoType(res)) @@ -599,6 +603,9 @@ func (a *API) GenerateCode() ([]byte, error) { pn(` if s.UserAgent == "" { return googleapi.UserAgent }`) pn(` return googleapi.UserAgent + " " + s.UserAgent`) pn("}\n") + pn("\nfunc (s *%s) clientHeader() string {", service) + pn(" return gensupport.GoogleClientHeader(%q, s.GoogleClientHeaderElement)", generatorVersion) + pn("}\n") for _, res := range a.doc.Resources { a.generateResource(res) @@ -1785,6 +1792,7 @@ func (meth *Method) generateCode() { pn(" reqHeaders[k] = v") pn("}") pn(`reqHeaders.Set("User-Agent",c.s.userAgent())`) + pn(`reqHeaders.Set("x-goog-api-client", c.s.clientHeader())`) if httpMethod == "GET" { pn(`if c.ifNoneMatch_ != "" {`) pn(` reqHeaders.Set("If-None-Match", c.ifNoneMatch_)`) diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/any.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/any.want index 7804c64b6..6ccd585dd 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/any.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/any.want @@ -59,9 +59,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -73,6 +74,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.LogServices = NewProjectsLogServicesService(s) @@ -800,6 +805,7 @@ func (c *ProjectsLogServicesListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1027,6 +1033,7 @@ func 
(c *ProjectsLogServicesIndexesListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1213,6 +1220,7 @@ func (c *ProjectsLogServicesSinksCreateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -1356,6 +1364,7 @@ func (c *ProjectsLogServicesSinksDeleteCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}") @@ -1510,6 +1519,7 @@ func (c *ProjectsLogServicesSinksGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1665,6 +1675,7 @@ func (c *ProjectsLogServicesSinksListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1805,6 +1816,7 @@ func (c *ProjectsLogServicesSinksUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -1955,6 +1967,7 @@ func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}") @@ -2138,6 +2151,7 @@ func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2317,6 +2331,7 @@ func (c *ProjectsLogsEntriesWriteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest) if err != nil { @@ -2460,6 +2475,7 @@ func (c *ProjectsLogsSinksCreateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -2603,6 +2619,7 @@ func (c *ProjectsLogsSinksDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader 
= nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}") @@ -2757,6 +2774,7 @@ func (c *ProjectsLogsSinksGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2912,6 +2930,7 @@ func (c *ProjectsLogsSinksListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3052,6 +3071,7 @@ func (c *ProjectsLogsSinksUpdateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofarray-1.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofarray-1.want index 2addf997e..5ae8ebf9d 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofarray-1.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofarray-1.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + // GeoJsonMultiPolygon: Multi Polygon type GeoJsonMultiPolygon struct { // Coordinates: Coordinate arrays. diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofenum.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofenum.want index a08df332b..0c16296f6 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofenum.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofenum.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + // Container: Represents a Google Tag Manager Container. type Container struct { // AccountId: GTM Account ID. 
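
Note on the generator change above: the gen.go diff introduces a generatorVersion constant ("20170210"), a GoogleClientHeaderElement field on every generated Service struct, and a clientHeader() helper, and each generated doRequest now sets an x-goog-api-client header next to User-Agent (which is why every testdata/*.want file and every vendored client in this diff gains the same two-line change). The sketch below is illustrative only, not part of the diff: the struct fields, clientHeader(), and the gensupport.GoogleClientHeader call are taken verbatim from the generated code above, while the package name and the requestHeaders helper are hypothetical scaffolding, and it assumes the vendored google.golang.org/api/gensupport and googleapi packages at this revision.

package generatedclientsketch // hypothetical package, for illustration only

import (
	"net/http"

	"google.golang.org/api/gensupport"
	"google.golang.org/api/googleapi"
)

// Service mirrors the struct the generator now emits (see the testdata/*.want
// diffs above); the new GoogleClientHeaderElement field feeds the
// x-goog-api-client header.
type Service struct {
	client                    *http.Client
	BasePath                  string // API endpoint base URL
	UserAgent                 string // optional additional User-Agent fragment
	GoogleClientHeaderElement string // client header fragment, for Google use only
}

func (s *Service) userAgent() string {
	if s.UserAgent == "" {
		return googleapi.UserAgent
	}
	return googleapi.UserAgent + " " + s.UserAgent
}

// clientHeader combines the generator version baked in at generation time
// (the new generatorVersion constant, "20170210") with the optional
// per-service element.
func (s *Service) clientHeader() string {
	return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement)
}

// requestHeaders shows the header pattern every generated *Call.doRequest now
// follows; in the generated code these two Set calls appear inline in each
// doRequest method rather than in a shared helper like this one.
func (s *Service) requestHeaders() http.Header {
	h := make(http.Header)
	h.Set("User-Agent", s.userAgent())
	h.Set("x-goog-api-client", s.clientHeader())
	return h
}
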
diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofobjects.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofobjects.want index 78f3237c5..9b80efdcf 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofobjects.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofobjects.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + type Analyze struct { // Errors: List of errors with the data. Errors []map[string]Property `json:"errors,omitempty"` diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofstrings.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofstrings.want index 60561103c..c86e9fa74 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofstrings.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/arrayofmapofstrings.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + type Analyze struct { // Errors: List of errors with the data. 
Errors []map[string]string `json:"errors,omitempty"` diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/blogger-3.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/blogger-3.want index d5baa96dc..c13318fb7 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/blogger-3.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/blogger-3.want @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BlogUserInfos *BlogUserInfosService @@ -99,6 +100,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBlogUserInfosService(s *Service) *BlogUserInfosService { rs := &BlogUserInfosService{s: s} return rs @@ -1585,6 +1590,7 @@ func (c *BlogUserInfosGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1744,6 +1750,7 @@ func (c *BlogsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1887,6 +1894,7 @@ func (c *BlogsGetByUrlCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2041,6 +2049,7 @@ func (c *BlogsListByUserCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2191,6 +2200,7 @@ func (c *CommentsApproveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}/approve") @@ -2334,6 +2344,7 @@ func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}") @@ -2460,6 +2471,7 @@ func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2674,6 +2686,7 @@ func (c *CommentsListCall) doRequest(alt 
string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2936,6 +2949,7 @@ func (c *CommentsListByBlogCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3116,6 +3130,7 @@ func (c *CommentsMarkAsSpamCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}/spam") @@ -3259,6 +3274,7 @@ func (c *CommentsRemoveContentCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/comments/{commentId}/removecontent") @@ -3420,6 +3436,7 @@ func (c *PageViewsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3563,6 +3580,7 @@ func (c *PagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/pages/{pageId}") @@ -3690,6 +3708,7 @@ func (c *PagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3841,6 +3860,7 @@ func (c *PagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.page) if err != nil { @@ -4013,6 +4033,7 @@ func (c *PagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4178,6 +4199,7 @@ func (c *PagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.page) if err != nil { @@ -4321,6 +4343,7 @@ func (c *PagesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.page) if err != nil { @@ -4482,6 +4505,7 @@ func 
(c *PostUserInfosGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4719,6 +4743,7 @@ func (c *PostUserInfosListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4953,6 +4978,7 @@ func (c *PostsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}") @@ -5087,6 +5113,7 @@ func (c *PostsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5272,6 +5299,7 @@ func (c *PostsGetByPathCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5435,6 +5463,7 @@ func (c *PostsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.post) if err != nil { @@ -5665,6 +5694,7 @@ func (c *PostsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5899,6 +5929,7 @@ func (c *PostsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.post) if err != nil { @@ -6047,6 +6078,7 @@ func (c *PostsPublishCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/publish") @@ -6186,6 +6218,7 @@ func (c *PostsRevertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "blogs/{blogId}/posts/{postId}/revert") @@ -6348,6 +6381,7 @@ func (c *PostsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6506,6 +6540,7 @@ func (c *PostsUpdateCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.post) if err != nil { @@ -6656,6 +6691,7 @@ func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/floats.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/floats.want index e1ac9efbf..8a37ddc4a 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/floats.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/floats.want @@ -54,9 +54,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -66,6 +67,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + // Utilization: CPU utilization policy. type Utilization struct { Average float64 `json:"average,omitempty"` diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/getwithoutbody.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/getwithoutbody.want index 111b492cd..2d4a7fb90 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/getwithoutbody.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/getwithoutbody.want @@ -53,9 +53,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only MetricDescriptors *MetricDescriptorsService } @@ -67,6 +68,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewMetricDescriptorsService(s *Service) *MetricDescriptorsService { rs := &MetricDescriptorsService{s: s} return rs @@ -227,6 +232,7 @@ func (c *MetricDescriptorsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofany.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofany.want index 97eab9187..94a8daa07 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofany.want +++ 
b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofany.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + type JsonValue interface{} type TableDataInsertAllRequest struct { diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofarrayofobjects.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofarrayofobjects.want index 7de9989f8..ccdec5b7f 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofarrayofobjects.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofarrayofobjects.want @@ -53,9 +53,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Atlas *AtlasService } @@ -67,6 +68,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAtlasService(s *Service) *AtlasService { rs := &AtlasService{s: s} return rs @@ -174,6 +179,7 @@ func (c *AtlasGetMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofint64strings.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofint64strings.want index cc98c6a00..56786a8f7 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofint64strings.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofint64strings.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + type TestResultSummaryToolGroupTestSuite struct { Passed bool `json:"passed,omitempty"` diff --git 
a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofobjects.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofobjects.want index f425f2168..2494e62aa 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofobjects.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofobjects.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + type Entity struct { // Properties: The entity's properties. Properties map[string]EntityProperties `json:"properties,omitempty"` diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofstrings-1.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofstrings-1.want index ce3dcd18f..5b1c38d51 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofstrings-1.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/mapofstrings-1.want @@ -53,9 +53,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Atlas *AtlasService } @@ -67,6 +68,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAtlasService(s *Service) *AtlasService { rs := &AtlasService{s: s} return rs @@ -171,6 +176,7 @@ func (c *AtlasGetMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/param-rename.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/param-rename.want index d48a2499c..cc3202514 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/param-rename.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/param-rename.want @@ -54,9 +54,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Events *EventsService @@ -70,6 +71,10 @@ func (s *Service) userAgent() string { 
return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewEventsService(s *Service) *EventsService { rs := &EventsService{s: s} return rs @@ -149,6 +154,7 @@ func (c *EventsMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "calendars/{calendarId}/events/{eventId}/move") @@ -295,6 +301,7 @@ func (c *ReportsQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/quotednum.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/quotednum.want index 222faf4fb..07bf259ca 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/quotednum.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/quotednum.want @@ -60,9 +60,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -72,6 +73,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + // Creative: A creative and its classification data. type Creative struct { // AdvertiserId: Detected advertiser id, if any. Read-only. 
This field diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/repeated.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/repeated.want index 0428240ca..95165f7ce 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/repeated.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/repeated.want @@ -53,9 +53,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService } @@ -67,6 +68,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Reports = NewAccountsReportsService(s) @@ -182,6 +187,7 @@ func (c *AccountsReportsGenerateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/resource-named-service.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/resource-named-service.want index 59b3b9288..08b52fe7f 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/resource-named-service.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/resource-named-service.want @@ -61,9 +61,10 @@ func New(client *http.Client) (*APIService, error) { } type APIService struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Apps *AppsService } @@ -75,6 +76,10 @@ func (s *APIService) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAppsService(s *APIService) *AppsService { rs := &AppsService{s: s} rs.Locations = NewAppsLocationsService(s) @@ -2180,6 +2185,7 @@ func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2312,6 +2318,7 @@ func (c *AppsRepairCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.repairapplicationrequest) if err != nil { @@ -2456,6 +2463,7 @@ func (c *AppsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2623,6 +2631,7 @@ func (c *AppsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2801,6 +2810,7 @@ func (c *AppsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2971,6 +2981,7 @@ func (c *AppsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3136,6 +3147,7 @@ func (c *AppsServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/apps/{appsId}/services/{servicesId}") @@ -3280,6 +3292,7 @@ func (c *AppsServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3439,6 +3452,7 @@ func (c *AppsServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3628,6 +3642,7 @@ func (c *AppsServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.service) if err != nil { @@ -3781,6 +3796,7 @@ func (c *AppsServicesVersionsCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -3924,6 +3940,7 @@ func (c *AppsServicesVersionsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/apps/{appsId}/services/{servicesId}/versions/{versionsId}") @@ -4091,6 +4108,7 @@ func (c *AppsServicesVersionsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4280,6 +4298,7 @@ func (c *AppsServicesVersionsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4486,6 +4505,7 @@ func (c *AppsServicesVersionsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.version) if err != nil { @@ -4652,6 +4672,7 @@ func (c *AppsServicesVersionsInstancesDebugCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.debuginstancerequest) if err != nil { @@ -4813,6 +4834,7 @@ func (c *AppsServicesVersionsInstancesDeleteCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/apps/{appsId}/services/{servicesId}/versions/{versionsId}/instances/{instancesId}") @@ -4977,6 +4999,7 @@ func (c *AppsServicesVersionsInstancesGetCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5156,6 +5179,7 @@ func (c *AppsServicesVersionsInstancesListCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/unfortunatedefaults.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/unfortunatedefaults.want index f884ed239..afdeba5b4 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/unfortunatedefaults.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/unfortunatedefaults.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + // Thing: don't care type Thing struct { // BoolEmptyDefaultA: diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/variants.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/variants.want index 1bdd7be92..5ec4e51f2 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/variants.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/variants.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // 
optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + type GeoJsonGeometry map[string]interface{} func (t GeoJsonGeometry) Type() string { diff --git a/vendor/google.golang.org/api/google-api-go-generator/testdata/wrapnewlines.want b/vendor/google.golang.org/api/google-api-go-generator/testdata/wrapnewlines.want index 4d44d078b..3ac355113 100644 --- a/vendor/google.golang.org/api/google-api-go-generator/testdata/wrapnewlines.want +++ b/vendor/google.golang.org/api/google-api-go-generator/testdata/wrapnewlines.want @@ -52,9 +52,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only } func (s *Service) userAgent() string { @@ -64,6 +65,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + // Thing: don't care type Thing struct { // Oneline: First sentence. Second sentence. 
Description is long enough diff --git a/vendor/google.golang.org/api/groupsmigration/v1/groupsmigration-gen.go b/vendor/google.golang.org/api/groupsmigration/v1/groupsmigration-gen.go index 127ef0298..766de80e6 100644 --- a/vendor/google.golang.org/api/groupsmigration/v1/groupsmigration-gen.go +++ b/vendor/google.golang.org/api/groupsmigration/v1/groupsmigration-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Archive *ArchiveService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewArchiveService(s *Service) *ArchiveService { rs := &ArchiveService{s: s} return rs @@ -220,6 +225,7 @@ func (c *ArchiveInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{groupId}/archive") diff --git a/vendor/google.golang.org/api/groupssettings/v1/groupssettings-api.json b/vendor/google.golang.org/api/groupssettings/v1/groupssettings-api.json index a28e82b61..80d7bd805 100644 --- a/vendor/google.golang.org/api/groupssettings/v1/groupssettings-api.json +++ b/vendor/google.golang.org/api/groupssettings/v1/groupssettings-api.json @@ -1,6 +1,6 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/26Y5Yyn2D8WwxhPa2zJyQ3Axz8I\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/8JFCy9G09DiKop8V7jh2RwsOYyI\"", "discoveryVersion": "v1", "id": "groupssettings:v1", "name": "groupssettings", @@ -72,7 +72,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/apps.groups.settings": { - "description": "View and manage the settings of a Google Apps Group" + "description": "View and manage the settings of a G Suite group" } } } diff --git a/vendor/google.golang.org/api/groupssettings/v1/groupssettings-gen.go b/vendor/google.golang.org/api/groupssettings/v1/groupssettings-gen.go index 0ea0f6b19..c14469ce9 100644 --- a/vendor/google.golang.org/api/groupssettings/v1/groupssettings-gen.go +++ b/vendor/google.golang.org/api/groupssettings/v1/groupssettings-gen.go @@ -47,7 +47,7 @@ const basePath = "https://www.googleapis.com/groups/v1/groups/" // OAuth2 scopes used by this API. 
const ( - // View and manage the settings of a Google Apps Group + // View and manage the settings of a G Suite group AppsGroupsSettingsScope = "https://www.googleapis.com/auth/apps.groups.settings" ) @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Groups *GroupsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGroupsService(s *Service) *GroupsService { rs := &GroupsService{s: s} return rs @@ -290,6 +295,7 @@ func (c *GroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -419,6 +425,7 @@ func (c *GroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groups) if err != nil { @@ -552,6 +559,7 @@ func (c *GroupsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groups) if err != nil { diff --git a/vendor/google.golang.org/api/iam/v1/iam-api.json b/vendor/google.golang.org/api/iam/v1/iam-api.json index c7bac1e04..2a2c387e4 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -1,875 +1,981 @@ { - "kind": "discovery#restDescription", - "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/bQX0iw41CX8vYvVm7WSKLdyxMvI\"", - "discoveryVersion": "v1", - "id": "iam:v1", - "name": "iam", - "canonicalName": "iam", - "version": "v1", - "revision": "20160915", - "title": "Google Identity and Access Management (IAM) API", - "description": "Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/iam/", - "protocol": "rest", - "baseUrl": "https://iam.googleapis.com/", - "basePath": "", - "rootUrl": "https://iam.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "version_module": true, - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type 
of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" - }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query" - }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query" - }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "schemas": { - "ListServiceAccountsResponse": { - "id": "ListServiceAccountsResponse", - "type": "object", - "description": "The service account list response.", - "properties": { - "accounts": { - "type": "array", - "description": "The list of matching service accounts.", - "items": { - "$ref": "ServiceAccount" - } - }, - "nextPageToken": { - "type": "string", - "description": "To retrieve the next page of results, set ListServiceAccountsRequest.page_token to this value." - } - } - }, - "ServiceAccount": { - "id": "ServiceAccount", - "type": "object", - "description": "A service account in the Identity and Access Management API. To create a service account, specify the `project_id` and the `account_id` for the account. The `account_id` is unique within the project, and is used to generate the service account email address and a stable `unique_id`. If the account already exists, the account's resource name is returned in util::Status's ResourceInfo.resource_name in the format of projects/{project}/serviceAccounts/{email}. The caller can use the name in other methods to access the account. All other methods can identify the service account using the format `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", - "properties": { - "name": { - "type": "string", - "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. 
Requests using `-` as a wildcard for the project will infer the project from the `account` and the `account` value can be the `email` address or the `unique_id` of the service account. In responses the resource name will always be in the format `projects/{project}/serviceAccounts/{email}`." - }, - "projectId": { - "type": "string", - "description": "@OutputOnly The id of the project that owns the service account." - }, - "uniqueId": { - "type": "string", - "description": "@OutputOnly The unique and stable id of the service account." - }, - "email": { - "type": "string", - "description": "@OutputOnly The email address of the service account." - }, - "displayName": { - "type": "string", - "description": "Optional. A user-specified description of the service account. Must be fewer than 100 UTF-8 bytes." - }, - "etag": { - "type": "string", - "description": "Used to perform a consistent read-modify-write.", - "format": "byte" - }, - "oauth2ClientId": { - "type": "string", - "description": "@OutputOnly. The OAuth2 client id for the service account. This is used in conjunction with the OAuth2 clientconfig API to make three legged OAuth2 (3LO) flows to access the data of Google users." - } - } - }, - "CreateServiceAccountRequest": { - "id": "CreateServiceAccountRequest", - "type": "object", - "description": "The service account create request.", - "properties": { - "accountId": { - "type": "string", - "description": "Required. The account id that is used to generate the service account email address and a stable unique id. It is unique within a project, must be 6-30 characters long, and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035." - }, - "serviceAccount": { - "$ref": "ServiceAccount", - "description": "The ServiceAccount resource to create. Currently, only the following values are user assignable: `display_name` ." - } - } - }, - "Empty": { - "id": "Empty", - "type": "object", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`." - }, - "ListServiceAccountKeysResponse": { - "id": "ListServiceAccountKeysResponse", - "type": "object", - "description": "The service account keys list response.", - "properties": { - "keys": { - "type": "array", - "description": "The public keys for the service account.", - "items": { - "$ref": "ServiceAccountKey" - } - } - } - }, - "ServiceAccountKey": { - "id": "ServiceAccountKey", - "type": "object", - "description": "Represents a service account key. A service account has two sets of key-pairs: user-managed, and system-managed. User-managed key-pairs can be created and deleted by users. Users are responsible for rotating these keys periodically to ensure security of their service accounts. Users retain the private key of these key-pairs, and Google retains ONLY the public key. System-managed key-pairs are managed automatically by Google, and rotated daily without user intervention. The private key never leaves Google's servers to maximize security. 
Public keys for all service accounts are also published at the OAuth2 Service Account API.", - "properties": { - "name": { - "type": "string", - "description": "The resource name of the service account key in the following format `projects/{project}/serviceAccounts/{account}/keys/{key}`." - }, - "privateKeyType": { - "type": "string", - "description": "The output format for the private key. Only provided in `CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or `ListServiceAccountKey` responses. Google never exposes system-managed private keys, and never retains user-managed private keys.", - "enum": [ - "TYPE_UNSPECIFIED", - "TYPE_PKCS12_FILE", - "TYPE_GOOGLE_CREDENTIALS_FILE" - ] - }, - "keyAlgorithm": { - "type": "string", - "description": "Specifies the algorithm (and possibly key size) for the key.", - "enum": [ - "KEY_ALG_UNSPECIFIED", - "KEY_ALG_RSA_1024", - "KEY_ALG_RSA_2048" - ] - }, - "privateKeyData": { - "type": "string", - "description": "The private key data. Only provided in `CreateServiceAccountKey` responses.", - "format": "byte" - }, - "publicKeyData": { - "type": "string", - "description": "The public key data. Only provided in `GetServiceAccountKey` responses.", - "format": "byte" - }, - "validAfterTime": { - "type": "string", - "description": "The key can be used after this timestamp." - }, - "validBeforeTime": { - "type": "string", - "description": "The key can be used before this timestamp." - } - } - }, - "CreateServiceAccountKeyRequest": { - "id": "CreateServiceAccountKeyRequest", - "type": "object", - "description": "The service account key create request.", - "properties": { - "privateKeyType": { - "type": "string", - "description": "The output format of the private key. `GOOGLE_CREDENTIALS_FILE` is the default output format.", - "enum": [ - "TYPE_UNSPECIFIED", - "TYPE_PKCS12_FILE", - "TYPE_GOOGLE_CREDENTIALS_FILE" - ] - }, - "keyAlgorithm": { - "type": "string", - "description": "Which type of key and algorithm to use for the key. The default is currently a 4K RSA key. However this may change in the future.", - "enum": [ - "KEY_ALG_UNSPECIFIED", - "KEY_ALG_RSA_1024", - "KEY_ALG_RSA_2048" - ] - } - } - }, - "SignBlobRequest": { - "id": "SignBlobRequest", - "type": "object", - "description": "The service account sign blob request.", - "properties": { - "bytesToSign": { - "type": "string", - "description": "The bytes to sign.", - "format": "byte" - } - } - }, - "SignBlobResponse": { - "id": "SignBlobResponse", - "type": "object", - "description": "The service account sign blob response.", - "properties": { - "keyId": { - "type": "string", - "description": "The id of the key used to sign the blob." - }, - "signature": { - "type": "string", - "description": "The signed blob.", - "format": "byte" - } - } - }, - "Policy": { - "id": "Policy", - "type": "object", - "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources. A `Policy` consists of a list of `bindings`. A `Binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM. 
**Example** { \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\", ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] } For a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam).", - "properties": { - "version": { - "type": "integer", - "description": "Version of the `Policy`. The default version is 0.", - "format": "int32" - }, - "bindings": { - "type": "array", - "description": "Associates a list of `members` to a `role`. Multiple `bindings` must not be specified for the same `role`. `bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - } - }, - "etag": { - "type": "string", - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. If no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", - "format": "byte" - } - } - }, - "Binding": { - "id": "Binding", - "type": "object", - "description": "Associates `members` with a `role`.", - "properties": { - "role": { - "type": "string", - "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Required" - }, - "members": { - "type": "array", - "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` or `joe@example.com`. * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`.", - "items": { - "type": "string" - } - } - } - }, - "SetIamPolicyRequest": { - "id": "SetIamPolicyRequest", - "type": "object", - "description": "Request message for `SetIamPolicy` method.", - "properties": { - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." - } - } - }, - "TestIamPermissionsRequest": { - "id": "TestIamPermissionsRequest", - "type": "object", - "description": "Request message for `TestIamPermissions` method.", - "properties": { - "permissions": { - "type": "array", - "description": "The set of permissions to check for the `resource`. 
Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "items": { - "type": "string" - } - } - } - }, - "TestIamPermissionsResponse": { - "id": "TestIamPermissionsResponse", - "type": "object", - "description": "Response message for `TestIamPermissions` method.", - "properties": { - "permissions": { - "type": "array", - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - } - } - } - }, - "QueryGrantableRolesRequest": { - "id": "QueryGrantableRolesRequest", - "type": "object", - "description": "The grantable role query request.", - "properties": { - "fullResourceName": { - "type": "string", - "description": "Required. The full resource name to query from the list of grantable roles. The name follows the Google Cloud Platform resource format. For example, a Cloud Platform project with id `my-project` will be named `//cloudresourcemanager.googleapis.com/projects/my-project`." - } - } - }, - "QueryGrantableRolesResponse": { - "id": "QueryGrantableRolesResponse", - "type": "object", - "description": "The grantable role query response.", - "properties": { - "roles": { - "type": "array", - "description": "The list of matching roles.", - "items": { - "$ref": "Role" - } - } - } - }, - "Role": { - "id": "Role", - "type": "object", - "description": "A role in the Identity and Access Management API.", - "properties": { - "name": { - "type": "string", - "description": "The name of the role. When Role is used in CreateRole, the role name must not be set. When Role is used in output and other input such as UpdateRole, the role name is the complete path, e.g., roles/logging.viewer for curated roles and organizations/{organization-id}/roles/logging.viewer for custom roles." - }, - "title": { - "type": "string", - "description": "Optional. A human-readable title for the role. Typically this is limited to 100 UTF-8 bytes." - }, - "description": { - "type": "string", - "description": "Optional. A human-readable description for the role." - } - } - }, - "AuditData": { - "id": "AuditData", - "type": "object", - "description": "Audit log information specific to Cloud IAM. This message is serialized as an `Any` type in the `ServiceData` message of an `AuditLog` message.", - "properties": { - "policyDelta": { - "$ref": "PolicyDelta", - "description": "Policy delta between the original policy and the newly set policy." - } - } - }, - "PolicyDelta": { - "id": "PolicyDelta", - "type": "object", - "description": "The difference delta between two policies.", - "properties": { - "bindingDeltas": { - "type": "array", - "description": "The delta for Bindings between two policies.", - "items": { - "$ref": "BindingDelta" - } - } - } - }, - "BindingDelta": { - "id": "BindingDelta", - "type": "object", - "description": "One delta entry for Binding. Each individual change (only one member in each entry) to a binding will be a separate entry.", - "properties": { - "action": { - "type": "string", - "description": "The action that was performed on a Binding. Required", - "enum": [ - "ACTION_UNSPECIFIED", - "ADD", - "REMOVE" - ] - }, - "role": { - "type": "string", - "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Required" - }, - "member": { - "type": "string", - "description": "A single identity requesting access for a Cloud Platform resource. 
Follows the same format of Binding.members. Required" - } - } - } - }, - "resources": { - "projects": { - "resources": { - "serviceAccounts": { - "methods": { - "list": { - "id": "iam.projects.serviceAccounts.list", - "path": "v1/{+name}/serviceAccounts", - "httpMethod": "GET", - "description": "Lists ServiceAccounts for a project.", - "parameters": { + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "ListServiceAccountKeysResponse": { + "description": "The service account keys list response.", + "type": "object", + "properties": { + "keys": { + "description": "The public keys for the service account.", + "type": "array", + "items": { + "$ref": "ServiceAccountKey" + } + } + }, + "id": "ListServiceAccountKeysResponse" + }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse" + }, + "ServiceAccountKey": { + "description": "Represents a service account key.\n\nA service account has two sets of key-pairs: user-managed, and\nsystem-managed.\n\nUser-managed key-pairs can be created and deleted by users. Users are\nresponsible for rotating these keys periodically to ensure security of\ntheir service accounts. Users retain the private key of these key-pairs,\nand Google retains ONLY the public key.\n\nSystem-managed key-pairs are managed automatically by Google, and rotated\ndaily without user intervention. The private key never leaves Google's\nservers to maximize security.\n\nPublic keys for all service accounts are also published at the OAuth2\nService Account API.", + "type": "object", + "properties": { + "privateKeyData": { + "description": "The private key data. Only provided in `CreateServiceAccountKey`\nresponses.", + "format": "byte", + "type": "string" + }, + "publicKeyData": { + "description": "The public key data. Only provided in `GetServiceAccountKey` responses.", + "format": "byte", + "type": "string" + }, "name": { - "type": "string", - "description": "Required. The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", - "required": true, - "pattern": "^projects/[^/]*$", - "location": "path" + "description": "The resource name of the service account key in the following format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.", + "type": "string" + }, + "validBeforeTime": { + "description": "The key can be used before this timestamp.", + "format": "google-datetime", + "type": "string" + }, + "keyAlgorithm": { + "description": "Specifies the algorithm (and possibly key size) for the key.", + "type": "string", + "enumDescriptions": [ + "An unspecified key algorithm.", + "1k RSA Key.", + "2k RSA Key." + ], + "enum": [ + "KEY_ALG_UNSPECIFIED", + "KEY_ALG_RSA_1024", + "KEY_ALG_RSA_2048" + ] }, - "pageSize": { - "type": "integer", - "description": "Optional limit on the number of service accounts to include in the response. Further accounts can subsequently be obtained by including the ListServiceAccountsResponse.next_page_token in a subsequent request.", - "format": "int32", - "location": "query" + "privateKeyType": { + "enumDescriptions": [ + "Unspecified. 
Equivalent to `TYPE_GOOGLE_CREDENTIALS_FILE`.", + "PKCS12 format.\nThe password for the PKCS12 file is `notasecret`.\nFor more information, see https://tools.ietf.org/html/rfc7292.", + "Google Credentials File format." + ], + "enum": [ + "TYPE_UNSPECIFIED", + "TYPE_PKCS12_FILE", + "TYPE_GOOGLE_CREDENTIALS_FILE" + ], + "description": "The output format for the private key.\nOnly provided in `CreateServiceAccountKey` responses, not\nin `GetServiceAccountKey` or `ListServiceAccountKey` responses.\n\nGoogle never exposes system-managed private keys, and never retains\nuser-managed private keys.", + "type": "string" }, - "pageToken": { - "type": "string", - "description": "Optional pagination token returned in an earlier ListServiceAccountsResponse.next_page_token.", - "location": "query" + "validAfterTime": { + "description": "The key can be used after this timestamp.", + "format": "google-datetime", + "type": "string" } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListServiceAccountsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "get": { - "id": "iam.projects.serviceAccounts.get", - "path": "v1/{+name}", - "httpMethod": "GET", - "description": "Gets a ServiceAccount.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "id": "ServiceAccountKey" + }, + "CreateServiceAccountKeyRequest": { + "description": "The service account key create request.", + "type": "object", + "properties": { + "keyAlgorithm": { + "description": "Which type of key and algorithm to use for the key.\nThe default is currently a 4K RSA key. However this may change in the\nfuture.", + "type": "string", + "enumDescriptions": [ + "An unspecified key algorithm.", + "1k RSA Key.", + "2k RSA Key." + ], + "enum": [ + "KEY_ALG_UNSPECIFIED", + "KEY_ALG_RSA_1024", + "KEY_ALG_RSA_2048" + ] + }, + "privateKeyType": { + "description": "The output format of the private key. `GOOGLE_CREDENTIALS_FILE` is the\ndefault output format.", + "type": "string", + "enumDescriptions": [ + "Unspecified. Equivalent to `TYPE_GOOGLE_CREDENTIALS_FILE`.", + "PKCS12 format.\nThe password for the PKCS12 file is `notasecret`.\nFor more information, see https://tools.ietf.org/html/rfc7292.", + "Google Credentials File format." + ], + "enum": [ + "TYPE_UNSPECIFIED", + "TYPE_PKCS12_FILE", + "TYPE_GOOGLE_CREDENTIALS_FILE" + ] } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ServiceAccount" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "create": { - "id": "iam.projects.serviceAccounts.create", - "path": "v1/{+name}/serviceAccounts", - "httpMethod": "POST", - "description": "Creates a ServiceAccount and returns it.", - "parameters": { - "name": { - "type": "string", - "description": "Required. 
The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", - "required": true, - "pattern": "^projects/[^/]*$", - "location": "path" + "id": "CreateServiceAccountKeyRequest" + }, + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "array", + "items": { + "type": "string" + } } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "CreateServiceAccountRequest" - }, - "response": { - "$ref": "ServiceAccount" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "update": { - "id": "iam.projects.serviceAccounts.update", - "path": "v1/{+name}", - "httpMethod": "PUT", - "description": "Updates a ServiceAccount. Currently, only the following fields are updatable: `display_name` . The `etag` is mandatory.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Requests using `-` as a wildcard for the project will infer the project from the `account` and the `account` value can be the `email` address or the `unique_id` of the service account. In responses the resource name will always be in the format `projects/{project}/serviceAccounts/{email}`.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "id": "TestIamPermissionsRequest" + }, + "SignBlobResponse": { + "description": "The service account sign blob response.", + "type": "object", + "properties": { + "signature": { + "description": "The signed blob.", + "format": "byte", + "type": "string" + }, + "keyId": { + "description": "The id of the key used to sign the blob.", + "type": "string" } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "ServiceAccount" - }, - "response": { - "$ref": "ServiceAccount" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "delete": { - "id": "iam.projects.serviceAccounts.delete", - "path": "v1/{+name}", - "httpMethod": "DELETE", - "description": "Deletes a ServiceAccount.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. 
The `account` value can be the `email` address or the `unique_id` of the service account.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "id": "SignBlobResponse" + }, + "SignJwtResponse": { + "description": "The service account sign JWT response.", + "type": "object", + "properties": { + "keyId": { + "description": "The id of the key used to sign the JWT.", + "type": "string" + }, + "signedJwt": { + "description": "The signed JWT.", + "type": "string" } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "signBlob": { - "id": "iam.projects.serviceAccounts.signBlob", - "path": "v1/{+name}:signBlob", - "httpMethod": "POST", - "description": "Signs a blob using a service account's system-managed private key.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "id": "SignJwtResponse" + }, + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "type": "object", + "properties": { + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + }, + "version": { + "description": "Version of the `Policy`. 
The default version is 0.", + "format": "int32", + "type": "integer" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "SignBlobRequest" - }, - "response": { - "$ref": "SignBlobResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "getIamPolicy": { - "id": "iam.projects.serviceAccounts.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "httpMethod": "POST", - "description": "Returns the IAM access control policy for a ServiceAccount.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path. For example, a Project resource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "id": "Policy" + }, + "SignJwtRequest": { + "description": "The service account sign JWT request.", + "type": "object", + "properties": { + "payload": { + "description": "The JWT payload to sign, a JSON JWT Claim set.", + "type": "string" } - }, - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "setIamPolicy": { - "id": "iam.projects.serviceAccounts.setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "httpMethod": "POST", - "description": "Sets the IAM access control policy for a ServiceAccount.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path. For example, a Project resource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "id": "SignJwtRequest" + }, + "AuditData": { + "description": "Audit log information specific to Cloud IAM. This message is serialized\nas an `Any` type in the `ServiceData` message of an\n`AuditLog` message.", + "type": "object", + "properties": { + "policyDelta": { + "description": "Policy delta between the original policy and the newly set policy.", + "$ref": "PolicyDelta" } - }, - "parameterOrder": [ - "resource" - ], - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] }, - "testIamPermissions": { - "id": "iam.projects.serviceAccounts.testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "httpMethod": "POST", - "description": "Tests the specified permissions against the IAM access control policy for a ServiceAccount.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path. For example, a Project resource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "id": "AuditData" + }, + "BindingDelta": { + "description": "One delta entry for Binding. 
Each individual change (only one member in each\nentry) to a binding will be a separate entry.", + "type": "object", + "properties": { + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + }, + "action": { + "description": "The action that was performed on a Binding.\nRequired", + "type": "string", + "enumDescriptions": [ + "Unspecified.", + "Addition of a Binding.", + "Removal of a Binding." + ], + "enum": [ + "ACTION_UNSPECIFIED", + "ADD", + "REMOVE" + ] + }, + "member": { + "description": "A single identity requesting access for a Cloud Platform resource.\nFollows the same format of Binding.members.\nRequired", + "type": "string" } - }, - "parameterOrder": [ - "resource" - ], - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - }, - "resources": { - "keys": { - "methods": { - "list": { - "id": "iam.projects.serviceAccounts.keys.list", - "path": "v1/{+name}/keys", - "httpMethod": "GET", - "description": "Lists ServiceAccountKeys.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project, will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" - }, - "keyTypes": { - "type": "string", - "description": "Filters the types of keys the user wants to include in the list response. Duplicate key types are not allowed. If no key type is provided, all keys are returned.", - "enum": [ - "KEY_TYPE_UNSPECIFIED", - "USER_MANAGED", - "SYSTEM_MANAGED" - ], - "repeated": true, - "location": "query" + }, + "id": "BindingDelta" + }, + "PolicyDelta": { + "description": "The difference delta between two policies.", + "type": "object", + "properties": { + "bindingDeltas": { + "description": "The delta for Bindings between two policies.", + "type": "array", + "items": { + "$ref": "BindingDelta" } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListServiceAccountKeysResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + } + }, + "id": "PolicyDelta" + }, + "ListServiceAccountsResponse": { + "description": "The service account list response.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "To retrieve the next page of results, set\nListServiceAccountsRequest.page_token\nto this value.", + "type": "string" }, - "get": { - "id": "iam.projects.serviceAccounts.keys.get", - "path": "v1/{+name}", - "httpMethod": "GET", - "description": "Gets the ServiceAccountKey by key id.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account key in the following format: `projects/{project}/serviceAccounts/{account}/keys/{key}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*/keys/[^/]*$", - "location": "path" - }, - "publicKeyType": { - "type": "string", - "description": "The output format of the public key requested. 
X509_PEM is the default output format.", - "enum": [ - "TYPE_NONE", - "TYPE_X509_PEM_FILE", - "TYPE_RAW_PUBLIC_KEY" - ], - "location": "query" + "accounts": { + "description": "The list of matching service accounts.", + "type": "array", + "items": { + "$ref": "ServiceAccount" } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ServiceAccountKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + } + }, + "id": "ListServiceAccountsResponse" + }, + "CreateServiceAccountRequest": { + "description": "The service account create request.", + "type": "object", + "properties": { + "accountId": { + "description": "Required. The account id that is used to generate the service account\nemail address and a stable unique id. It is unique within a project,\nmust be 6-30 characters long, and match the regular expression\n`[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.", + "type": "string" }, - "create": { - "id": "iam.projects.serviceAccounts.keys.create", - "path": "v1/{+name}/keys", - "httpMethod": "POST", - "description": "Creates a ServiceAccountKey and returns it.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", - "location": "path" + "serviceAccount": { + "description": "The ServiceAccount resource to create.\nCurrently, only the following values are user assignable:\n`display_name` .", + "$ref": "ServiceAccount" + } + }, + "id": "CreateServiceAccountRequest" + }, + "QueryGrantableRolesResponse": { + "description": "The grantable role query response.", + "type": "object", + "properties": { + "roles": { + "description": "The list of matching roles.", + "type": "array", + "items": { + "$ref": "Role" } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "CreateServiceAccountKeyRequest" - }, - "response": { - "$ref": "ServiceAccountKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + } + }, + "id": "QueryGrantableRolesResponse" + }, + "SignBlobRequest": { + "description": "The service account sign blob request.", + "type": "object", + "properties": { + "bytesToSign": { + "description": "The bytes to sign.", + "format": "byte", + "type": "string" + } + }, + "id": "SignBlobRequest" + }, + "Role": { + "description": "A role in the Identity and Access Management API.", + "type": "object", + "properties": { + "description": { + "description": "Optional. A human-readable description for the role.", + "type": "string" + }, + "title": { + "description": "Optional. A human-readable title for the role. 
Typically this\nis limited to 100 UTF-8 bytes.", + "type": "string" + }, + "name": { + "description": "The name of the role.\n\nWhen Role is used in CreateRole, the role name must not be set.\n\nWhen Role is used in output and other input such as UpdateRole, the role\nname is the complete path, e.g., roles/logging.viewer for curated roles\nand organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.", + "type": "string" + } + }, + "id": "Role" + }, + "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "type": "object", + "properties": { + "policy": { + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", + "$ref": "Policy" + } + }, + "id": "SetIamPolicyRequest" + }, + "Binding": { + "description": "Associates `members` with a `role`.", + "type": "object", + "properties": { + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "type": "array", + "items": { + "type": "string" + } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + } + }, + "id": "Binding" + }, + "ServiceAccount": { + "description": "A service account in the Identity and Access Management API.\n\nTo create a service account, specify the `project_id` and the `account_id`\nfor the account. The `account_id` is unique within the project, and is used\nto generate the service account email address and a stable\n`unique_id`.\n\nIf the account already exists, the account's resource name is returned\nin util::Status's ResourceInfo.resource_name in the format of\nprojects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}. The caller can\nuse the name in other methods to access the account.\n\nAll other methods can identify the service account using the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "type": "object", + "properties": { + "oauth2ClientId": { + "description": "@OutputOnly. 
The OAuth2 client id for the service account.\nThis is used in conjunction with the OAuth2 clientconfig API to make\nthree legged OAuth2 (3LO) flows to access the data of Google users.", + "type": "string" }, - "delete": { - "id": "iam.projects.serviceAccounts.keys.delete", - "path": "v1/{+name}", - "httpMethod": "DELETE", - "description": "Deletes a ServiceAccountKey.", - "parameters": { - "name": { - "type": "string", - "description": "The resource name of the service account key in the following format: `projects/{project}/serviceAccounts/{account}/keys/{key}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", - "required": true, - "pattern": "^projects/[^/]*/serviceAccounts/[^/]*/keys/[^/]*$", - "location": "path" + "uniqueId": { + "description": "@OutputOnly The unique and stable id of the service account.", + "type": "string" + }, + "displayName": { + "description": "Optional. A user-specified description of the service account. Must be\nfewer than 100 UTF-8 bytes.", + "type": "string" + }, + "etag": { + "description": "Used to perform a consistent read-modify-write.", + "format": "byte", + "type": "string" + }, + "email": { + "description": "@OutputOnly The email address of the service account.", + "type": "string" + }, + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nRequests using `-` as a wildcard for the project will infer the project\nfrom the `account` and the `account` value can be the `email` address or\nthe `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.", + "type": "string" + }, + "projectId": { + "description": "@OutputOnly The id of the project that owns the service account.", + "type": "string" + } + }, + "id": "ServiceAccount" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "QueryGrantableRolesRequest": { + "description": "The grantable role query request.", + "type": "object", + "properties": { + "fullResourceName": { + "description": "Required. 
The full resource name to query from the list of grantable roles.\n\nThe name follows the Google Cloud Platform resource format.\nFor example, a Cloud Platform project with id `my-project` will be named\n`//cloudresourcemanager.googleapis.com/projects/my-project`.", + "type": "string" + } + }, + "id": "QueryGrantableRolesRequest" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "canonicalName": "iam", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://iam.googleapis.com/", + "ownerDomain": "google.com", + "name": "iam", + "batchPath": "batch", + "title": "Google Identity and Access Management (IAM) API", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "serviceAccounts": { + "methods": { + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "location": "path", + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "iam.projects.serviceAccounts.testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Tests the specified permissions against the IAM access control policy\nfor a ServiceAccount." + }, + "delete": { + "description": "Deletes a ServiceAccount.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", + "path": "v1/{+name}", + "id": "iam.projects.serviceAccounts.delete" + }, + "list": { + "description": "Lists ServiceAccounts for a project.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListServiceAccountsResponse" + }, + "parameters": { + "name": { + "location": "path", + "description": "Required. 
The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + }, + "pageToken": { + "location": "query", + "description": "Optional pagination token returned in an earlier\nListServiceAccountsResponse.next_page_token.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Optional limit on the number of service accounts to include in the\nresponse. Further accounts can subsequently be obtained by including the\nListServiceAccountsResponse.next_page_token\nin a subsequent request.", + "format": "int32", + "type": "integer" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts", + "id": "iam.projects.serviceAccounts.list", + "path": "v1/{+name}/serviceAccounts" + }, + "signBlob": { + "request": { + "$ref": "SignBlobRequest" + }, + "description": "Signs a blob using a service account's system-managed private key.", + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "SignBlobResponse" + }, + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", + "id": "iam.projects.serviceAccounts.signBlob", + "path": "v1/{+name}:signBlob" + }, + "create": { + "flatPath": "v1/projects/{projectsId}/serviceAccounts", + "path": "v1/{+name}/serviceAccounts", + "id": "iam.projects.serviceAccounts.create", + "request": { + "$ref": "CreateServiceAccountRequest" + }, + "description": "Creates a ServiceAccount\nand returns it.", + "response": { + "$ref": "ServiceAccount" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "parameters": { + "name": { + "location": "path", + "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "signJwt": { + "description": "Signs a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail.", + "request": { + "$ref": "SignJwtRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "SignJwtResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + "id": "iam.projects.serviceAccounts.signJwt", + "path": "v1/{+name}:signJwt" + }, + "setIamPolicy": { + "description": "Sets the IAM access control policy for a\nServiceAccount.", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "iam.projects.serviceAccounts.setIamPolicy" + }, + "getIamPolicy": { + "description": "Returns the IAM access control policy for a\nServiceAccount.", + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "parameters": { + "resource": { + "location": "path", + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", + "id": "iam.projects.serviceAccounts.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy" + }, + "get": { + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", + "id": "iam.projects.serviceAccounts.get", + "path": "v1/{+name}", + "description": "Gets a ServiceAccount.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ServiceAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path" + } + } + }, + "update": { + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", + "id": "iam.projects.serviceAccounts.update", + "path": "v1/{+name}", + "description": "Updates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` .\nThe `etag` is mandatory.", + "request": { + "$ref": "ServiceAccount" + }, + "httpMethod": "PUT", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ServiceAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nRequests using `-` as a wildcard for the project will infer the project\nfrom the `account` and the `account` value can be the `email` address or\nthe `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path" + } + } + } + }, + "resources": { + "keys": { + "methods": { + "delete": { + "httpMethod": "DELETE", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", + "id": "iam.projects.serviceAccounts.keys.delete", + "path": "v1/{+name}", + "description": "Deletes a ServiceAccountKey." + }, + "list": { + "description": "Lists ServiceAccountKeys.", + "response": { + "$ref": "ListServiceAccountKeysResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nUsing `-` as a wildcard for the project, will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$" + }, + "keyTypes": { + "description": "Filters the types of keys the user wants to include in the list\nresponse. Duplicate key types are not allowed. 
If no key type\nis provided, all keys are returned.", + "type": "string", + "repeated": true, + "location": "query", + "enum": [ + "KEY_TYPE_UNSPECIFIED", + "USER_MANAGED", + "SYSTEM_MANAGED" + ] + } + }, + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", + "path": "v1/{+name}/keys", + "id": "iam.projects.serviceAccounts.keys.list" + }, + "get": { + "description": "Gets the ServiceAccountKey\nby key id.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ServiceAccountKey" + }, + "parameters": { + "publicKeyType": { + "location": "query", + "enum": [ + "TYPE_NONE", + "TYPE_X509_PEM_FILE", + "TYPE_RAW_PUBLIC_KEY" + ], + "description": "The output format of the public key requested.\nX509_PEM is the default output format.", + "type": "string" + }, + "name": { + "location": "path", + "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\n\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", + "id": "iam.projects.serviceAccounts.keys.get", + "path": "v1/{+name}" + }, + "create": { + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", + "path": "v1/{+name}/keys", + "id": "iam.projects.serviceAccounts.keys.create", + "request": { + "$ref": "CreateServiceAccountKeyRequest" + }, + "description": "Creates a ServiceAccountKey\nand returns it.", + "response": { + "$ref": "ServiceAccountKey" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } } - }, - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] } - } } - } + }, + "roles": { + "methods": { + "queryGrantableRoles": { + "description": "Queries roles that can be granted on a particular resource.\nA role is grantable if it can be used as the role in a binding for a policy\nfor that resource.", + "request": { + "$ref": "QueryGrantableRolesRequest" + }, + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "QueryGrantableRolesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": {}, + "flatPath": "v1/roles:queryGrantableRoles", + "id": "iam.roles.queryGrantableRoles", + "path": "v1/roles:queryGrantableRoles" + } + } } - } }, - "roles": { - "methods": { - "queryGrantableRoles": { - "id": "iam.roles.queryGrantableRoles", - "path": "v1/roles:queryGrantableRoles", - "httpMethod": "POST", - "description": "Queries roles that can be granted on a particular resource. A role is grantable if it can be used as the role in a binding for a policy for that resource.", - "request": { - "$ref": "QueryGrantableRolesRequest" - }, - "response": { - "$ref": "QueryGrantableRolesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" } - } - } - } + }, + "version": "v1", + "baseUrl": "https://iam.googleapis.com/", + "kind": "discovery#restDescription", + "description": "Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.", + "servicePath": "", + "basePath": "", + "revision": "20170126", + "id": "iam:v1", + "documentationLink": "https://cloud.google.com/iam/" } diff --git a/vendor/google.golang.org/api/iam/v1/iam-gen.go b/vendor/google.golang.org/api/iam/v1/iam-gen.go index 6bb880952..43afc3dcd 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.ServiceAccounts = NewProjectsServiceAccountsService(s) @@ -121,7 +126,8 @@ type RolesService struct { } // AuditData: Audit log information specific to Cloud IAM. This message -// is serialized as an `Any` type in the `ServiceData` message of an +// is serialized +// as an `Any` type in the `ServiceData` message of an // `AuditLog` message. type AuditData struct { // PolicyDelta: Policy delta between the original policy and the newly @@ -154,24 +160,43 @@ func (s *AuditData) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { // Members: Specifies the identities requesting access for a Cloud - // Platform resource. `members` can have the following values: * - // `allUsers`: A special identifier that represents anyone who is on the - // internet; with or without a Google account. * - // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@gmail.com` or `joe@example.com`. * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `domain:{domain}`: A Google Apps domain name - // that represents all the users of that domain. For example, - // `google.com` or `example.com`. 
+ // Platform resource. + // `members` can have the following values: + // + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents + // anyone + // who is authenticated with a Google account or a service + // account. + // + // * `user:{emailid}`: An email address that represents a specific + // Google + // account. For example, `alice@gmail.com` or `joe@example.com`. + // + // + // * `serviceAccount:{emailid}`: An email address that represents a + // service + // account. For example, + // `my-other-app@appspot.gserviceaccount.com`. + // + // * `group:{emailid}`: An email address that represents a Google + // group. + // For example, `admins@example.com`. + // + // * `domain:{domain}`: A Google Apps domain name that represents all + // the + // users of that domain. For example, `google.com` or + // `example.com`. + // + // Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. For example, - // `roles/viewer`, `roles/editor`, or `roles/owner`. Required + // Role: Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or + // `roles/owner`. + // Required Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Members") to @@ -198,23 +223,28 @@ func (s *Binding) MarshalJSON() ([]byte, error) { } // BindingDelta: One delta entry for Binding. Each individual change -// (only one member in each entry) to a binding will be a separate -// entry. +// (only one member in each +// entry) to a binding will be a separate entry. type BindingDelta struct { - // Action: The action that was performed on a Binding. Required + // Action: The action that was performed on a Binding. + // Required // // Possible values: - // "ACTION_UNSPECIFIED" - // "ADD" - // "REMOVE" + // "ACTION_UNSPECIFIED" - Unspecified. + // "ADD" - Addition of a Binding. + // "REMOVE" - Removal of a Binding. Action string `json:"action,omitempty"` // Member: A single identity requesting access for a Cloud Platform - // resource. Follows the same format of Binding.members. Required + // resource. + // Follows the same format of Binding.members. + // Required Member string `json:"member,omitempty"` - // Role: Role that is assigned to `members`. For example, - // `roles/viewer`, `roles/editor`, or `roles/owner`. Required + // Role: Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or + // `roles/owner`. + // Required Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Action") to @@ -243,23 +273,28 @@ func (s *BindingDelta) MarshalJSON() ([]byte, error) { // CreateServiceAccountKeyRequest: The service account key create // request. type CreateServiceAccountKeyRequest struct { - // KeyAlgorithm: Which type of key and algorithm to use for the key. The - // default is currently a 4K RSA key. However this may change in the + // KeyAlgorithm: Which type of key and algorithm to use for the key. + // The default is currently a 4K RSA key. However this may change in + // the // future. // // Possible values: - // "KEY_ALG_UNSPECIFIED" - // "KEY_ALG_RSA_1024" - // "KEY_ALG_RSA_2048" + // "KEY_ALG_UNSPECIFIED" - An unspecified key algorithm. + // "KEY_ALG_RSA_1024" - 1k RSA Key. + // "KEY_ALG_RSA_2048" - 2k RSA Key. KeyAlgorithm string `json:"keyAlgorithm,omitempty"` // PrivateKeyType: The output format of the private key. 
- // `GOOGLE_CREDENTIALS_FILE` is the default output format. + // `GOOGLE_CREDENTIALS_FILE` is the + // default output format. // // Possible values: - // "TYPE_UNSPECIFIED" - // "TYPE_PKCS12_FILE" - // "TYPE_GOOGLE_CREDENTIALS_FILE" + // "TYPE_UNSPECIFIED" - Unspecified. Equivalent to + // `TYPE_GOOGLE_CREDENTIALS_FILE`. + // "TYPE_PKCS12_FILE" - PKCS12 format. + // The password for the PKCS12 file is `notasecret`. + // For more information, see https://tools.ietf.org/html/rfc7292. + // "TYPE_GOOGLE_CREDENTIALS_FILE" - Google Credentials File format. PrivateKeyType string `json:"privateKeyType,omitempty"` // ForceSendFields is a list of field names (e.g. "KeyAlgorithm") to @@ -288,13 +323,18 @@ func (s *CreateServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { // CreateServiceAccountRequest: The service account create request. type CreateServiceAccountRequest struct { // AccountId: Required. The account id that is used to generate the - // service account email address and a stable unique id. It is unique - // within a project, must be 6-30 characters long, and match the regular - // expression `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035. + // service account + // email address and a stable unique id. It is unique within a + // project, + // must be 6-30 characters long, and match the regular + // expression + // `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035. AccountId string `json:"accountId,omitempty"` - // ServiceAccount: The ServiceAccount resource to create. Currently, - // only the following values are user assignable: `display_name` . + // ServiceAccount: The ServiceAccount resource to create. + // Currently, only the following values are user + // assignable: + // `display_name` . ServiceAccount *ServiceAccount `json:"serviceAccount,omitempty"` // ForceSendFields is a list of field names (e.g. "AccountId") to @@ -321,11 +361,17 @@ func (s *CreateServiceAccountRequest) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated empty messages in your APIs. A typical example is to use -// it as the request or the response type of an API method. For -// instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for `Empty` is -// empty JSON object `{}`. +// duplicated +// empty messages in your APIs. A typical example is to use it as the +// request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -370,8 +416,10 @@ type ListServiceAccountsResponse struct { // Accounts: The list of matching service accounts. Accounts []*ServiceAccount `json:"accounts,omitempty"` - // NextPageToken: To retrieve the next page of results, set - // ListServiceAccountsRequest.page_token to this value. + // NextPageToken: To retrieve the next page of results, + // set + // ListServiceAccountsRequest.page_token + // to this value. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -402,33 +450,66 @@ func (s *ListServiceAccountsResponse) MarshalJSON() ([]byte, error) { } // Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to specify access control policies for Cloud Platform resources. 
+// used to +// specify access control policies for Cloud Platform resources. +// +// // A `Policy` consists of a list of `bindings`. A `Binding` binds a list -// of `members` to a `role`, where the members can be user accounts, -// Google groups, Google domains, and service accounts. A `role` is a -// named list of permissions defined by IAM. **Example** { "bindings": [ -// { "role": "roles/owner", "members": [ "user:mike@example.com", -// "group:admins@example.com", "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", ] }, { -// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] } -// For a description of IAM and its features, see the [IAM developer's -// guide](https://cloud.google.com/iam). +// of +// `members` to a `role`, where the members can be user accounts, Google +// groups, +// Google domains, and service accounts. A `role` is a named list of +// permissions +// defined by IAM. +// +// **Example** +// +// { +// "bindings": [ +// { +// "role": "roles/owner", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", +// ] +// }, +// { +// "role": "roles/viewer", +// "members": ["user:sean@example.com"] +// } +// ] +// } +// +// For a description of IAM and its features, see the +// [IAM developer's guide](https://cloud.google.com/iam). type Policy struct { - // Bindings: Associates a list of `members` to a `role`. Multiple - // `bindings` must not be specified for the same `role`. `bindings` with - // no members will result in an error. + // Bindings: Associates a list of `members` to a `role`. + // Multiple `bindings` must not be specified for the same + // `role`. + // `bindings` with no members will result in an error. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help prevent simultaneous updates of a policy from overwriting each - // other. It is strongly suggested that systems make use of the `etag` - // in the read-modify-write cycle to perform policy updates in order to - // avoid race conditions: An `etag` is returned in the response to - // `getIamPolicy`, and systems are expected to put that etag in the - // request to `setIamPolicy` to ensure that their change will be applied - // to the same version of the policy. If no `etag` is provided in the - // call to `setIamPolicy`, then the existing policy is overwritten - // blindly. + // help + // prevent simultaneous updates of a policy from overwriting each + // other. + // It is strongly suggested that systems make use of the `etag` in + // the + // read-modify-write cycle to perform policy updates in order to avoid + // race + // conditions: An `etag` is returned in the response to `getIamPolicy`, + // and + // systems are expected to put that etag in the request to + // `setIamPolicy` to + // ensure that their change will be applied to the same version of the + // policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the + // existing + // policy is overwritten blindly. Etag string `json:"etag,omitempty"` // Version: Version of the `Policy`. The default version is 0. @@ -492,9 +573,11 @@ func (s *PolicyDelta) MarshalJSON() ([]byte, error) { // QueryGrantableRolesRequest: The grantable role query request. type QueryGrantableRolesRequest struct { // FullResourceName: Required. The full resource name to query from the - // list of grantable roles. 
The name follows the Google Cloud Platform - // resource format. For example, a Cloud Platform project with id - // `my-project` will be named + // list of grantable roles. + // + // The name follows the Google Cloud Platform resource format. + // For example, a Cloud Platform project with id `my-project` will be + // named // `//cloudresourcemanager.googleapis.com/projects/my-project`. FullResourceName string `json:"fullResourceName,omitempty"` @@ -556,18 +639,23 @@ func (s *QueryGrantableRolesResponse) MarshalJSON() ([]byte, error) { // Role: A role in the Identity and Access Management API. type Role struct { - // Description: Optional. A human-readable description for the role. + // Description: Optional. A human-readable description for the role. Description string `json:"description,omitempty"` - // Name: The name of the role. When Role is used in CreateRole, the role - // name must not be set. When Role is used in output and other input - // such as UpdateRole, the role name is the complete path, e.g., - // roles/logging.viewer for curated roles and - // organizations/{organization-id}/roles/logging.viewer for custom + // Name: The name of the role. + // + // When Role is used in CreateRole, the role name must not be set. + // + // When Role is used in output and other input such as UpdateRole, the + // role + // name is the complete path, e.g., roles/logging.viewer for curated + // roles + // and organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom // roles. Name string `json:"name,omitempty"` - // Title: Optional. A human-readable title for the role. Typically this + // Title: Optional. A human-readable title for the role. Typically + // this // is limited to 100 UTF-8 bytes. Title string `json:"title,omitempty"` @@ -595,22 +683,37 @@ func (s *Role) MarshalJSON() ([]byte, error) { } // ServiceAccount: A service account in the Identity and Access -// Management API. To create a service account, specify the `project_id` -// and the `account_id` for the account. The `account_id` is unique -// within the project, and is used to generate the service account email -// address and a stable `unique_id`. If the account already exists, the -// account's resource name is returned in util::Status's -// ResourceInfo.resource_name in the format of -// projects/{project}/serviceAccounts/{email}. The caller can use the -// name in other methods to access the account. All other methods can -// identify the service account using the format -// `projects/{project}/serviceAccounts/{account}`. Using `-` as a -// wildcard for the project will infer the project from the account. The -// `account` value can be the `email` address or the `unique_id` of the -// service account. +// Management API. +// +// To create a service account, specify the `project_id` and the +// `account_id` +// for the account. The `account_id` is unique within the project, and +// is used +// to generate the service account email address and a +// stable +// `unique_id`. +// +// If the account already exists, the account's resource name is +// returned +// in util::Status's ResourceInfo.resource_name in the format +// of +// projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}. The +// caller can +// use the name in other methods to access the account. +// +// All other methods can identify the service account using the +// format +// `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}` +// . +// Using `-` as a wildcard for the project will infer the project +// from +// the account. 
The `account` value can be the `email` address or +// the +// `unique_id` of the service account. type ServiceAccount struct { // DisplayName: Optional. A user-specified description of the service - // account. Must be fewer than 100 UTF-8 bytes. + // account. Must be + // fewer than 100 UTF-8 bytes. DisplayName string `json:"displayName,omitempty"` // Email: @OutputOnly The email address of the service account. @@ -620,18 +723,27 @@ type ServiceAccount struct { Etag string `json:"etag,omitempty"` // Name: The resource name of the service account in the following - // format: `projects/{project}/serviceAccounts/{account}`. Requests - // using `-` as a wildcard for the project will infer the project from - // the `account` and the `account` value can be the `email` address or - // the `unique_id` of the service account. In responses the resource - // name will always be in the format - // `projects/{project}/serviceAccounts/{email}`. + // format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL} + // `. + // + // Requests using `-` as a wildcard for the project will infer the + // project + // from the `account` and the `account` value can be the `email` address + // or + // the `unique_id` of the service account. + // + // In responses the resource name will always be in the + // format + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}` + // . Name string `json:"name,omitempty"` // Oauth2ClientId: @OutputOnly. The OAuth2 client id for the service - // account. This is used in conjunction with the OAuth2 clientconfig API - // to make three legged OAuth2 (3LO) flows to access the data of Google - // users. + // account. + // This is used in conjunction with the OAuth2 clientconfig API to + // make + // three legged OAuth2 (3LO) flows to access the data of Google users. Oauth2ClientId string `json:"oauth2ClientId,omitempty"` // ProjectId: @OutputOnly The id of the project that owns the service @@ -669,44 +781,66 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAccountKey: Represents a service account key. A service -// account has two sets of key-pairs: user-managed, and system-managed. -// User-managed key-pairs can be created and deleted by users. Users are +// ServiceAccountKey: Represents a service account key. +// +// A service account has two sets of key-pairs: user-managed, +// and +// system-managed. +// +// User-managed key-pairs can be created and deleted by users. Users +// are // responsible for rotating these keys periodically to ensure security -// of their service accounts. Users retain the private key of these -// key-pairs, and Google retains ONLY the public key. System-managed -// key-pairs are managed automatically by Google, and rotated daily -// without user intervention. The private key never leaves Google's -// servers to maximize security. Public keys for all service accounts -// are also published at the OAuth2 Service Account API. +// of +// their service accounts. Users retain the private key of these +// key-pairs, +// and Google retains ONLY the public key. +// +// System-managed key-pairs are managed automatically by Google, and +// rotated +// daily without user intervention. The private key never leaves +// Google's +// servers to maximize security. +// +// Public keys for all service accounts are also published at the +// OAuth2 +// Service Account API. 
type ServiceAccountKey struct { // KeyAlgorithm: Specifies the algorithm (and possibly key size) for the // key. // // Possible values: - // "KEY_ALG_UNSPECIFIED" - // "KEY_ALG_RSA_1024" - // "KEY_ALG_RSA_2048" + // "KEY_ALG_UNSPECIFIED" - An unspecified key algorithm. + // "KEY_ALG_RSA_1024" - 1k RSA Key. + // "KEY_ALG_RSA_2048" - 2k RSA Key. KeyAlgorithm string `json:"keyAlgorithm,omitempty"` // Name: The resource name of the service account key in the following - // format `projects/{project}/serviceAccounts/{account}/keys/{key}`. + // format + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/ + // keys/{key}`. Name string `json:"name,omitempty"` // PrivateKeyData: The private key data. Only provided in - // `CreateServiceAccountKey` responses. + // `CreateServiceAccountKey` + // responses. PrivateKeyData string `json:"privateKeyData,omitempty"` - // PrivateKeyType: The output format for the private key. Only provided - // in `CreateServiceAccountKey` responses, not in `GetServiceAccountKey` - // or `ListServiceAccountKey` responses. Google never exposes - // system-managed private keys, and never retains user-managed private - // keys. + // PrivateKeyType: The output format for the private key. + // Only provided in `CreateServiceAccountKey` responses, not + // in `GetServiceAccountKey` or `ListServiceAccountKey` + // responses. + // + // Google never exposes system-managed private keys, and never + // retains + // user-managed private keys. // // Possible values: - // "TYPE_UNSPECIFIED" - // "TYPE_PKCS12_FILE" - // "TYPE_GOOGLE_CREDENTIALS_FILE" + // "TYPE_UNSPECIFIED" - Unspecified. Equivalent to + // `TYPE_GOOGLE_CREDENTIALS_FILE`. + // "TYPE_PKCS12_FILE" - PKCS12 format. + // The password for the PKCS12 file is `notasecret`. + // For more information, see https://tools.ietf.org/html/rfc7292. + // "TYPE_GOOGLE_CREDENTIALS_FILE" - Google Credentials File format. PrivateKeyType string `json:"privateKeyType,omitempty"` // PublicKeyData: The public key data. Only provided in @@ -749,9 +883,11 @@ func (s *ServiceAccountKey) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of the policy is limited to a few 10s of KB. An - // empty policy is a valid policy but certain Cloud Platform services - // (such as Projects) might reject them. + // `resource`. The size of + // the policy is limited to a few 10s of KB. An empty policy is a + // valid policy but certain Cloud Platform services (such as + // Projects) + // might reject them. Policy *Policy `json:"policy,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -840,12 +976,78 @@ func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SignJwtRequest: The service account sign JWT request. +type SignJwtRequest struct { + // Payload: The JWT payload to sign, a JSON JWT Claim set. + Payload string `json:"payload,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Payload") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Payload") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { + type noMethod SignJwtRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SignJwtResponse: The service account sign JWT response. +type SignJwtResponse struct { + // KeyId: The id of the key used to sign the JWT. + KeyId string `json:"keyId,omitempty"` + + // SignedJwt: The signed JWT. + SignedJwt string `json:"signedJwt,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "KeyId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KeyId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { + type noMethod SignJwtResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TestIamPermissionsRequest: Request message for `TestIamPermissions` // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. For more information see [IAM + // Permissions with + // wildcards (such as '*' or 'storage.*') are not allowed. For + // more + // information see + // [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -876,7 +1078,8 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is allowed. + // the caller is + // allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -917,7 +1120,8 @@ type ProjectsServiceAccountsCreateCall struct { header_ http.Header } -// Create: Creates a ServiceAccount and returns it. +// Create: Creates a ServiceAccount +// and returns it. 
func (r *ProjectsServiceAccountsService) Create(name string, createserviceaccountrequest *CreateServiceAccountRequest) *ProjectsServiceAccountsCreateCall { c := &ProjectsServiceAccountsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -956,6 +1160,7 @@ func (c *ProjectsServiceAccountsCreateCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createserviceaccountrequest) if err != nil { @@ -1011,7 +1216,8 @@ func (c *ProjectsServiceAccountsCreateCall) Do(opts ...googleapi.CallOption) (*S } return ret, nil // { - // "description": "Creates a ServiceAccount and returns it.", + // "description": "Creates a ServiceAccount\nand returns it.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.create", // "parameterOrder": [ @@ -1019,9 +1225,9 @@ func (c *ProjectsServiceAccountsCreateCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", + // "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", // "location": "path", - // "pattern": "^projects/[^/]*$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // } @@ -1088,6 +1294,7 @@ func (c *ProjectsServiceAccountsDeleteCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") @@ -1139,6 +1346,7 @@ func (c *ProjectsServiceAccountsDeleteCall) Do(opts ...googleapi.CallOption) (*E return ret, nil // { // "description": "Deletes a ServiceAccount.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", // "httpMethod": "DELETE", // "id": "iam.projects.serviceAccounts.delete", // "parameterOrder": [ @@ -1146,9 +1354,9 @@ func (c *ProjectsServiceAccountsDeleteCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -1223,6 +1431,7 @@ func (c *ProjectsServiceAccountsGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1277,6 +1486,7 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv return ret, nil // { // "description": "Gets a ServiceAccount.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", // "httpMethod": "GET", // "id": "iam.projects.serviceAccounts.get", // "parameterOrder": [ @@ -1284,9 +1494,9 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -1312,7 +1522,8 @@ type ProjectsServiceAccountsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Returns the IAM access control policy for a +// GetIamPolicy: Returns the IAM access control policy for +// a // ServiceAccount. func (r *ProjectsServiceAccountsService) GetIamPolicy(resource string) *ProjectsServiceAccountsGetIamPolicyCall { c := &ProjectsServiceAccountsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -1351,6 +1562,7 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") @@ -1401,7 +1613,8 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns the IAM access control policy for a ServiceAccount.", + // "description": "Returns the IAM access control policy for a\nServiceAccount.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.getIamPolicy", // "parameterOrder": [ @@ -1409,9 +1622,9 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path. 
For example, a Project resource is specified as `projects/{project}`.", + // "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -1446,16 +1659,19 @@ func (r *ProjectsServiceAccountsService) List(name string) *ProjectsServiceAccou } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of service accounts to include in the response. Further -// accounts can subsequently be obtained by including the -// ListServiceAccountsResponse.next_page_token in a subsequent request. +// the number of service accounts to include in the +// response. Further accounts can subsequently be obtained by including +// the +// ListServiceAccountsResponse.next_page_token +// in a subsequent request. func (c *ProjectsServiceAccountsListCall) PageSize(pageSize int64) *ProjectsServiceAccountsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Optional -// pagination token returned in an earlier +// pagination token returned in an +// earlier // ListServiceAccountsResponse.next_page_token. func (c *ProjectsServiceAccountsListCall) PageToken(pageToken string) *ProjectsServiceAccountsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -1503,6 +1719,7 @@ func (c *ProjectsServiceAccountsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1557,6 +1774,7 @@ func (c *ProjectsServiceAccountsListCall) Do(opts ...googleapi.CallOption) (*Lis return ret, nil // { // "description": "Lists ServiceAccounts for a project.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts", // "httpMethod": "GET", // "id": "iam.projects.serviceAccounts.list", // "parameterOrder": [ @@ -1564,20 +1782,20 @@ func (c *ProjectsServiceAccountsListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", + // "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", // "location": "path", - // "pattern": "^projects/[^/]*$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "pageSize": { - // "description": "Optional limit on the number of service accounts to include in the response. Further accounts can subsequently be obtained by including the ListServiceAccountsResponse.next_page_token in a subsequent request.", + // "description": "Optional limit on the number of service accounts to include in the\nresponse. 
Further accounts can subsequently be obtained by including the\nListServiceAccountsResponse.next_page_token\nin a subsequent request.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Optional pagination token returned in an earlier ListServiceAccountsResponse.next_page_token.", + // "description": "Optional pagination token returned in an earlier\nListServiceAccountsResponse.next_page_token.", // "location": "query", // "type": "string" // } @@ -1625,7 +1843,8 @@ type ProjectsServiceAccountsSetIamPolicyCall struct { header_ http.Header } -// SetIamPolicy: Sets the IAM access control policy for a +// SetIamPolicy: Sets the IAM access control policy for +// a // ServiceAccount. func (r *ProjectsServiceAccountsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsServiceAccountsSetIamPolicyCall { c := &ProjectsServiceAccountsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -1665,6 +1884,7 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -1720,7 +1940,8 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Sets the IAM access control policy for a ServiceAccount.", + // "description": "Sets the IAM access control policy for a\nServiceAccount.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.setIamPolicy", // "parameterOrder": [ @@ -1728,9 +1949,9 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path. For example, a Project resource is specified as `projects/{project}`.", + // "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -1800,6 +2021,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) if err != nil { @@ -1856,6 +2078,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( return ret, nil // { // "description": "Signs a blob using a service account's system-managed private key.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.signBlob", // "parameterOrder": [ @@ -1863,9 +2086,9 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -1884,6 +2107,149 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( } +// method id "iam.projects.serviceAccounts.signJwt": + +type ProjectsServiceAccountsSignJwtCall struct { + s *Service + name string + signjwtrequest *SignJwtRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SignJwt: Signs a JWT using a service account's system-managed private +// key. +// +// If no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM +// sets an +// an expiry time of one hour by default. If you request an expiry time +// of +// more than one hour, the request will fail. +func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { + c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.signjwtrequest = signjwtrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsSignJwtCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignJwtCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsServiceAccountsSignJwtCall) Context(ctx context.Context) *ProjectsServiceAccountsSignJwtCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signJwt") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.serviceAccounts.signJwt" call. +// Exactly one of *SignJwtResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SignJwtResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (*SignJwtResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SignJwtResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Signs a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + // "httpMethod": "POST", + // "id": "iam.projects.serviceAccounts.signJwt", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:signJwt", + // "request": { + // "$ref": "SignJwtRequest" + // }, + // "response": { + // "$ref": "SignJwtResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "iam.projects.serviceAccounts.testIamPermissions": type ProjectsServiceAccountsTestIamPermissionsCall struct { @@ -1896,7 +2262,8 @@ type ProjectsServiceAccountsTestIamPermissionsCall struct { } // TestIamPermissions: Tests the specified permissions against the IAM -// access control policy for a ServiceAccount. +// access control policy +// for a ServiceAccount. func (r *ProjectsServiceAccountsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsServiceAccountsTestIamPermissionsCall { c := &ProjectsServiceAccountsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -1935,6 +2302,7 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) doRequest(alt string) (* reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -1990,7 +2358,8 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Tests the specified permissions against the IAM access control policy for a ServiceAccount.", + // "description": "Tests the specified permissions against the IAM access control policy\nfor a ServiceAccount.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:testIamPermissions", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.testIamPermissions", // "parameterOrder": [ @@ -1998,9 +2367,9 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path. For example, a Project resource is specified as `projects/{project}`.", + // "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -2030,8 +2399,11 @@ type ProjectsServiceAccountsUpdateCall struct { header_ http.Header } -// Update: Updates a ServiceAccount. Currently, only the following -// fields are updatable: `display_name` . The `etag` is mandatory. +// Update: Updates a ServiceAccount. +// +// Currently, only the following fields are updatable: +// `display_name` . +// The `etag` is mandatory. 
func (r *ProjectsServiceAccountsService) Update(name string, serviceaccount *ServiceAccount) *ProjectsServiceAccountsUpdateCall { c := &ProjectsServiceAccountsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2070,6 +2442,7 @@ func (c *ProjectsServiceAccountsUpdateCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.serviceaccount) if err != nil { @@ -2125,7 +2498,8 @@ func (c *ProjectsServiceAccountsUpdateCall) Do(opts ...googleapi.CallOption) (*S } return ret, nil // { - // "description": "Updates a ServiceAccount. Currently, only the following fields are updatable: `display_name` . The `etag` is mandatory.", + // "description": "Updates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` .\nThe `etag` is mandatory.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", // "httpMethod": "PUT", // "id": "iam.projects.serviceAccounts.update", // "parameterOrder": [ @@ -2133,9 +2507,9 @@ func (c *ProjectsServiceAccountsUpdateCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Requests using `-` as a wildcard for the project will infer the project from the `account` and the `account` value can be the `email` address or the `unique_id` of the service account. In responses the resource name will always be in the format `projects/{project}/serviceAccounts/{email}`.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nRequests using `-` as a wildcard for the project will infer the project\nfrom the `account` and the `account` value can be the `email` address or\nthe `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -2165,7 +2539,8 @@ type ProjectsServiceAccountsKeysCreateCall struct { header_ http.Header } -// Create: Creates a ServiceAccountKey and returns it. +// Create: Creates a ServiceAccountKey +// and returns it. 
func (r *ProjectsServiceAccountsKeysService) Create(name string, createserviceaccountkeyrequest *CreateServiceAccountKeyRequest) *ProjectsServiceAccountsKeysCreateCall { c := &ProjectsServiceAccountsKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2204,6 +2579,7 @@ func (c *ProjectsServiceAccountsKeysCreateCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createserviceaccountkeyrequest) if err != nil { @@ -2259,7 +2635,8 @@ func (c *ProjectsServiceAccountsKeysCreateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a ServiceAccountKey and returns it.", + // "description": "Creates a ServiceAccountKey\nand returns it.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.keys.create", // "parameterOrder": [ @@ -2267,9 +2644,9 @@ func (c *ProjectsServiceAccountsKeysCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -2336,6 +2713,7 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") @@ -2387,6 +2765,7 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) Do(opts ...googleapi.CallOption) return ret, nil // { // "description": "Deletes a ServiceAccountKey.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", // "httpMethod": "DELETE", // "id": "iam.projects.serviceAccounts.keys.delete", // "parameterOrder": [ @@ -2394,9 +2773,9 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The resource name of the service account key in the following format: `projects/{project}/serviceAccounts/{account}/keys/{key}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", + // "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*/keys/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // } @@ -2423,7 +2802,8 @@ type ProjectsServiceAccountsKeysGetCall struct { header_ http.Header } -// Get: Gets the ServiceAccountKey by key id. +// Get: Gets the ServiceAccountKey +// by key id. func (r *ProjectsServiceAccountsKeysService) Get(name string) *ProjectsServiceAccountsKeysGetCall { c := &ProjectsServiceAccountsKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2431,8 +2811,8 @@ func (r *ProjectsServiceAccountsKeysService) Get(name string) *ProjectsServiceAc } // PublicKeyType sets the optional parameter "publicKeyType": The output -// format of the public key requested. X509_PEM is the default output -// format. +// format of the public key requested. +// X509_PEM is the default output format. // // Possible values: // "TYPE_NONE" @@ -2484,6 +2864,7 @@ func (c *ProjectsServiceAccountsKeysGetCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2537,7 +2918,8 @@ func (c *ProjectsServiceAccountsKeysGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the ServiceAccountKey by key id.", + // "description": "Gets the ServiceAccountKey\nby key id.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", // "httpMethod": "GET", // "id": "iam.projects.serviceAccounts.keys.get", // "parameterOrder": [ @@ -2545,14 +2927,14 @@ func (c *ProjectsServiceAccountsKeysGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The resource name of the service account key in the following format: `projects/{project}/serviceAccounts/{account}/keys/{key}`. Using `-` as a wildcard for the project will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", + // "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\n\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*/keys/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // }, // "publicKeyType": { - // "description": "The output format of the public key requested. X509_PEM is the default output format.", + // "description": "The output format of the public key requested.\nX509_PEM is the default output format.", // "enum": [ // "TYPE_NONE", // "TYPE_X509_PEM_FILE", @@ -2592,9 +2974,9 @@ func (r *ProjectsServiceAccountsKeysService) List(name string) *ProjectsServiceA } // KeyTypes sets the optional parameter "keyTypes": Filters the types of -// keys the user wants to include in the list response. Duplicate key -// types are not allowed. If no key type is provided, all keys are -// returned. +// keys the user wants to include in the list +// response. 
Duplicate key types are not allowed. If no key type +// is provided, all keys are returned. // // Possible values: // "KEY_TYPE_UNSPECIFIED" @@ -2646,6 +3028,7 @@ func (c *ProjectsServiceAccountsKeysListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2700,6 +3083,7 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( return ret, nil // { // "description": "Lists ServiceAccountKeys.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", // "httpMethod": "GET", // "id": "iam.projects.serviceAccounts.keys.list", // "parameterOrder": [ @@ -2707,7 +3091,7 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "keyTypes": { - // "description": "Filters the types of keys the user wants to include in the list response. Duplicate key types are not allowed. If no key type is provided, all keys are returned.", + // "description": "Filters the types of keys the user wants to include in the list\nresponse. Duplicate key types are not allowed. If no key type\nis provided, all keys are returned.", // "enum": [ // "KEY_TYPE_UNSPECIFIED", // "USER_MANAGED", @@ -2718,9 +3102,9 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "name": { - // "description": "The resource name of the service account in the following format: `projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for the project, will infer the project from the account. The `account` value can be the `email` address or the `unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nUsing `-` as a wildcard for the project, will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", - // "pattern": "^projects/[^/]*/serviceAccounts/[^/]*$", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } @@ -2747,8 +3131,10 @@ type RolesQueryGrantableRolesCall struct { } // QueryGrantableRoles: Queries roles that can be granted on a -// particular resource. A role is grantable if it can be used as the -// role in a binding for a policy for that resource. +// particular resource. +// A role is grantable if it can be used as the role in a binding for a +// policy +// for that resource. 
func (r *RolesService) QueryGrantableRoles(querygrantablerolesrequest *QueryGrantableRolesRequest) *RolesQueryGrantableRolesCall { c := &RolesQueryGrantableRolesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.querygrantablerolesrequest = querygrantablerolesrequest @@ -2786,6 +3172,7 @@ func (c *RolesQueryGrantableRolesCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.querygrantablerolesrequest) if err != nil { @@ -2838,9 +3225,12 @@ func (c *RolesQueryGrantableRolesCall) Do(opts ...googleapi.CallOption) (*QueryG } return ret, nil // { - // "description": "Queries roles that can be granted on a particular resource. A role is grantable if it can be used as the role in a binding for a policy for that resource.", + // "description": "Queries roles that can be granted on a particular resource.\nA role is grantable if it can be used as the role in a binding for a policy\nfor that resource.", + // "flatPath": "v1/roles:queryGrantableRoles", // "httpMethod": "POST", // "id": "iam.roles.queryGrantableRoles", + // "parameterOrder": [], + // "parameters": {}, // "path": "v1/roles:queryGrantableRoles", // "request": { // "$ref": "QueryGrantableRolesRequest" diff --git a/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-api.json b/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-api.json index dda7ad598..b296adcef 100644 --- a/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-api.json +++ b/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/9lrbPxfnfcTzLSOgIxD2Vj83GmI\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/Eza5dkILxsH0BZXGL1UuGyoKbqY\"", "discoveryVersion": "v1", "id": "identitytoolkit:v3", "name": "identitytoolkit", "canonicalName": "Identity Toolkit", "version": "v3", - "revision": "20161206", + "revision": "20170203", "title": "Google Identity Toolkit API", "description": "Help the third party sites to implement federated login.", "ownerDomain": "google.com", @@ -138,7 +138,7 @@ "DownloadAccountResponse": { "id": "DownloadAccountResponse", "type": "object", - "description": "Respone of downloading accounts in batch.", + "description": "Response of downloading accounts in batch.", "properties": { "kind": { "type": "string", @@ -717,6 +717,10 @@ "type": "string", "description": "Instance id token of the app." }, + "localId": { + "type": "string", + "description": "Privileged caller can create user with specified user id." + }, "password": { "type": "string", "description": "The new password of the user." @@ -1010,6 +1014,10 @@ "type": "string", "description": "The email of the user." }, + "emailVerified": { + "type": "boolean", + "description": "If email has been verified." + }, "expiresIn": { "type": "string", "description": "If idToken is STS id token, then this field will be expiration time of STS id token in seconds.", @@ -1324,6 +1332,10 @@ "type": "string", "description": "It's the identifier param in the createAuthUri request if the identifier is an email. It can be used to check whether the user input email is different from the asserted email." }, + "isNewUser": { + "type": "boolean", + "description": "True if it's a new user sign-in, false if it's a returning user." 
+ }, "kind": { "type": "string", "description": "The fixed string \"identitytoolkit#VerifyAssertionResponse\".", diff --git a/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-gen.go b/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-gen.go index 40d16fac7..c93cf7a2c 100644 --- a/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-gen.go +++ b/vendor/google.golang.org/api/identitytoolkit/v3/identitytoolkit-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Relyingparty *RelyingpartyService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewRelyingpartyService(s *Service) *RelyingpartyService { rs := &RelyingpartyService{s: s} return rs @@ -177,7 +182,7 @@ func (s *DeleteAccountResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DownloadAccountResponse: Respone of downloading accounts in batch. +// DownloadAccountResponse: Response of downloading accounts in batch. type DownloadAccountResponse struct { // Kind: The fixed string "identitytoolkit#DownloadAccountResponse". Kind string `json:"kind,omitempty"` @@ -961,6 +966,9 @@ type IdentitytoolkitRelyingpartySignupNewUserRequest struct { // InstanceId: Instance id token of the app. InstanceId string `json:"instanceId,omitempty"` + // LocalId: Privileged caller can create user with specified user id. + LocalId string `json:"localId,omitempty"` + // Password: The new password of the user. Password string `json:"password,omitempty"` @@ -1362,6 +1370,9 @@ type SetAccountInfoResponse struct { // Email: The email of the user. Email string `json:"email,omitempty"` + // EmailVerified: If email has been verified. + EmailVerified bool `json:"emailVerified,omitempty"` + // ExpiresIn: If idToken is STS id token, then this field will be // expiration time of STS id token in seconds. ExpiresIn int64 `json:"expiresIn,omitempty,string"` @@ -1767,6 +1778,10 @@ type VerifyAssertionResponse struct { // input email is different from the asserted email. InputEmail string `json:"inputEmail,omitempty"` + // IsNewUser: True if it's a new user sign-in, false if it's a returning + // user. + IsNewUser bool `json:"isNewUser,omitempty"` + // Kind: The fixed string "identitytoolkit#VerifyAssertionResponse". 
Kind string `json:"kind,omitempty"` @@ -2034,6 +2049,7 @@ func (c *RelyingpartyCreateAuthUriCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartycreateauthurirequest) if err != nil { @@ -2151,6 +2167,7 @@ func (c *RelyingpartyDeleteAccountCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartydeleteaccountrequest) if err != nil { @@ -2268,6 +2285,7 @@ func (c *RelyingpartyDownloadAccountCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartydownloadaccountrequest) if err != nil { @@ -2407,6 +2425,7 @@ func (c *RelyingpartyGetAccountInfoCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartygetaccountinforequest) if err != nil { @@ -2524,6 +2543,7 @@ func (c *RelyingpartyGetOobConfirmationCodeCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.relyingparty) if err != nil { @@ -2665,6 +2685,7 @@ func (c *RelyingpartyGetProjectConfigCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2800,6 +2821,7 @@ func (c *RelyingpartyGetPublicKeysCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2901,6 +2923,7 @@ func (c *RelyingpartyGetRecaptchaParamCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3013,6 +3036,7 @@ func (c *RelyingpartyResetPasswordCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartyresetpasswordrequest) if err != nil { @@ -3130,6 +3154,7 @@ func (c *RelyingpartySetAccountInfoCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartysetaccountinforequest) if err != nil { @@ -3247,6 +3272,7 @@ func (c 
*RelyingpartySetProjectConfigCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartysetprojectconfigrequest) if err != nil { @@ -3366,6 +3392,7 @@ func (c *RelyingpartySignOutUserCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartysignoutuserrequest) if err != nil { @@ -3485,6 +3512,7 @@ func (c *RelyingpartySignupNewUserCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartysignupnewuserrequest) if err != nil { @@ -3602,6 +3630,7 @@ func (c *RelyingpartyUploadAccountCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartyuploadaccountrequest) if err != nil { @@ -3720,6 +3749,7 @@ func (c *RelyingpartyVerifyAssertionCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartyverifyassertionrequest) if err != nil { @@ -3837,6 +3867,7 @@ func (c *RelyingpartyVerifyCustomTokenCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartyverifycustomtokenrequest) if err != nil { @@ -3954,6 +3985,7 @@ func (c *RelyingpartyVerifyPasswordCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.identitytoolkitrelyingpartyverifypasswordrequest) if err != nil { diff --git a/vendor/google.golang.org/api/kgsearch/v1/kgsearch-api.json b/vendor/google.golang.org/api/kgsearch/v1/kgsearch-api.json index e5cadc79c..93f45e440 100644 --- a/vendor/google.golang.org/api/kgsearch/v1/kgsearch-api.json +++ b/vendor/google.golang.org/api/kgsearch/v1/kgsearch-api.json @@ -1,194 +1,194 @@ { - "id": "kgsearch:v1", - "description": "Searches the Google Knowledge Graph for entities.", - "protocol": "rest", - "title": "Knowledge Graph Search API", + "discoveryVersion": "v1", + "ownerName": "Google", + "version_module": "True", "resources": { "entities": { "methods": { "search": { "id": "kgsearch.entities.search", + "path": "v1/entities:search", + "description": "Searches Knowledge Graph for entities that match the constraints.\nA list of matched entities will be returned in response, which will be in\nJSON-LD format and compatible with http://schema.org", + "httpMethod": "GET", + "parameterOrder": [], "response": { "$ref": 
"SearchResponse" }, - "parameterOrder": [], - "description": "Searches Knowledge Graph for entities that match the constraints.\nA list of matched entities will be returned in response, which will be in\nJSON-LD format and compatible with http://schema.org", - "flatPath": "v1/entities:search", - "httpMethod": "GET", "parameters": { "limit": { - "description": "Limits the number of entities to be returned.", - "location": "query", "type": "integer", - "format": "int32" - }, - "ids": { - "description": "The list of entity id to be used for search instead of query string.\nTo specify multiple ids in the HTTP request, repeat the parameter in the\nURL as in ...?ids=A&ids=B", - "repeated": true, "location": "query", - "type": "string" + "description": "Limits the number of entities to be returned.", + "format": "int32" }, "prefix": { "description": "Enables prefix match against names and aliases of entities", - "location": "query", - "type": "boolean" + "type": "boolean", + "location": "query" }, "query": { "description": "The literal query string for search.", - "location": "query", - "type": "string" - }, - "indent": { - "description": "Enables indenting of json results.", - "location": "query", - "type": "boolean" + "type": "string", + "location": "query" }, "types": { "description": "Restricts returned entities with these types, e.g. Person\n(as defined in http://schema.org/Person). If multiple types are specified,\nreturned entities will contain one or more of these types.", + "type": "string", "repeated": true, + "location": "query" + }, + "indent": { + "type": "boolean", "location": "query", - "type": "string" + "description": "Enables indenting of json results." }, "languages": { + "location": "query", "description": "The list of language codes (defined in ISO 693) to run the query with,\ne.g. 'en'.", + "type": "string", + "repeated": true + }, + "ids": { + "description": "The list of entity id to be used for search instead of query string.\nTo specify multiple ids in the HTTP request, repeat the parameter in the\nURL as in ...?ids=A&ids=B", + "type": "string", "repeated": true, - "location": "query", - "type": "string" + "location": "query" } }, - "path": "v1/entities:search" + "flatPath": "v1/entities:search" } } } }, - "schemas": { - "SearchResponse": { - "description": "Response message includes the context and a list of matching results\nwhich contain the detail of associated entities.", - "type": "object", - "properties": { - "@context": { - "description": "The local context applicable for the response. See more details at\nhttp://www.w3.org/TR/json-ld/#context-definitions.", - "type": "any" - }, - "@type": { - "description": "The schema type of top-level JSON-LD object, e.g. 
ItemList.", - "type": "any" - }, - "itemListElement": { - "description": "The item list of search results.", - "type": "array", - "items": { - "type": "any" - } - } - }, - "id": "SearchResponse" - } - }, - "revision": "20170109", - "basePath": "", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "version_module": "True", - "discoveryVersion": "v1", - "baseUrl": "https://kgsearch.googleapis.com/", - "name": "kgsearch", "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", + "pp": { + "location": "query", + "description": "Pretty-print response.", "type": "boolean", - "location": "query" + "default": "true" }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "bearer_token": { + "description": "OAuth bearer token.", "type": "string", "location": "query" }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", "type": "string", "location": "query" }, - "pp": { - "description": "Pretty-print response.", - "default": "true", + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", "type": "boolean", - "location": "query" + "default": "true" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" }, "fields": { + "location": "query", "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" + "type": "string" }, - "alt": { - "description": "Data format for response.", + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "alt": { "enum": [ "json", "media", "proto" ], - "default": "json", + "type": "string", "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" + "location": "query", + "description": "Data format for response.", + "default": "json" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", - "type": "string", - "location": "query" + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "quotaUser": { "type": "string", - "location": "query" + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." } }, - "documentationLink": "https://developers.google.com/knowledge-graph/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", + "schemas": { + "SearchResponse": { + "properties": { + "@context": { + "description": "The local context applicable for the response. See more details at\nhttp://www.w3.org/TR/json-ld/#context-definitions.", + "type": "any" + }, + "itemListElement": { + "description": "The item list of search results.", + "type": "array", + "items": { + "type": "any" + } + }, + "@type": { + "description": "The schema type of top-level JSON-LD object, e.g. ItemList.", + "type": "any" + } + }, + "id": "SearchResponse", + "description": "Response message includes the context and a list of matching results\nwhich contain the detail of associated entities.", + "type": "object" + } + }, + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "protocol": "rest", "version": "v1", + "baseUrl": "https://kgsearch.googleapis.com/", + "servicePath": "", + "description": "Searches the Google Knowledge Graph for entities.", + "kind": "discovery#restDescription", "rootUrl": "https://kgsearch.googleapis.com/", - "kind": "discovery#restDescription" + "basePath": "", + "ownerDomain": "google.com", + "name": "kgsearch", + "batchPath": "batch", + "id": "kgsearch:v1", + "documentationLink": "https://developers.google.com/knowledge-graph/", + "revision": "20170109", + "title": "Knowledge Graph Search API" } diff --git a/vendor/google.golang.org/api/kgsearch/v1/kgsearch-gen.go b/vendor/google.golang.org/api/kgsearch/v1/kgsearch-gen.go index 3af8e38ff..3dd03d6ac 100644 --- a/vendor/google.golang.org/api/kgsearch/v1/kgsearch-gen.go +++ b/vendor/google.golang.org/api/kgsearch/v1/kgsearch-gen.go @@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Entities *EntitiesService } @@ -69,6 +70,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewEntitiesService(s *Service) *EntitiesService { rs := &EntitiesService{s: s} return rs @@ -231,6 +236,7 @@ func (c *EntitiesSearchCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/language/v1/language-api.json b/vendor/google.golang.org/api/language/v1/language-api.json index ec9601731..153284a44 100644 --- a/vendor/google.golang.org/api/language/v1/language-api.json +++ b/vendor/google.golang.org/api/language/v1/language-api.json @@ -1,390 +1,528 @@ { + "basePath": "", "id": "language:v1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "description": "Google Cloud Natural Language API provides natural language understanding technologies to developers. Examples include sentiment analysis, entity recognition, and text annotations.", - "protocol": "rest", - "title": "Google Cloud Natural Language API", - "resources": { - "documents": { - "methods": { - "analyzeSentiment": { - "id": "language.documents.analyzeSentiment", - "response": { - "$ref": "AnalyzeSentimentResponse" - }, - "parameterOrder": [], - "description": "Analyzes the sentiment of the provided text.", - "request": { - "$ref": "AnalyzeSentimentRequest" - }, - "flatPath": "v1/documents:analyzeSentiment", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/documents:analyzeSentiment", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "analyzeSyntax": { - "id": "language.documents.analyzeSyntax", - "response": { - "$ref": "AnalyzeSyntaxResponse" - }, - "parameterOrder": [], - "description": "Analyzes the syntax of the text and provides sentence boundaries and\ntokenization along with part of speech tags, dependency trees, and other\nproperties.", - "request": { - "$ref": "AnalyzeSyntaxRequest" - }, - "flatPath": "v1/documents:analyzeSyntax", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/documents:analyzeSyntax", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "annotateText": { - "id": "language.documents.annotateText", - "response": { - "$ref": "AnnotateTextResponse" - }, - "parameterOrder": [], - "description": "A convenience method that provides all the features that analyzeSentiment,\nanalyzeEntities, and analyzeSyntax provide in one call.", - "request": { - "$ref": "AnnotateTextRequest" - }, - "flatPath": "v1/documents:annotateText", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/documents:annotateText", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "analyzeEntities": { - "id": "language.documents.analyzeEntities", - "response": { - "$ref": "AnalyzeEntitiesResponse" - }, - "parameterOrder": [], - "description": "Finds named entities (currently finds proper names) in the text,\nentity types, salience, mentions for each entity, and other properties.", - "request": { - "$ref": "AnalyzeEntitiesRequest" - }, - "flatPath": "v1/documents:analyzeEntities", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/documents:analyzeEntities", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - } - } - }, + "revision": "20170206", + "documentationLink": "https://cloud.google.com/natural-language/", + "discoveryVersion": "v1", + "version_module": "True", "schemas": { - "Document": { - "description": 
"################################################################ #\n\nRepresents the input to API methods.", + "PartOfSpeech": { + "description": "Represents part of speech information for a token. Parts of speech\nare as defined in\nhttp://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf", "type": "object", "properties": { - "language": { - "description": "The language of the document (if not specified, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.\u003cbr\u003e\n**Current Language Restrictions:**\n\n * Only English, Spanish, and Japanese textual content are supported.\nIf the language (either specified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned.", + "proper": { + "enumDescriptions": [ + "Proper is not applicable in the analyzed language or is not predicted.", + "Proper", + "Not proper" + ], + "enum": [ + "PROPER_UNKNOWN", + "PROPER", + "NOT_PROPER" + ], + "description": "The grammatical properness.", "type": "string" }, - "gcsContentUri": { - "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nNOTE: Cloud Storage object versioning is not supported.", + "case": { + "enumDescriptions": [ + "Case is not applicable in the analyzed language or is not predicted.", + "Accusative", + "Adverbial", + "Complementive", + "Dative", + "Genitive", + "Instrumental", + "Locative", + "Nominative", + "Oblique", + "Partitive", + "Prepositional", + "Reflexive", + "Relative", + "Vocative" + ], + "enum": [ + "CASE_UNKNOWN", + "ACCUSATIVE", + "ADVERBIAL", + "COMPLEMENTIVE", + "DATIVE", + "GENITIVE", + "INSTRUMENTAL", + "LOCATIVE", + "NOMINATIVE", + "OBLIQUE", + "PARTITIVE", + "PREPOSITIONAL", + "REFLEXIVE_CASE", + "RELATIVE_CASE", + "VOCATIVE" + ], + "description": "The grammatical case.", "type": "string" }, - "type": { - "description": "Required. If the type is not set or is `TYPE_UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.", + "tense": { + "type": "string", + "enumDescriptions": [ + "Tense is not applicable in the analyzed language or is not predicted.", + "Conditional", + "Future", + "Past", + "Present", + "Imperfect", + "Pluperfect" + ], "enum": [ - "TYPE_UNSPECIFIED", - "PLAIN_TEXT", - "HTML" + "TENSE_UNKNOWN", + "CONDITIONAL_TENSE", + "FUTURE", + "PAST", + "PRESENT", + "IMPERFECT", + "PLUPERFECT" ], + "description": "The grammatical tense." 
+ }, + "reciprocity": { + "description": "The grammatical reciprocity.", + "type": "string", "enumDescriptions": [ - "The content type is not specified.", - "Plain text", - "HTML" + "Reciprocity is not applicable in the analyzed language or is not\npredicted.", + "Reciprocal", + "Non-reciprocal" ], - "type": "string" + "enum": [ + "RECIPROCITY_UNKNOWN", + "RECIPROCAL", + "NON_RECIPROCAL" + ] }, - "content": { - "description": "The content of the input in string format.", + "form": { + "enumDescriptions": [ + "Form is not applicable in the analyzed language or is not predicted.", + "Adnomial", + "Auxiliary", + "Complementizer", + "Final ending", + "Gerund", + "Realis", + "Irrealis", + "Short form", + "Long form", + "Order form", + "Specific form" + ], + "enum": [ + "FORM_UNKNOWN", + "ADNOMIAL", + "AUXILIARY", + "COMPLEMENTIZER", + "FINAL_ENDING", + "GERUND", + "REALIS", + "IRREALIS", + "SHORT", + "LONG", + "ORDER", + "SPECIFIC" + ], + "description": "The grammatical form.", "type": "string" - } - }, - "id": "Document" - }, - "TextSpan": { - "description": "Represents an output piece of text.", - "type": "object", - "properties": { - "beginOffset": { - "description": "The API calculates the beginning offset of the content in the original\ndocument according to the EncodingType specified in the API request.", - "type": "integer", - "format": "int32" }, - "content": { - "description": "The content of the output text.", + "number": { + "enumDescriptions": [ + "Number is not applicable in the analyzed language or is not predicted.", + "Singular", + "Plural", + "Dual" + ], + "enum": [ + "NUMBER_UNKNOWN", + "SINGULAR", + "PLURAL", + "DUAL" + ], + "description": "The grammatical number.", "type": "string" - } - }, - "id": "TextSpan" - }, - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. 
If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", - "type": "object", - "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" }, - "details": { - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", - "type": "array", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - } - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" - } - }, - "id": "Status" - }, - "AnalyzeEntitiesRequest": { - "description": "The entity analysis request message.", - "type": "object", - "properties": { - "document": { - "description": "Input document.", - "$ref": "Document" - }, - "encodingType": { - "description": "The encoding type used by the API to calculate offsets.", + "voice": { + "type": "string", + "enumDescriptions": [ + "Voice is not applicable in the analyzed language or is not predicted.", + "Active", + "Causative", + "Passive" + ], "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" + "VOICE_UNKNOWN", + "ACTIVE", + "CAUSATIVE", + "PASSIVE" ], + "description": "The grammatical voice." + }, + "aspect": { + "type": "string", "enumDescriptions": [ - "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." + "Aspect is not applicable in the analyzed language or is not predicted.", + "Perfective", + "Imperfective", + "Progressive" ], - "type": "string" - } - }, - "id": "AnalyzeEntitiesRequest" - }, - "EntityMention": { - "description": "Represents a mention for an entity in the text. Currently, proper noun\nmentions are supported.", - "type": "object", - "properties": { - "text": { - "description": "The mention text.", - "$ref": "TextSpan" + "enum": [ + "ASPECT_UNKNOWN", + "PERFECTIVE", + "IMPERFECTIVE", + "PROGRESSIVE" + ], + "description": "The grammatical aspect." 
}, - "type": { - "description": "The type of the entity mention.", + "mood": { + "enumDescriptions": [ + "Mood is not applicable in the analyzed language or is not predicted.", + "Conditional", + "Imperative", + "Indicative", + "Interrogative", + "Jussive", + "Subjunctive" + ], "enum": [ - "TYPE_UNKNOWN", - "PROPER", - "COMMON" + "MOOD_UNKNOWN", + "CONDITIONAL_MOOD", + "IMPERATIVE", + "INDICATIVE", + "INTERROGATIVE", + "JUSSIVE", + "SUBJUNCTIVE" ], + "description": "The grammatical mood.", + "type": "string" + }, + "tag": { + "description": "The part of speech tag.", + "type": "string", "enumDescriptions": [ "Unknown", - "Proper name", - "Common noun (or noun compound)" + "Adjective", + "Adposition (preposition and postposition)", + "Adverb", + "Conjunction", + "Determiner", + "Noun (common and proper)", + "Cardinal number", + "Pronoun", + "Particle or other function word", + "Punctuation", + "Verb (all tenses and modes)", + "Other: foreign words, typos, abbreviations", + "Affix" + ], + "enum": [ + "UNKNOWN", + "ADJ", + "ADP", + "ADV", + "CONJ", + "DET", + "NOUN", + "NUM", + "PRON", + "PRT", + "PUNCT", + "VERB", + "X", + "AFFIX" + ] + }, + "gender": { + "enumDescriptions": [ + "Gender is not applicable in the analyzed language or is not predicted.", + "Feminine", + "Masculine", + "Neuter" + ], + "enum": [ + "GENDER_UNKNOWN", + "FEMININE", + "MASCULINE", + "NEUTER" ], + "description": "The grammatical gender.", "type": "string" + }, + "person": { + "enum": [ + "PERSON_UNKNOWN", + "FIRST", + "SECOND", + "THIRD", + "REFLEXIVE_PERSON" + ], + "description": "The grammatical person.", + "type": "string", + "enumDescriptions": [ + "Person is not applicable in the analyzed language or is not predicted.", + "First", + "Second", + "Third", + "Reflexive" + ] } }, - "id": "EntityMention" + "id": "PartOfSpeech" }, - "AnalyzeSentimentRequest": { - "description": "The sentiment analysis request message.", + "AnalyzeSyntaxRequest": { + "id": "AnalyzeSyntaxRequest", + "description": "The syntax analysis request message.", "type": "object", "properties": { - "document": { - "description": "Input document. Currently, `analyzeSentiment` only supports English text\n(Document.language=\"EN\").", - "$ref": "Document" - }, "encodingType": { - "description": "The encoding type used by the API to calculate sentence offsets.", - "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" - ], "enumDescriptions": [ "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." ], + "enum": [ + "NONE", + "UTF8", + "UTF16", + "UTF32" + ], + "description": "The encoding type used by the API to calculate offsets.", "type": "string" + }, + "document": { + "$ref": "Document", + "description": "Input document." 
} - }, - "id": "AnalyzeSentimentRequest" + } }, "AnalyzeSentimentResponse": { "description": "The sentiment analysis response message.", "type": "object", "properties": { "documentSentiment": { - "description": "The overall sentiment of the input document.", - "$ref": "Sentiment" + "$ref": "Sentiment", + "description": "The overall sentiment of the input document." }, "language": { "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.", "type": "string" }, "sentences": { - "description": "The sentiment for all the sentences in the document.", "type": "array", "items": { "$ref": "Sentence" - } + }, + "description": "The sentiment for all the sentences in the document." } }, "id": "AnalyzeSentimentResponse" }, - "AnalyzeSyntaxRequest": { - "description": "The syntax analysis request message.", - "type": "object", + "AnalyzeEntitiesResponse": { "properties": { - "document": { - "description": "Input document.", - "$ref": "Document" - }, - "encodingType": { - "description": "The encoding type used by the API to calculate offsets.", - "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" - ], - "enumDescriptions": [ - "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." - ], + "language": { + "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.", "type": "string" + }, + "entities": { + "description": "The recognized entities in the input document.", + "type": "array", + "items": { + "$ref": "Entity" + } } }, - "id": "AnalyzeSyntaxRequest" + "id": "AnalyzeEntitiesResponse", + "description": "The entity analysis response message.", + "type": "object" }, - "DependencyEdge": { - "description": "Represents dependency parse tree information for a token. (For more\ninformation on dependency labels, see\nhttp://www.aclweb.org/anthology/P13-2017", + "Entity": { + "description": "Represents a phrase in the text that is a known entity, such as\na person, an organization, or location. The API associates information, such\nas salience and mentions, with entities.", "type": "object", "properties": { - "headTokenIndex": { - "description": "Represents the head of this token in the dependency tree.\nThis is the index of the token which has an arc going to this token.\nThe index is the position of the token in the array of tokens returned\nby the API method. 
If this token is a root token, then the\n`head_token_index` is its own index.", - "type": "integer", - "format": "int32" - }, - "label": { - "description": "The parse label for the token.", + "type": { + "enumDescriptions": [ + "Unknown", + "Person", + "Location", + "Organization", + "Event", + "Work of art", + "Consumer goods", + "Other types" + ], "enum": [ "UNKNOWN", - "ABBREV", - "ACOMP", - "ADVCL", - "ADVMOD", - "AMOD", - "APPOS", - "ATTR", - "AUX", - "AUXPASS", - "CC", - "CCOMP", - "CONJ", - "CSUBJ", - "CSUBJPASS", - "DEP", - "DET", - "DISCOURSE", - "DOBJ", - "EXPL", - "GOESWITH", - "IOBJ", - "MARK", - "MWE", - "MWV", - "NEG", - "NN", - "NPADVMOD", - "NSUBJ", - "NSUBJPASS", - "NUM", - "NUMBER", - "P", - "PARATAXIS", - "PARTMOD", - "PCOMP", - "POBJ", - "POSS", - "POSTNEG", - "PRECOMP", - "PRECONJ", - "PREDET", - "PREF", - "PREP", - "PRONL", - "PRT", - "PS", - "QUANTMOD", - "RCMOD", - "RCMODREL", - "RDROP", - "REF", - "REMNANT", - "REPARANDUM", - "ROOT", - "SNUM", - "SUFF", - "TMOD", - "TOPIC", - "VMOD", - "VOCATIVE", - "XCOMP", - "SUFFIX", - "TITLE", - "ADVPHMOD", - "AUXCAUS", - "AUXVV", - "DTMOD", - "FOREIGN", - "KW", - "LIST", - "NOMC", - "NOMCSUBJ", - "NOMCSUBJPASS", - "NUMC", - "COP", - "DISLOCATED" + "PERSON", + "LOCATION", + "ORGANIZATION", + "EVENT", + "WORK_OF_ART", + "CONSUMER_GOOD", + "OTHER" + ], + "description": "The entity type.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata associated with the entity.\n\nCurrently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. The associated keys are \"wikipedia_url\" and \"mid\", respectively.", + "type": "object" + }, + "salience": { + "type": "number", + "description": "The salience score associated with the entity in the [0, 1.0] range.\n\nThe salience score for an entity provides information about the\nimportance or centrality of that entity to the entire document text.\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\nsalient.", + "format": "float" + }, + "mentions": { + "description": "The mentions of this entity in the input document. The API currently\nsupports proper noun mentions.", + "type": "array", + "items": { + "$ref": "EntityMention" + } + }, + "name": { + "description": "The representative name for the entity.", + "type": "string" + } + }, + "id": "Entity" + }, + "AnalyzeSyntaxResponse": { + "description": "The syntax analysis response message.", + "type": "object", + "properties": { + "language": { + "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.", + "type": "string" + }, + "sentences": { + "description": "Sentences in the input document.", + "type": "array", + "items": { + "$ref": "Sentence" + } + }, + "tokens": { + "description": "Tokens, along with their syntactic information, in the input document.", + "type": "array", + "items": { + "$ref": "Token" + } + } + }, + "id": "AnalyzeSyntaxResponse" + }, + "AnnotateTextRequest": { + "properties": { + "encodingType": { + "type": "string", + "enumDescriptions": [ + "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. 
C++ and Go are examples of languages\nthat use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." + ], + "enum": [ + "NONE", + "UTF8", + "UTF16", + "UTF32" + ], + "description": "The encoding type used by the API to calculate offsets." + }, + "document": { + "description": "Input document.", + "$ref": "Document" + }, + "features": { + "description": "The enabled features.", + "$ref": "Features" + } + }, + "id": "AnnotateTextRequest", + "description": "The request message for the text annotation API, which can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.", + "type": "object" + }, + "AnnotateTextResponse": { + "description": "The text annotations response message.", + "type": "object", + "properties": { + "documentSentiment": { + "$ref": "Sentiment", + "description": "The overall sentiment for the document. Populated if the user enables\nAnnotateTextRequest.Features.extract_document_sentiment." + }, + "language": { + "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.", + "type": "string" + }, + "sentences": { + "description": "Sentences in the input document. Populated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", + "type": "array", + "items": { + "$ref": "Sentence" + } + }, + "tokens": { + "description": "Tokens, along with their syntactic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", + "type": "array", + "items": { + "$ref": "Token" + } + }, + "entities": { + "description": "Entities, along with their semantic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_entities.", + "type": "array", + "items": { + "$ref": "Entity" + } + } + }, + "id": "AnnotateTextResponse" + }, + "AnalyzeSentimentRequest": { + "type": "object", + "properties": { + "encodingType": { + "enumDescriptions": [ + "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." + ], + "enum": [ + "NONE", + "UTF8", + "UTF16", + "UTF32" ], + "description": "The encoding type used by the API to calculate sentence offsets.", + "type": "string" + }, + "document": { + "description": "Input document. Currently, `analyzeSentiment` only supports English text\n(Document.language=\"EN\").", + "$ref": "Document" + } + }, + "id": "AnalyzeSentimentRequest", + "description": "The sentiment analysis request message." 
+ }, + "DependencyEdge": { + "properties": { + "headTokenIndex": { + "description": "Represents the head of this token in the dependency tree.\nThis is the index of the token which has an arc going to this token.\nThe index is the position of the token in the array of tokens returned\nby the API method. If this token is a root token, then the\n`head_token_index` is its own index.", + "format": "int32", + "type": "integer" + }, + "label": { + "description": "The parse label for the token.", + "type": "string", "enumDescriptions": [ "Unknown", "Abbreviation modifier", @@ -464,622 +602,484 @@ "Copula", "Dislocated relation (for fronted/topicalized elements)" ], - "type": "string" - } - }, - "id": "DependencyEdge" - }, - "AnalyzeSyntaxResponse": { - "description": "The syntax analysis response message.", - "type": "object", - "properties": { - "language": { - "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.", - "type": "string" - }, - "tokens": { - "description": "Tokens, along with their syntactic information, in the input document.", - "type": "array", - "items": { - "$ref": "Token" - } - }, - "sentences": { - "description": "Sentences in the input document.", - "type": "array", - "items": { - "$ref": "Sentence" - } - } - }, - "id": "AnalyzeSyntaxResponse" - }, - "AnnotateTextRequest": { - "description": "The request message for the text annotation API, which can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.", - "type": "object", - "properties": { - "document": { - "description": "Input document.", - "$ref": "Document" - }, - "encodingType": { - "description": "The encoding type used by the API to calculate offsets.", "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" - ], - "enumDescriptions": [ - "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." 
- ], - "type": "string" - }, - "features": { - "description": "The enabled features.", - "$ref": "Features" + "UNKNOWN", + "ABBREV", + "ACOMP", + "ADVCL", + "ADVMOD", + "AMOD", + "APPOS", + "ATTR", + "AUX", + "AUXPASS", + "CC", + "CCOMP", + "CONJ", + "CSUBJ", + "CSUBJPASS", + "DEP", + "DET", + "DISCOURSE", + "DOBJ", + "EXPL", + "GOESWITH", + "IOBJ", + "MARK", + "MWE", + "MWV", + "NEG", + "NN", + "NPADVMOD", + "NSUBJ", + "NSUBJPASS", + "NUM", + "NUMBER", + "P", + "PARATAXIS", + "PARTMOD", + "PCOMP", + "POBJ", + "POSS", + "POSTNEG", + "PRECOMP", + "PRECONJ", + "PREDET", + "PREF", + "PREP", + "PRONL", + "PRT", + "PS", + "QUANTMOD", + "RCMOD", + "RCMODREL", + "RDROP", + "REF", + "REMNANT", + "REPARANDUM", + "ROOT", + "SNUM", + "SUFF", + "TMOD", + "TOPIC", + "VMOD", + "VOCATIVE", + "XCOMP", + "SUFFIX", + "TITLE", + "ADVPHMOD", + "AUXCAUS", + "AUXVV", + "DTMOD", + "FOREIGN", + "KW", + "LIST", + "NOMC", + "NOMCSUBJ", + "NOMCSUBJPASS", + "NUMC", + "COP", + "DISLOCATED" + ] } }, - "id": "AnnotateTextRequest" + "id": "DependencyEdge", + "description": "Represents dependency parse tree information for a token. (For more\ninformation on dependency labels, see\nhttp://www.aclweb.org/anthology/P13-2017", + "type": "object" }, - "Sentence": { - "description": "Represents a sentence in the input document.", + "Token": { + "description": "Represents the smallest syntactic building block of the text.", "type": "object", "properties": { "text": { - "description": "The sentence text.", - "$ref": "TextSpan" + "$ref": "TextSpan", + "description": "The token text." }, - "sentiment": { - "description": "For calls to AnalyzeSentiment or if\nAnnotateTextRequest.Features.extract_document_sentiment is set to\ntrue, this field will contain the sentiment for the sentence.", - "$ref": "Sentiment" - } - }, - "id": "Sentence" - }, - "Features": { - "description": "All available features for sentiment, syntax, and semantic analysis.\nSetting each one to true will enable that specific analysis for the input.", - "type": "object", - "properties": { - "extractDocumentSentiment": { - "description": "Extract document-level sentiment.", - "type": "boolean" + "dependencyEdge": { + "$ref": "DependencyEdge", + "description": "Dependency tree parse for this token." }, - "extractEntities": { - "description": "Extract entities.", - "type": "boolean" + "lemma": { + "description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.", + "type": "string" }, - "extractSyntax": { - "description": "Extract syntax information.", - "type": "boolean" + "partOfSpeech": { + "description": "Parts of speech tag for this token.", + "$ref": "PartOfSpeech" } }, - "id": "Features" + "id": "Token" }, - "PartOfSpeech": { - "description": "Represents part of speech information for a token. 
Parts of speech\nare as defined in\nhttp://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf", - "type": "object", - "properties": { - "aspect": { - "description": "The grammatical aspect.", - "enum": [ - "ASPECT_UNKNOWN", - "PERFECTIVE", - "IMPERFECTIVE", - "PROGRESSIVE" - ], - "enumDescriptions": [ - "Aspect is not applicable in the analyzed language or is not predicted.", - "Perfective", - "Imperfective", - "Progressive" - ], - "type": "string" - }, - "gender": { - "description": "The grammatical gender.", - "enum": [ - "GENDER_UNKNOWN", - "FEMININE", - "MASCULINE", - "NEUTER" - ], - "enumDescriptions": [ - "Gender is not applicable in the analyzed language or is not predicted.", - "Feminine", - "Masculine", - "Neuter" - ], - "type": "string" - }, - "person": { - "description": "The grammatical person.", - "enum": [ - "PERSON_UNKNOWN", - "FIRST", - "SECOND", - "THIRD", - "REFLEXIVE_PERSON" - ], - "enumDescriptions": [ - "Person is not applicable in the analyzed language or is not predicted.", - "First", - "Second", - "Third", - "Reflexive" - ], - "type": "string" - }, - "case": { - "description": "The grammatical case.", - "enum": [ - "CASE_UNKNOWN", - "ACCUSATIVE", - "ADVERBIAL", - "COMPLEMENTIVE", - "DATIVE", - "GENITIVE", - "INSTRUMENTAL", - "LOCATIVE", - "NOMINATIVE", - "OBLIQUE", - "PARTITIVE", - "PREPOSITIONAL", - "REFLEXIVE_CASE", - "RELATIVE_CASE", - "VOCATIVE" - ], - "enumDescriptions": [ - "Case is not applicable in the analyzed language or is not predicted.", - "Accusative", - "Adverbial", - "Complementive", - "Dative", - "Genitive", - "Instrumental", - "Locative", - "Nominative", - "Oblique", - "Partitive", - "Prepositional", - "Reflexive", - "Relative", - "Vocative" - ], - "type": "string" - }, - "form": { - "description": "The grammatical form.", - "enum": [ - "FORM_UNKNOWN", - "ADNOMIAL", - "AUXILIARY", - "COMPLEMENTIZER", - "FINAL_ENDING", - "GERUND", - "REALIS", - "IRREALIS", - "SHORT", - "LONG", - "ORDER", - "SPECIFIC" - ], - "enumDescriptions": [ - "Form is not applicable in the analyzed language or is not predicted.", - "Adnomial", - "Auxiliary", - "Complementizer", - "Final ending", - "Gerund", - "Realis", - "Irrealis", - "Short form", - "Long form", - "Order form", - "Specific form" - ], - "type": "string" - }, - "tense": { - "description": "The grammatical tense.", - "enum": [ - "TENSE_UNKNOWN", - "CONDITIONAL_TENSE", - "FUTURE", - "PAST", - "PRESENT", - "IMPERFECT", - "PLUPERFECT" - ], - "enumDescriptions": [ - "Tense is not applicable in the analyzed language or is not predicted.", - "Conditional", - "Future", - "Past", - "Present", - "Imperfect", - "Pluperfect" - ], - "type": "string" - }, - "proper": { - "description": "The grammatical properness.", - "enum": [ - "PROPER_UNKNOWN", - "PROPER", - "NOT_PROPER" - ], - "enumDescriptions": [ - "Proper is not applicable in the analyzed language or is not predicted.", - "Proper", - "Not proper" - ], + "TextSpan": { + "id": "TextSpan", + "description": "Represents an output piece of text.", + "type": "object", + "properties": { + "beginOffset": { + "description": "The API calculates the beginning offset of the content in the original\ndocument according to the EncodingType specified in the API request.", + "format": "int32", + "type": "integer" + }, + "content": { + "description": "The content of the output text.", "type": "string" + } + } + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC 
APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" }, - "mood": { - "description": "The grammatical mood.", - "enum": [ - "MOOD_UNKNOWN", - "CONDITIONAL_MOOD", - "IMPERATIVE", - "INDICATIVE", - "INTERROGATIVE", - "JUSSIVE", - "SUBJUNCTIVE" - ], - "enumDescriptions": [ - "Mood is not applicable in the analyzed language or is not predicted.", - "Conditional", - "Imperative", - "Indicative", - "Interrogative", - "Jussive", - "Subjunctive" - ], + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", "type": "string" }, - "tag": { - "description": "The part of speech tag.", - "enum": [ - "UNKNOWN", - "ADJ", - "ADP", - "ADV", - "CONJ", - "DET", - "NOUN", - "NUM", - "PRON", - "PRT", - "PUNCT", - "VERB", - "X", - "AFFIX" - ], + "details": { + "description": "A list of messages that carry the error details. 
There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } + } + }, + "id": "Status" + }, + "EntityMention": { + "properties": { + "text": { + "$ref": "TextSpan", + "description": "The mention text." + }, + "type": { + "type": "string", "enumDescriptions": [ "Unknown", - "Adjective", - "Adposition (preposition and postposition)", - "Adverb", - "Conjunction", - "Determiner", - "Noun (common and proper)", - "Cardinal number", - "Pronoun", - "Particle or other function word", - "Punctuation", - "Verb (all tenses and modes)", - "Other: foreign words, typos, abbreviations", - "Affix" + "Proper name", + "Common noun (or noun compound)" ], - "type": "string" - }, - "number": { - "description": "The grammatical number.", "enum": [ - "NUMBER_UNKNOWN", - "SINGULAR", - "PLURAL", - "DUAL" + "TYPE_UNKNOWN", + "PROPER", + "COMMON" ], + "description": "The type of the entity mention." + } + }, + "id": "EntityMention", + "description": "Represents a mention for an entity in the text. Currently, proper noun\nmentions are supported.", + "type": "object" + }, + "Features": { + "properties": { + "extractEntities": { + "description": "Extract entities.", + "type": "boolean" + }, + "extractSyntax": { + "description": "Extract syntax information.", + "type": "boolean" + }, + "extractDocumentSentiment": { + "description": "Extract document-level sentiment.", + "type": "boolean" + } + }, + "id": "Features", + "description": "All available features for sentiment, syntax, and semantic analysis.\nSetting each one to true will enable that specific analysis for the input.", + "type": "object" + }, + "Document": { + "type": "object", + "properties": { + "language": { + "type": "string", + "description": "The language of the document (if not specified, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.\u003cbr\u003e\n**Current Language Restrictions:**\n\n * Only English, Spanish, and Japanese textual content are supported.\nIf the language (either specified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned." + }, + "type": { "enumDescriptions": [ - "Number is not applicable in the analyzed language or is not predicted.", - "Singular", - "Plural", - "Dual" + "The content type is not specified.", + "Plain text", + "HTML" ], - "type": "string" - }, - "reciprocity": { - "description": "The grammatical reciprocity.", "enum": [ - "RECIPROCITY_UNKNOWN", - "RECIPROCAL", - "NON_RECIPROCAL" - ], - "enumDescriptions": [ - "Reciprocity is not applicable in the analyzed language or is not\npredicted.", - "Reciprocal", - "Non-reciprocal" + "TYPE_UNSPECIFIED", + "PLAIN_TEXT", + "HTML" ], + "description": "Required. 
If the type is not set or is `TYPE_UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.", "type": "string" }, - "voice": { - "description": "The grammatical voice.", - "enum": [ - "VOICE_UNKNOWN", - "ACTIVE", - "CAUSATIVE", - "PASSIVE" - ], - "enumDescriptions": [ - "Voice is not applicable in the analyzed language or is not predicted.", - "Active", - "Causative", - "Passive" - ], + "content": { + "description": "The content of the input in string format.", "type": "string" + }, + "gcsContentUri": { + "type": "string", + "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nNOTE: Cloud Storage object versioning is not supported." } }, - "id": "PartOfSpeech" + "id": "Document", + "description": "################################################################ #\n\nRepresents the input to API methods." }, - "AnnotateTextResponse": { - "description": "The text annotations response message.", - "type": "object", + "Sentence": { "properties": { - "entities": { - "description": "Entities, along with their semantic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_entities.", - "type": "array", - "items": { - "$ref": "Entity" - } + "text": { + "$ref": "TextSpan", + "description": "The sentence text." }, - "documentSentiment": { - "description": "The overall sentiment for the document. Populated if the user enables\nAnnotateTextRequest.Features.extract_document_sentiment.", + "sentiment": { + "description": "For calls to AnalyzeSentiment or if\nAnnotateTextRequest.Features.extract_document_sentiment is set to\ntrue, this field will contain the sentiment for the sentence.", "$ref": "Sentiment" - }, - "language": { - "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.", - "type": "string" - }, - "tokens": { - "description": "Tokens, along with their syntactic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", - "type": "array", - "items": { - "$ref": "Token" - } - }, - "sentences": { - "description": "Sentences in the input document. Populated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", - "type": "array", - "items": { - "$ref": "Sentence" - } } }, - "id": "AnnotateTextResponse" + "id": "Sentence", + "description": "Represents a sentence in the input document.", + "type": "object" }, - "Entity": { - "description": "Represents a phrase in the text that is a known entity, such as\na person, an organization, or location. The API associates information, such\nas salience and mentions, with entities.", + "AnalyzeEntitiesRequest": { + "description": "The entity analysis request message.", "type": "object", "properties": { - "metadata": { - "description": "Metadata associated with the entity.\n\nCurrently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. 
The associated keys are \"wikipedia_url\" and \"mid\", respectively.", - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - "salience": { - "description": "The salience score associated with the entity in the [0, 1.0] range.\n\nThe salience score for an entity provides information about the\nimportance or centrality of that entity to the entire document text.\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\nsalient.", - "type": "number", - "format": "float" - }, - "type": { - "description": "The entity type.", + "encodingType": { "enum": [ - "UNKNOWN", - "PERSON", - "LOCATION", - "ORGANIZATION", - "EVENT", - "WORK_OF_ART", - "CONSUMER_GOOD", - "OTHER" + "NONE", + "UTF8", + "UTF16", + "UTF32" ], + "description": "The encoding type used by the API to calculate offsets.", + "type": "string", "enumDescriptions": [ - "Unknown", - "Person", - "Location", - "Organization", - "Event", - "Work of art", - "Consumer goods", - "Other types" - ], - "type": "string" - }, - "mentions": { - "description": "The mentions of this entity in the input document. The API currently\nsupports proper noun mentions.", - "type": "array", - "items": { - "$ref": "EntityMention" - } + "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." + ] }, - "name": { - "description": "The representative name for the entity.", - "type": "string" + "document": { + "$ref": "Document", + "description": "Input document." 
} }, - "id": "Entity" + "id": "AnalyzeEntitiesRequest" }, "Sentiment": { - "description": "Represents the feeling associated with the entire text or entities in\nthe text.", "type": "object", "properties": { "score": { "description": "Sentiment score between -1.0 (negative sentiment) and 1.0\n(positive sentiment).", - "type": "number", - "format": "float" + "format": "float", + "type": "number" }, "magnitude": { "description": "A non-negative number in the [0, +inf) range, which represents\nthe absolute magnitude of sentiment regardless of score (positive or\nnegative).", - "type": "number", - "format": "float" - } - }, - "id": "Sentiment" - }, - "Token": { - "description": "Represents the smallest syntactic building block of the text.", - "type": "object", - "properties": { - "text": { - "description": "The token text.", - "$ref": "TextSpan" - }, - "partOfSpeech": { - "description": "Parts of speech tag for this token.", - "$ref": "PartOfSpeech" - }, - "dependencyEdge": { - "description": "Dependency tree parse for this token.", - "$ref": "DependencyEdge" - }, - "lemma": { - "description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.", - "type": "string" - } - }, - "id": "Token" - }, - "AnalyzeEntitiesResponse": { - "description": "The entity analysis response message.", - "type": "object", - "properties": { - "entities": { - "description": "The recognized entities in the input document.", - "type": "array", - "items": { - "$ref": "Entity" - } - }, - "language": { - "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.", - "type": "string" + "format": "float", + "type": "number" } }, - "id": "AnalyzeEntitiesResponse" + "id": "Sentiment", + "description": "Represents the feeling associated with the entire text or entities in\nthe text." 
} }, - "revision": "20170103", - "basePath": "", + "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version_module": "True", "canonicalName": "Cloud Natural Language", - "discoveryVersion": "v1", - "baseUrl": "https://language.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://language.googleapis.com/", + "ownerDomain": "google.com", "name": "language", + "batchPath": "batch", + "title": "Google Cloud Natural Language API", + "ownerName": "Google", + "resources": { + "documents": { + "methods": { + "analyzeSyntax": { + "flatPath": "v1/documents:analyzeSyntax", + "id": "language.documents.analyzeSyntax", + "path": "v1/documents:analyzeSyntax", + "request": { + "$ref": "AnalyzeSyntaxRequest" + }, + "description": "Analyzes the syntax of the text and provides sentence boundaries and\ntokenization along with part of speech tags, dependency trees, and other\nproperties.", + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "AnalyzeSyntaxResponse" + }, + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "analyzeSentiment": { + "flatPath": "v1/documents:analyzeSentiment", + "id": "language.documents.analyzeSentiment", + "path": "v1/documents:analyzeSentiment", + "request": { + "$ref": "AnalyzeSentimentRequest" + }, + "description": "Analyzes the sentiment of the provided text.", + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "AnalyzeSentimentResponse" + }, + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "annotateText": { + "request": { + "$ref": "AnnotateTextRequest" + }, + "description": "A convenience method that provides all the features that analyzeSentiment,\nanalyzeEntities, and analyzeSyntax provide in one call.", + "response": { + "$ref": "AnnotateTextResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/documents:annotateText", + "path": "v1/documents:annotateText", + "id": "language.documents.annotateText" + }, + "analyzeEntities": { + "request": { + "$ref": "AnalyzeEntitiesRequest" + }, + "description": "Finds named entities (currently finds proper names) in the text,\nentity types, salience, mentions for each entity, and other properties.", + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "AnalyzeEntitiesResponse" + }, + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/documents:analyzeEntities", + "id": "language.documents.analyzeEntities", + "path": "v1/documents:analyzeEntities" + } + } + } + }, "parameters": { - "access_token": { - "description": "OAuth access token.", + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", "type": "string", "location": "query" }, "prettyPrint": { + "location": "query", "description": "Returns response with indentations and line breaks.", - "default": "true", "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" + "default": "true" }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" }, "fields": { "description": "Selector specifying which fields to include in a partial response.", "type": "string", "location": "query" }, - "alt": { - "description": "Data format for response.", + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], "location": "query", "enum": [ - "json", - "media", - "proto" + "1", + "2" ], - "default": "json", + "description": "V1 error format.", + "type": "string" + }, + "callback": { + "type": "string", + "location": "query", + "description": "JSONP" + }, + "alt": { "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", + "location": "query", + "description": "Data format for response.", + "default": "json", "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" + "json", + "media", + "proto" ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" + "type": "string" }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" }, - "bearer_token": { - "description": "OAuth bearer token.", + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", "type": "string", "location": "query" }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "pp": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Pretty-print response." 
} }, - "documentationLink": "https://cloud.google.com/natural-language/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", "version": "v1", - "rootUrl": "https://language.googleapis.com/", - "kind": "discovery#restDescription" + "baseUrl": "https://language.googleapis.com/", + "kind": "discovery#restDescription", + "description": "Google Cloud Natural Language API provides natural language understanding technologies to developers. Examples include sentiment analysis, entity recognition, and text annotations.", + "servicePath": "" } diff --git a/vendor/google.golang.org/api/language/v1/language-gen.go b/vendor/google.golang.org/api/language/v1/language-gen.go index ad601b08b..1492f94e7 100644 --- a/vendor/google.golang.org/api/language/v1/language-gen.go +++ b/vendor/google.golang.org/api/language/v1/language-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Documents *DocumentsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDocumentsService(s *Service) *DocumentsService { rs := &DocumentsService{s: s} return rs @@ -1351,6 +1356,7 @@ func (c *DocumentsAnalyzeEntitiesCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzeentitiesrequest) if err != nil { @@ -1471,6 +1477,7 @@ func (c *DocumentsAnalyzeSentimentCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzesentimentrequest) if err != nil { @@ -1595,6 +1602,7 @@ func (c *DocumentsAnalyzeSyntaxCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzesyntaxrequest) if err != nil { @@ -1717,6 +1725,7 @@ func (c *DocumentsAnnotateTextCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotatetextrequest) if err != nil { diff --git a/vendor/google.golang.org/api/language/v1beta1/language-api.json b/vendor/google.golang.org/api/language/v1beta1/language-api.json index 8a2847590..d11058741 100644 --- a/vendor/google.golang.org/api/language/v1beta1/language-api.json +++ b/vendor/google.golang.org/api/language/v1beta1/language-api.json @@ -1,153 +1,200 @@ { - "id": "language:v1beta1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your 
data across Google Cloud Platform services" - } - } - } - }, - "description": "Google Cloud Natural Language API provides natural language understanding technologies to developers. Examples include sentiment analysis, entity recognition, and text annotations.", - "protocol": "rest", + "rootUrl": "https://language.googleapis.com/", + "ownerDomain": "google.com", + "name": "language", + "batchPath": "batch", "title": "Google Cloud Natural Language API", + "ownerName": "Google", "resources": { "documents": { "methods": { - "analyzeSentiment": { - "id": "language.documents.analyzeSentiment", - "response": { - "$ref": "AnalyzeSentimentResponse" - }, - "parameterOrder": [], - "description": "Analyzes the sentiment of the provided text.", - "request": { - "$ref": "AnalyzeSentimentRequest" - }, - "flatPath": "v1beta1/documents:analyzeSentiment", - "httpMethod": "POST", + "analyzeEntities": { "parameters": {}, - "path": "v1beta1/documents:analyzeSentiment", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "analyzeSyntax": { - "id": "language.documents.analyzeSyntax", + ], + "flatPath": "v1beta1/documents:analyzeEntities", + "path": "v1beta1/documents:analyzeEntities", + "id": "language.documents.analyzeEntities", + "request": { + "$ref": "AnalyzeEntitiesRequest" + }, + "description": "Finds named entities (currently finds proper names) in the text,\nentity types, salience, mentions for each entity, and other properties.", "response": { - "$ref": "AnalyzeSyntaxResponse" + "$ref": "AnalyzeEntitiesResponse" }, "parameterOrder": [], + "httpMethod": "POST" + }, + "analyzeSyntax": { "description": "Analyzes the syntax of the text and provides sentence boundaries and\ntokenization along with part of speech tags, dependency trees, and other\nproperties.", "request": { "$ref": "AnalyzeSyntaxRequest" }, - "flatPath": "v1beta1/documents:analyzeSyntax", + "response": { + "$ref": "AnalyzeSyntaxResponse" + }, + "parameterOrder": [], "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": {}, + "flatPath": "v1beta1/documents:analyzeSyntax", "path": "v1beta1/documents:analyzeSyntax", + "id": "language.documents.analyzeSyntax" + }, + "analyzeSentiment": { + "path": "v1beta1/documents:analyzeSentiment", + "id": "language.documents.analyzeSentiment", + "description": "Analyzes the sentiment of the provided text.", + "request": { + "$ref": "AnalyzeSentimentRequest" + }, + "response": { + "$ref": "AnalyzeSentimentResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] + ], + "parameters": {}, + "flatPath": "v1beta1/documents:analyzeSentiment" }, "annotateText": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": {}, + "flatPath": "v1beta1/documents:annotateText", + "path": "v1beta1/documents:annotateText", "id": "language.documents.annotateText", - "response": { - "$ref": "AnnotateTextResponse" - }, - "parameterOrder": [], "description": "A convenience method that provides all the features that analyzeSentiment,\nanalyzeEntities, and analyzeSyntax provide in one call.", "request": { "$ref": "AnnotateTextRequest" }, - "flatPath": "v1beta1/documents:annotateText", - "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1/documents:annotateText", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "analyzeEntities": { - "id": "language.documents.analyzeEntities", "response": { - "$ref": "AnalyzeEntitiesResponse" + 
"$ref": "AnnotateTextResponse" }, "parameterOrder": [], - "description": "Finds named entities (currently finds proper names) in the text,\nentity types, salience, mentions for each entity, and other properties.", - "request": { - "$ref": "AnalyzeEntitiesRequest" - }, - "flatPath": "v1beta1/documents:analyzeEntities", - "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1/documents:analyzeEntities", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "httpMethod": "POST" } } } }, - "schemas": { - "Document": { - "description": "################################################################ #\n\nRepresents the input to API methods.", - "type": "object", - "properties": { - "language": { - "description": "The language of the document (if not specified, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.\u003cbr\u003e\n**Current Language Restrictions:**\n\n * Only English, Spanish, and Japanese textual content are supported.\nIf the language (either specified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned.", - "type": "string" - }, - "gcsContentUri": { - "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nNOTE: Cloud Storage object versioning is not supported.", - "type": "string" - }, - "type": { - "description": "Required. If the type is not set or is `TYPE_UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.", - "enum": [ - "TYPE_UNSPECIFIED", - "PLAIN_TEXT", - "HTML" - ], - "enumDescriptions": [ - "The content type is not specified.", - "Plain text", - "HTML" - ], - "type": "string" - }, - "content": { - "description": "The content of the input in string format.", - "type": "string" - } - }, - "id": "Document" + "parameters": { + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" }, - "TextSpan": { - "description": "Represents an output piece of text.", - "type": "object", - "properties": { - "beginOffset": { - "description": "The API calculates the beginning offset of the content in the original\ndocument according to the EncodingType specified in the API request.", - "type": "integer", - "format": "int32" - }, - "content": { - "description": "The content of the output text.", - "type": "string" - } - }, - "id": "TextSpan" + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "alt": { + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" }, + "oauth_token": { + "type": "string", + "location": "query", + "description": "OAuth 2.0 token for the current user." + } + }, + "version": "v1beta1", + "baseUrl": "https://language.googleapis.com/", + "servicePath": "", + "description": "Google Cloud Natural Language API provides natural language understanding technologies to developers. Examples include sentiment analysis, entity recognition, and text annotations.", + "kind": "discovery#restDescription", + "basePath": "", + "revision": "20170206", + "id": "language:v1beta1", + "documentationLink": "https://cloud.google.com/natural-language/", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { "Status": { "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. 
When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", "type": "object", "properties": { "code": { "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" }, "details": { "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", @@ -159,479 +206,176 @@ }, "type": "object" } - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" } }, "id": "Status" }, - "AnalyzeEntitiesRequest": { - "description": "The entity analysis request message.", - "type": "object", + "Features": { "properties": { - "document": { - "description": "Input document.", - "$ref": "Document" + "extractEntities": { + "description": "Extract entities.", + "type": "boolean" }, - "encodingType": { - "description": "The encoding type used by the API to calculate offsets.", - "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" - ], - "enumDescriptions": [ - "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." 
- ], - "type": "string" + "extractSyntax": { + "description": "Extract syntax information.", + "type": "boolean" + }, + "extractDocumentSentiment": { + "description": "Extract document-level sentiment.", + "type": "boolean" } }, - "id": "AnalyzeEntitiesRequest" + "id": "Features", + "description": "All available features for sentiment, syntax, and semantic analysis.\nSetting each one to true will enable that specific analysis for the input.", + "type": "object" }, "EntityMention": { - "description": "Represents a mention for an entity in the text. Currently, proper noun\nmentions are supported.", - "type": "object", "properties": { "text": { - "description": "The mention text.", - "$ref": "TextSpan" + "$ref": "TextSpan", + "description": "The mention text." }, "type": { - "description": "The type of the entity mention.", - "enum": [ - "TYPE_UNKNOWN", - "PROPER", - "COMMON" - ], "enumDescriptions": [ "Unknown", "Proper name", "Common noun (or noun compound)" ], + "enum": [ + "TYPE_UNKNOWN", + "PROPER", + "COMMON" + ], + "description": "The type of the entity mention.", "type": "string" } }, - "id": "EntityMention" + "id": "EntityMention", + "description": "Represents a mention for an entity in the text. Currently, proper noun\nmentions are supported.", + "type": "object" }, - "AnalyzeSentimentRequest": { - "description": "The sentiment analysis request message.", - "type": "object", + "Sentence": { "properties": { - "document": { - "description": "Input document. Currently, `analyzeSentiment` only supports English text\n(Document.language=\"EN\").", - "$ref": "Document" + "text": { + "$ref": "TextSpan", + "description": "The sentence text." }, - "encodingType": { - "description": "The encoding type used by the API to calculate sentence offsets for the\nsentence sentiment.", - "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" - ], - "enumDescriptions": [ - "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." - ], - "type": "string" + "sentiment": { + "description": "For calls to AnalyzeSentiment or if\nAnnotateTextRequest.Features.extract_document_sentiment is set to\ntrue, this field will contain the sentiment for the sentence.", + "$ref": "Sentiment" } }, - "id": "AnalyzeSentimentRequest" + "id": "Sentence", + "description": "Represents a sentence in the input document.", + "type": "object" }, - "AnalyzeSentimentResponse": { - "description": "The sentiment analysis response message.", + "Document": { + "description": "################################################################ #\n\nRepresents the input to API methods.", "type": "object", "properties": { - "documentSentiment": { - "description": "The overall sentiment of the input document.", - "$ref": "Sentiment" + "gcsContentUri": { + "type": "string", + "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. 
For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nNOTE: Cloud Storage object versioning is not supported." }, "language": { - "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.", + "description": "The language of the document (if not specified, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.\u003cbr\u003e\n**Current Language Restrictions:**\n\n * Only English, Spanish, and Japanese textual content are supported.\nIf the language (either specified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned.", "type": "string" }, - "sentences": { - "description": "The sentiment for all the sentences in the document.", - "type": "array", - "items": { - "$ref": "Sentence" - } + "type": { + "description": "Required. If the type is not set or is `TYPE_UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.", + "type": "string", + "enumDescriptions": [ + "The content type is not specified.", + "Plain text", + "HTML" + ], + "enum": [ + "TYPE_UNSPECIFIED", + "PLAIN_TEXT", + "HTML" + ] + }, + "content": { + "description": "The content of the input in string format.", + "type": "string" } }, - "id": "AnalyzeSentimentResponse" + "id": "Document" }, - "AnalyzeSyntaxRequest": { - "description": "The syntax analysis request message.", + "AnalyzeEntitiesRequest": { + "id": "AnalyzeEntitiesRequest", + "description": "The entity analysis request message.", "type": "object", "properties": { "document": { - "description": "Input document.", - "$ref": "Document" + "$ref": "Document", + "description": "Input document." }, "encodingType": { - "description": "The encoding type used by the API to calculate offsets.", - "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" - ], "enumDescriptions": [ "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." ], + "enum": [ + "NONE", + "UTF8", + "UTF16", + "UTF32" + ], + "description": "The encoding type used by the API to calculate offsets.", "type": "string" } - }, - "id": "AnalyzeSyntaxRequest" + } }, - "DependencyEdge": { - "description": "Represents dependency parse tree information for a token.", + "Sentiment": { + "description": "Represents the feeling associated with the entire text or entities in\nthe text.", "type": "object", "properties": { - "headTokenIndex": { - "description": "Represents the head of this token in the dependency tree.\nThis is the index of the token which has an arc going to this token.\nThe index is the position of the token in the array of tokens returned\nby the API method. 
If this token is a root token, then the\n`head_token_index` is its own index.", - "type": "integer", - "format": "int32" + "polarity": { + "description": "DEPRECATED FIELD - This field is being deprecated in\nfavor of score. Please refer to our documentation at\nhttps://cloud.google.com/natural-language/docs for more information.", + "format": "float", + "type": "number" }, - "label": { - "description": "The parse label for the token.", + "score": { + "description": "Sentiment score between -1.0 (negative sentiment) and 1.0\n(positive sentiment).", + "format": "float", + "type": "number" + }, + "magnitude": { + "type": "number", + "description": "A non-negative number in the [0, +inf) range, which represents\nthe absolute magnitude of sentiment regardless of score (positive or\nnegative).", + "format": "float" + } + }, + "id": "Sentiment" + }, + "PartOfSpeech": { + "id": "PartOfSpeech", + "description": "Represents part of speech information for a token.", + "type": "object", + "properties": { + "case": { "enum": [ - "UNKNOWN", - "ABBREV", - "ACOMP", - "ADVCL", - "ADVMOD", - "AMOD", - "APPOS", - "ATTR", - "AUX", - "AUXPASS", - "CC", - "CCOMP", - "CONJ", - "CSUBJ", - "CSUBJPASS", - "DEP", - "DET", - "DISCOURSE", - "DOBJ", - "EXPL", - "GOESWITH", - "IOBJ", - "MARK", - "MWE", - "MWV", - "NEG", - "NN", - "NPADVMOD", - "NSUBJ", - "NSUBJPASS", - "NUM", - "NUMBER", - "P", - "PARATAXIS", - "PARTMOD", - "PCOMP", - "POBJ", - "POSS", - "POSTNEG", - "PRECOMP", - "PRECONJ", - "PREDET", - "PREF", - "PREP", - "PRONL", - "PRT", - "PS", - "QUANTMOD", - "RCMOD", - "RCMODREL", - "RDROP", - "REF", - "REMNANT", - "REPARANDUM", - "ROOT", - "SNUM", - "SUFF", - "TMOD", - "TOPIC", - "VMOD", - "VOCATIVE", - "XCOMP", - "SUFFIX", - "TITLE", - "ADVPHMOD", - "AUXCAUS", - "AUXVV", - "DTMOD", - "FOREIGN", - "KW", - "LIST", - "NOMC", - "NOMCSUBJ", - "NOMCSUBJPASS", - "NUMC", - "COP", - "DISLOCATED" - ], - "enumDescriptions": [ - "Unknown", - "Abbreviation modifier", - "Adjectival complement", - "Adverbial clause modifier", - "Adverbial modifier", - "Adjectival modifier of an NP", - "Appositional modifier of an NP", - "Attribute dependent of a copular verb", - "Auxiliary (non-main) verb", - "Passive auxiliary", - "Coordinating conjunction", - "Clausal complement of a verb or adjective", - "Conjunct", - "Clausal subject", - "Clausal passive subject", - "Dependency (unable to determine)", - "Determiner", - "Discourse", - "Direct object", - "Expletive", - "Goes with (part of a word in a text not well edited)", - "Indirect object", - "Marker (word introducing a subordinate clause)", - "Multi-word expression", - "Multi-word verbal expression", - "Negation modifier", - "Noun compound modifier", - "Noun phrase used as an adverbial modifier", - "Nominal subject", - "Passive nominal subject", - "Numeric modifier of a noun", - "Element of compound number", - "Punctuation mark", - "Parataxis relation", - "Participial modifier", - "The complement of a preposition is a clause", - "Object of a preposition", - "Possession modifier", - "Postverbal negative particle", - "Predicate complement", - "Preconjunt", - "Predeterminer", - "Prefix", - "Prepositional modifier", - "The relationship between a verb and verbal morpheme", - "Particle", - "Associative or possessive marker", - "Quantifier phrase modifier", - "Relative clause modifier", - "Complementizer in relative clause", - "Ellipsis without a preceding predicate", - "Referent", - "Remnant", - "Reparandum", - "Root", - "Suffix specifying a unit of number", - "Suffix", - "Temporal 
modifier", - "Topic marker", - "Clause headed by an infinite form of the verb that modifies a noun", - "Vocative", - "Open clausal complement", - "Name suffix", - "Name title", - "Adverbial phrase modifier", - "Causative auxiliary", - "Helper auxiliary", - "Rentaishi (Prenominal modifier)", - "Foreign words", - "Keyword", - "List for chains of comparable items", - "Nominalized clause", - "Nominalized clausal subject", - "Nominalized clausal passive", - "Compound of numeric modifier", - "Copula", - "Dislocated relation (for fronted/topicalized elements)" - ], - "type": "string" - } - }, - "id": "DependencyEdge" - }, - "AnalyzeSyntaxResponse": { - "description": "The syntax analysis response message.", - "type": "object", - "properties": { - "language": { - "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details.", - "type": "string" - }, - "tokens": { - "description": "Tokens, along with their syntactic information, in the input document.", - "type": "array", - "items": { - "$ref": "Token" - } - }, - "sentences": { - "description": "Sentences in the input document.", - "type": "array", - "items": { - "$ref": "Sentence" - } - } - }, - "id": "AnalyzeSyntaxResponse" - }, - "AnnotateTextRequest": { - "description": "The request message for the text annotation API, which can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.", - "type": "object", - "properties": { - "document": { - "description": "Input document.", - "$ref": "Document" - }, - "encodingType": { - "description": "The encoding type used by the API to calculate offsets.", - "enum": [ - "NONE", - "UTF8", - "UTF16", - "UTF32" - ], - "enumDescriptions": [ - "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", - "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." 
- ], - "type": "string" - }, - "features": { - "description": "The enabled features.", - "$ref": "Features" - } - }, - "id": "AnnotateTextRequest" - }, - "Sentence": { - "description": "Represents a sentence in the input document.", - "type": "object", - "properties": { - "text": { - "description": "The sentence text.", - "$ref": "TextSpan" - }, - "sentiment": { - "description": "For calls to AnalyzeSentiment or if\nAnnotateTextRequest.Features.extract_document_sentiment is set to\ntrue, this field will contain the sentiment for the sentence.", - "$ref": "Sentiment" - } - }, - "id": "Sentence" - }, - "Features": { - "description": "All available features for sentiment, syntax, and semantic analysis.\nSetting each one to true will enable that specific analysis for the input.", - "type": "object", - "properties": { - "extractDocumentSentiment": { - "description": "Extract document-level sentiment.", - "type": "boolean" - }, - "extractEntities": { - "description": "Extract entities.", - "type": "boolean" - }, - "extractSyntax": { - "description": "Extract syntax information.", - "type": "boolean" - } - }, - "id": "Features" - }, - "PartOfSpeech": { - "description": "Represents part of speech information for a token.", - "type": "object", - "properties": { - "aspect": { - "description": "The grammatical aspect.", - "enum": [ - "ASPECT_UNKNOWN", - "PERFECTIVE", - "IMPERFECTIVE", - "PROGRESSIVE" - ], - "enumDescriptions": [ - "Aspect is not applicable in the analyzed language or is not predicted.", - "Perfective", - "Imperfective", - "Progressive" - ], - "type": "string" - }, - "gender": { - "description": "The grammatical gender.", - "enum": [ - "GENDER_UNKNOWN", - "FEMININE", - "MASCULINE", - "NEUTER" - ], - "enumDescriptions": [ - "Gender is not applicable in the analyzed language or is not predicted.", - "Feminine", - "Masculine", - "Neuter" - ], - "type": "string" - }, - "person": { - "description": "The grammatical person.", - "enum": [ - "PERSON_UNKNOWN", - "FIRST", - "SECOND", - "THIRD", - "REFLEXIVE_PERSON" - ], - "enumDescriptions": [ - "Person is not applicable in the analyzed language or is not predicted.", - "First", - "Second", - "Third", - "Reflexive" - ], - "type": "string" - }, - "case": { - "description": "The grammatical case.", - "enum": [ - "CASE_UNKNOWN", - "ACCUSATIVE", - "ADVERBIAL", - "COMPLEMENTIVE", - "DATIVE", - "GENITIVE", - "INSTRUMENTAL", - "LOCATIVE", - "NOMINATIVE", - "OBLIQUE", - "PARTITIVE", - "PREPOSITIONAL", - "REFLEXIVE_CASE", - "RELATIVE_CASE", - "VOCATIVE" + "CASE_UNKNOWN", + "ACCUSATIVE", + "ADVERBIAL", + "COMPLEMENTIVE", + "DATIVE", + "GENITIVE", + "INSTRUMENTAL", + "LOCATIVE", + "NOMINATIVE", + "OBLIQUE", + "PARTITIVE", + "PREPOSITIONAL", + "REFLEXIVE_CASE", + "RELATIVE_CASE", + "VOCATIVE" ], + "description": "The grammatical case.", + "type": "string", "enumDescriptions": [ "Case is not applicable in the analyzed language or is not predicted.", "Accusative", @@ -648,11 +392,45 @@ "Reflexive", "Relative", "Vocative" + ] + }, + "tense": { + "enum": [ + "TENSE_UNKNOWN", + "CONDITIONAL_TENSE", + "FUTURE", + "PAST", + "PRESENT", + "IMPERFECT", + "PLUPERFECT" ], - "type": "string" + "description": "The grammatical tense.", + "type": "string", + "enumDescriptions": [ + "Tense is not applicable in the analyzed language or is not predicted.", + "Conditional", + "Future", + "Past", + "Present", + "Imperfect", + "Pluperfect" + ] + }, + "reciprocity": { + "description": "The grammatical reciprocity.", + "type": "string", + "enumDescriptions": [ + "Reciprocity is 
not applicable in the analyzed language or is not\npredicted.", + "Reciprocal", + "Non-reciprocal" + ], + "enum": [ + "RECIPROCITY_UNKNOWN", + "RECIPROCAL", + "NON_RECIPROCAL" + ] }, "form": { - "description": "The grammatical form.", "enum": [ "FORM_UNKNOWN", "ADNOMIAL", @@ -667,6 +445,8 @@ "ORDER", "SPECIFIC" ], + "description": "The grammatical form.", + "type": "string", "enumDescriptions": [ "Form is not applicable in the analyzed language or is not predicted.", "Adnomial", @@ -680,56 +460,57 @@ "Long form", "Order form", "Specific form" - ], - "type": "string" + ] }, - "tense": { - "description": "The grammatical tense.", - "enum": [ - "TENSE_UNKNOWN", - "CONDITIONAL_TENSE", - "FUTURE", - "PAST", - "PRESENT", - "IMPERFECT", - "PLUPERFECT" - ], + "number": { "enumDescriptions": [ - "Tense is not applicable in the analyzed language or is not predicted.", - "Conditional", - "Future", - "Past", - "Present", - "Imperfect", - "Pluperfect" + "Number is not applicable in the analyzed language or is not predicted.", + "Singular", + "Plural", + "Dual" + ], + "enum": [ + "NUMBER_UNKNOWN", + "SINGULAR", + "PLURAL", + "DUAL" ], + "description": "The grammatical number.", "type": "string" }, - "proper": { - "description": "The grammatical properness.", + "voice": { "enum": [ - "PROPER_UNKNOWN", - "PROPER", - "NOT_PROPER" + "VOICE_UNKNOWN", + "ACTIVE", + "CAUSATIVE", + "PASSIVE" ], + "description": "The grammatical voice.", + "type": "string", "enumDescriptions": [ - "Proper is not applicable in the analyzed language or is not predicted.", - "Proper", - "Not proper" + "Voice is not applicable in the analyzed language or is not predicted.", + "Active", + "Causative", + "Passive" + ] + }, + "aspect": { + "enumDescriptions": [ + "Aspect is not applicable in the analyzed language or is not predicted.", + "Perfective", + "Imperfective", + "Progressive" + ], + "enum": [ + "ASPECT_UNKNOWN", + "PERFECTIVE", + "IMPERFECTIVE", + "PROGRESSIVE" ], + "description": "The grammatical aspect.", "type": "string" }, "mood": { - "description": "The grammatical mood.", - "enum": [ - "MOOD_UNKNOWN", - "CONDITIONAL_MOOD", - "IMPERATIVE", - "INDICATIVE", - "INTERROGATIVE", - "JUSSIVE", - "SUBJUNCTIVE" - ], "enumDescriptions": [ "Mood is not applicable in the analyzed language or is not predicted.", "Conditional", @@ -739,26 +520,19 @@ "Jussive", "Subjunctive" ], + "enum": [ + "MOOD_UNKNOWN", + "CONDITIONAL_MOOD", + "IMPERATIVE", + "INDICATIVE", + "INTERROGATIVE", + "JUSSIVE", + "SUBJUNCTIVE" + ], + "description": "The grammatical mood.", "type": "string" }, "tag": { - "description": "The part of speech tag.", - "enum": [ - "UNKNOWN", - "ADJ", - "ADP", - "ADV", - "CONJ", - "DET", - "NOUN", - "NUM", - "PRON", - "PRT", - "PUNCT", - "VERB", - "X", - "AFFIX" - ], "enumDescriptions": [ "Unknown", "Adjective", @@ -775,111 +549,194 @@ "Other: foreign words, typos, abbreviations", "Affix" ], + "enum": [ + "UNKNOWN", + "ADJ", + "ADP", + "ADV", + "CONJ", + "DET", + "NOUN", + "NUM", + "PRON", + "PRT", + "PUNCT", + "VERB", + "X", + "AFFIX" + ], + "description": "The part of speech tag.", "type": "string" }, - "number": { - "description": "The grammatical number.", + "gender": { "enum": [ - "NUMBER_UNKNOWN", - "SINGULAR", - "PLURAL", - "DUAL" + "GENDER_UNKNOWN", + "FEMININE", + "MASCULINE", + "NEUTER" ], + "description": "The grammatical gender.", + "type": "string", "enumDescriptions": [ - "Number is not applicable in the analyzed language or is not predicted.", - "Singular", - "Plural", - "Dual" - ], - "type": "string" + "Gender is 
not applicable in the analyzed language or is not predicted.", + "Feminine", + "Masculine", + "Neuter" + ] }, - "reciprocity": { - "description": "The grammatical reciprocity.", + "person": { "enum": [ - "RECIPROCITY_UNKNOWN", - "RECIPROCAL", - "NON_RECIPROCAL" + "PERSON_UNKNOWN", + "FIRST", + "SECOND", + "THIRD", + "REFLEXIVE_PERSON" ], + "description": "The grammatical person.", + "type": "string", "enumDescriptions": [ - "Reciprocity is not applicable in the analyzed language or is not\npredicted.", - "Reciprocal", - "Non-reciprocal" - ], - "type": "string" + "Person is not applicable in the analyzed language or is not predicted.", + "First", + "Second", + "Third", + "Reflexive" + ] }, - "voice": { - "description": "The grammatical voice.", + "proper": { + "enumDescriptions": [ + "Proper is not applicable in the analyzed language or is not predicted.", + "Proper", + "Not proper" + ], "enum": [ - "VOICE_UNKNOWN", - "ACTIVE", - "CAUSATIVE", - "PASSIVE" + "PROPER_UNKNOWN", + "PROPER", + "NOT_PROPER" ], + "description": "The grammatical properness.", + "type": "string" + } + } + }, + "AnalyzeSyntaxRequest": { + "properties": { + "document": { + "$ref": "Document", + "description": "Input document." + }, + "encodingType": { + "description": "The encoding type used by the API to calculate offsets.", + "type": "string", "enumDescriptions": [ - "Voice is not applicable in the analyzed language or is not predicted.", - "Active", - "Causative", - "Passive" + "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." ], - "type": "string" + "enum": [ + "NONE", + "UTF8", + "UTF16", + "UTF32" + ] } }, - "id": "PartOfSpeech" + "id": "AnalyzeSyntaxRequest", + "description": "The syntax analysis request message.", + "type": "object" }, - "AnnotateTextResponse": { - "description": "The text annotations response message.", + "AnalyzeSentimentResponse": { + "description": "The sentiment analysis response message.", "type": "object", "properties": { - "entities": { - "description": "Entities, along with their semantic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_entities.", + "language": { + "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.", + "type": "string" + }, + "sentences": { + "description": "The sentiment for all the sentences in the document.", "type": "array", "items": { - "$ref": "Entity" + "$ref": "Sentence" } }, "documentSentiment": { - "description": "The overall sentiment for the document. Populated if the user enables\nAnnotateTextRequest.Features.extract_document_sentiment.", - "$ref": "Sentiment" + "$ref": "Sentiment", + "description": "The overall sentiment of the input document." 
+ } + }, + "id": "AnalyzeSentimentResponse" + }, + "AnalyzeEntitiesResponse": { + "description": "The entity analysis response message.", + "type": "object", + "properties": { + "entities": { + "type": "array", + "items": { + "$ref": "Entity" + }, + "description": "The recognized entities in the input document." }, + "language": { + "type": "string", + "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details." + } + }, + "id": "AnalyzeEntitiesResponse" + }, + "AnalyzeSyntaxResponse": { + "description": "The syntax analysis response message.", + "type": "object", + "properties": { "language": { "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details.", "type": "string" }, - "tokens": { - "description": "Tokens, along with their syntactic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", + "sentences": { + "description": "Sentences in the input document.", "type": "array", "items": { - "$ref": "Token" + "$ref": "Sentence" } }, - "sentences": { - "description": "Sentences in the input document. Populated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", + "tokens": { + "description": "Tokens, along with their syntactic information, in the input document.", "type": "array", "items": { - "$ref": "Sentence" + "$ref": "Token" } } }, - "id": "AnnotateTextResponse" + "id": "AnalyzeSyntaxResponse" }, "Entity": { "description": "Represents a phrase in the text that is a known entity, such as\na person, an organization, or location. The API associates information, such\nas salience and mentions, with entities.", "type": "object", "properties": { - "metadata": { - "description": "Metadata associated with the entity.\n\nCurrently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. The associated keys are \"wikipedia_url\" and \"mid\", respectively.", - "additionalProperties": { - "type": "string" - }, - "type": "object" + "mentions": { + "description": "The mentions of this entity in the input document. The API currently\nsupports proper noun mentions.", + "type": "array", + "items": { + "$ref": "EntityMention" + } }, - "salience": { - "description": "The salience score associated with the entity in the [0, 1.0] range.\n\nThe salience score for an entity provides information about the\nimportance or centrality of that entity to the entire document text.\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\nsalient.", - "type": "number", - "format": "float" + "name": { + "description": "The representative name for the entity.", + "type": "string" }, "type": { - "description": "The entity type.", + "type": "string", + "enumDescriptions": [ + "Unknown", + "Person", + "Location", + "Organization", + "Event", + "Work of art", + "Consumer goods", + "Other types" + ], "enum": [ "UNKNOWN", "PERSON", @@ -890,201 +747,344 @@ "CONSUMER_GOOD", "OTHER" ], + "description": "The entity type." + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata associated with the entity.\n\nCurrently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. 
The associated keys are \"wikipedia_url\" and \"mid\", respectively.", + "type": "object" + }, + "salience": { + "description": "The salience score associated with the entity in the [0, 1.0] range.\n\nThe salience score for an entity provides information about the\nimportance or centrality of that entity to the entire document text.\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\nsalient.", + "format": "float", + "type": "number" + } + }, + "id": "Entity" + }, + "AnnotateTextRequest": { + "description": "The request message for the text annotation API, which can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.", + "type": "object", + "properties": { + "features": { + "$ref": "Features", + "description": "The enabled features." + }, + "encodingType": { + "enum": [ + "NONE", + "UTF8", + "UTF16", + "UTF32" + ], + "description": "The encoding type used by the API to calculate offsets.", + "type": "string", "enumDescriptions": [ - "Unknown", - "Person", - "Location", - "Organization", - "Event", - "Work of art", - "Consumer goods", - "Other types" + "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." + ] + }, + "document": { + "$ref": "Document", + "description": "Input document." + } + }, + "id": "AnnotateTextRequest" + }, + "AnalyzeSentimentRequest": { + "properties": { + "encodingType": { + "enum": [ + "NONE", + "UTF8", + "UTF16", + "UTF32" ], + "description": "The encoding type used by the API to calculate sentence offsets for the\nsentence sentiment.", + "type": "string", + "enumDescriptions": [ + "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", + "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." + ] + }, + "document": { + "$ref": "Document", + "description": "Input document. Currently, `analyzeSentiment` only supports English text\n(Document.language=\"EN\")." + } + }, + "id": "AnalyzeSentimentRequest", + "description": "The sentiment analysis request message.", + "type": "object" + }, + "AnnotateTextResponse": { + "type": "object", + "properties": { + "documentSentiment": { + "description": "The overall sentiment for the document. 
Populated if the user enables\nAnnotateTextRequest.Features.extract_document_sentiment.", + "$ref": "Sentiment" + }, + "language": { + "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details.", "type": "string" }, - "mentions": { - "description": "The mentions of this entity in the input document. The API currently\nsupports proper noun mentions.", + "sentences": { + "description": "Sentences in the input document. Populated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", "type": "array", "items": { - "$ref": "EntityMention" + "$ref": "Sentence" } }, - "name": { - "description": "The representative name for the entity.", - "type": "string" + "tokens": { + "description": "Tokens, along with their syntactic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", + "type": "array", + "items": { + "$ref": "Token" + } + }, + "entities": { + "description": "Entities, along with their semantic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_entities.", + "type": "array", + "items": { + "$ref": "Entity" + } } }, - "id": "Entity" + "id": "AnnotateTextResponse", + "description": "The text annotations response message." }, - "Sentiment": { - "description": "Represents the feeling associated with the entire text or entities in\nthe text.", + "DependencyEdge": { + "description": "Represents dependency parse tree information for a token.", "type": "object", "properties": { - "score": { - "description": "Sentiment score between -1.0 (negative sentiment) and 1.0\n(positive sentiment).", - "type": "number", - "format": "float" + "headTokenIndex": { + "description": "Represents the head of this token in the dependency tree.\nThis is the index of the token which has an arc going to this token.\nThe index is the position of the token in the array of tokens returned\nby the API method. If this token is a root token, then the\n`head_token_index` is its own index.", + "format": "int32", + "type": "integer" }, - "polarity": { - "description": "DEPRECATED FIELD - This field is being deprecated in\nfavor of score. 
Please refer to our documentation at\nhttps://cloud.google.com/natural-language/docs for more information.", - "type": "number", - "format": "float" + "label": { + "enumDescriptions": [ + "Unknown", + "Abbreviation modifier", + "Adjectival complement", + "Adverbial clause modifier", + "Adverbial modifier", + "Adjectival modifier of an NP", + "Appositional modifier of an NP", + "Attribute dependent of a copular verb", + "Auxiliary (non-main) verb", + "Passive auxiliary", + "Coordinating conjunction", + "Clausal complement of a verb or adjective", + "Conjunct", + "Clausal subject", + "Clausal passive subject", + "Dependency (unable to determine)", + "Determiner", + "Discourse", + "Direct object", + "Expletive", + "Goes with (part of a word in a text not well edited)", + "Indirect object", + "Marker (word introducing a subordinate clause)", + "Multi-word expression", + "Multi-word verbal expression", + "Negation modifier", + "Noun compound modifier", + "Noun phrase used as an adverbial modifier", + "Nominal subject", + "Passive nominal subject", + "Numeric modifier of a noun", + "Element of compound number", + "Punctuation mark", + "Parataxis relation", + "Participial modifier", + "The complement of a preposition is a clause", + "Object of a preposition", + "Possession modifier", + "Postverbal negative particle", + "Predicate complement", + "Preconjunt", + "Predeterminer", + "Prefix", + "Prepositional modifier", + "The relationship between a verb and verbal morpheme", + "Particle", + "Associative or possessive marker", + "Quantifier phrase modifier", + "Relative clause modifier", + "Complementizer in relative clause", + "Ellipsis without a preceding predicate", + "Referent", + "Remnant", + "Reparandum", + "Root", + "Suffix specifying a unit of number", + "Suffix", + "Temporal modifier", + "Topic marker", + "Clause headed by an infinite form of the verb that modifies a noun", + "Vocative", + "Open clausal complement", + "Name suffix", + "Name title", + "Adverbial phrase modifier", + "Causative auxiliary", + "Helper auxiliary", + "Rentaishi (Prenominal modifier)", + "Foreign words", + "Keyword", + "List for chains of comparable items", + "Nominalized clause", + "Nominalized clausal subject", + "Nominalized clausal passive", + "Compound of numeric modifier", + "Copula", + "Dislocated relation (for fronted/topicalized elements)" + ], + "enum": [ + "UNKNOWN", + "ABBREV", + "ACOMP", + "ADVCL", + "ADVMOD", + "AMOD", + "APPOS", + "ATTR", + "AUX", + "AUXPASS", + "CC", + "CCOMP", + "CONJ", + "CSUBJ", + "CSUBJPASS", + "DEP", + "DET", + "DISCOURSE", + "DOBJ", + "EXPL", + "GOESWITH", + "IOBJ", + "MARK", + "MWE", + "MWV", + "NEG", + "NN", + "NPADVMOD", + "NSUBJ", + "NSUBJPASS", + "NUM", + "NUMBER", + "P", + "PARATAXIS", + "PARTMOD", + "PCOMP", + "POBJ", + "POSS", + "POSTNEG", + "PRECOMP", + "PRECONJ", + "PREDET", + "PREF", + "PREP", + "PRONL", + "PRT", + "PS", + "QUANTMOD", + "RCMOD", + "RCMODREL", + "RDROP", + "REF", + "REMNANT", + "REPARANDUM", + "ROOT", + "SNUM", + "SUFF", + "TMOD", + "TOPIC", + "VMOD", + "VOCATIVE", + "XCOMP", + "SUFFIX", + "TITLE", + "ADVPHMOD", + "AUXCAUS", + "AUXVV", + "DTMOD", + "FOREIGN", + "KW", + "LIST", + "NOMC", + "NOMCSUBJ", + "NOMCSUBJPASS", + "NUMC", + "COP", + "DISLOCATED" + ], + "description": "The parse label for the token.", + "type": "string" + } + }, + "id": "DependencyEdge" + }, + "TextSpan": { + "description": "Represents an output piece of text.", + "type": "object", + "properties": { + "beginOffset": { + "description": "The API calculates the beginning offset of 
the content in the original\ndocument according to the EncodingType specified in the API request.", + "format": "int32", + "type": "integer" }, - "magnitude": { - "description": "A non-negative number in the [0, +inf) range, which represents\nthe absolute magnitude of sentiment regardless of score (positive or\nnegative).", - "type": "number", - "format": "float" + "content": { + "description": "The content of the output text.", + "type": "string" } }, - "id": "Sentiment" + "id": "TextSpan" }, "Token": { - "description": "Represents the smallest syntactic building block of the text.", - "type": "object", "properties": { "text": { - "description": "The token text.", - "$ref": "TextSpan" - }, - "partOfSpeech": { - "description": "Parts of speech tag for this token.", - "$ref": "PartOfSpeech" + "$ref": "TextSpan", + "description": "The token text." }, "dependencyEdge": { - "description": "Dependency tree parse for this token.", - "$ref": "DependencyEdge" + "$ref": "DependencyEdge", + "description": "Dependency tree parse for this token." }, "lemma": { "description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.", "type": "string" - } - }, - "id": "Token" - }, - "AnalyzeEntitiesResponse": { - "description": "The entity analysis response message.", - "type": "object", - "properties": { - "entities": { - "description": "The recognized entities in the input document.", - "type": "array", - "items": { - "$ref": "Entity" - } }, - "language": { - "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details.", - "type": "string" + "partOfSpeech": { + "$ref": "PartOfSpeech", + "description": "Parts of speech tag for this token." } }, - "id": "AnalyzeEntitiesResponse" + "id": "Token", + "description": "Represents the smallest syntactic building block of the text.", + "type": "object" } }, - "revision": "20170103", - "basePath": "", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version_module": "True", + "protocol": "rest", "canonicalName": "Cloud Natural Language", - "discoveryVersion": "v1", - "baseUrl": "https://language.googleapis.com/", - "name": "language", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } - }, - "documentationLink": "https://cloud.google.com/natural-language/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1beta1", - "rootUrl": "https://language.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/language/v1beta1/language-gen.go b/vendor/google.golang.org/api/language/v1beta1/language-gen.go index 5bf31a34a..ac11e4f71 100644 --- a/vendor/google.golang.org/api/language/v1beta1/language-gen.go +++ b/vendor/google.golang.org/api/language/v1beta1/language-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Documents *DocumentsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDocumentsService(s *Service) *DocumentsService { rs := &DocumentsService{s: s} return rs @@ -1352,6 +1357,7 @@ func (c *DocumentsAnalyzeEntitiesCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzeentitiesrequest) if err != nil { @@ -1472,6 +1478,7 @@ func (c 
*DocumentsAnalyzeSentimentCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzesentimentrequest) if err != nil { @@ -1596,6 +1603,7 @@ func (c *DocumentsAnalyzeSyntaxCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.analyzesyntaxrequest) if err != nil { @@ -1718,6 +1726,7 @@ func (c *DocumentsAnnotateTextCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.annotatetextrequest) if err != nil { diff --git a/vendor/google.golang.org/api/licensing/v1/licensing-api.json b/vendor/google.golang.org/api/licensing/v1/licensing-api.json index 929b4e563..c9bfa0454 100644 --- a/vendor/google.golang.org/api/licensing/v1/licensing-api.json +++ b/vendor/google.golang.org/api/licensing/v1/licensing-api.json @@ -1,6 +1,6 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/FyUD9D0inmca5JWfAKYW1_Iol18\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/QQRbdYm0j-JrEGkOGWqDQAEBikU\"", "discoveryVersion": "v1", "id": "licensing:v1", "name": "licensing", @@ -70,7 +70,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/apps.licensing": { - "description": "View and manage Google Apps licenses for your domain" + "description": "View and manage G Suite licenses for your domain" } } } diff --git a/vendor/google.golang.org/api/licensing/v1/licensing-gen.go b/vendor/google.golang.org/api/licensing/v1/licensing-gen.go index 1d76c4336..6954403ec 100644 --- a/vendor/google.golang.org/api/licensing/v1/licensing-gen.go +++ b/vendor/google.golang.org/api/licensing/v1/licensing-gen.go @@ -47,7 +47,7 @@ const basePath = "https://www.googleapis.com/apps/licensing/v1/product/" // OAuth2 scopes used by this API. 
const ( - // View and manage Google Apps licenses for your domain + // View and manage G Suite licenses for your domain AppsLicensingScope = "https://www.googleapis.com/auth/apps.licensing" ) @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only LicenseAssignments *LicenseAssignmentsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewLicenseAssignmentsService(s *Service) *LicenseAssignmentsService { rs := &LicenseAssignmentsService{s: s} return rs @@ -256,6 +261,7 @@ func (c *LicenseAssignmentsDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{productId}/sku/{skuId}/user/{userId}") @@ -383,6 +389,7 @@ func (c *LicenseAssignmentsGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -529,6 +536,7 @@ func (c *LicenseAssignmentsInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.licenseassignmentinsert) if err != nil { @@ -696,6 +704,7 @@ func (c *LicenseAssignmentsListForProductCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -895,6 +904,7 @@ func (c *LicenseAssignmentsListForProductAndSkuCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1078,6 +1088,7 @@ func (c *LicenseAssignmentsPatchCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.licenseassignment) if err != nil { @@ -1231,6 +1242,7 @@ func (c *LicenseAssignmentsUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.licenseassignment) if err != nil { diff --git a/vendor/google.golang.org/api/logging/v2/logging-api.json b/vendor/google.golang.org/api/logging/v2/logging-api.json index 17f9fc0d8..c71a0a92b 100644 --- a/vendor/google.golang.org/api/logging/v2/logging-api.json 
+++ b/vendor/google.golang.org/api/logging/v2/logging-api.json @@ -1,769 +1,778 @@ { + "version": "v2", + "baseUrl": "https://logging.googleapis.com/", + "servicePath": "", + "description": "Writes log entries and manages your Stackdriver Logging configuration.", + "kind": "discovery#restDescription", + "basePath": "", + "revision": "20170220", + "documentationLink": "https://cloud.google.com/logging/docs/", + "id": "logging:v2", "discoveryVersion": "v1", "version_module": "True", "schemas": { - "MonitoredResource": { - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", - "type": "object", + "LogLine": { "properties": { - "type": { - "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Cloud SQL database is \"cloudsql_database\".", + "severity": { + "enumDescriptions": [ + "(0) The log entry has no assigned severity level.", + "(100) Debug or trace information.", + "(200) Routine information, such as ongoing status or performance.", + "(300) Normal but significant events, such as start up, shut down, or a configuration change.", + "(400) Warning events might cause problems.", + "(500) Error events are likely to cause problems.", + "(600) Critical events cause more severe problems or outages.", + "(700) A person must take an action immediately.", + "(800) One or more systems are unusable." + ], + "enum": [ + "DEFAULT", + "DEBUG", + "INFO", + "NOTICE", + "WARNING", + "ERROR", + "CRITICAL", + "ALERT", + "EMERGENCY" + ], + "description": "Severity of this log entry.", "type": "string" }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Cloud SQL databases use the labels \"database_id\" and \"zone\"." + "logMessage": { + "type": "string", + "description": "App-provided log message." + }, + "sourceLocation": { + "description": "Where in the source code this log message was written.", + "$ref": "SourceLocation" + }, + "time": { + "description": "Approximate time when this log entry was made.", + "format": "google-datetime", + "type": "string" } }, - "id": "MonitoredResource" + "id": "LogLine", + "description": "Application log line emitted while processing a request.", + "type": "object" }, - "WriteLogEntriesRequest": { + "ListLogMetricsResponse": { + "description": "Result returned from ListLogMetrics.", "type": "object", "properties": { - "resource": { - "$ref": "MonitoredResource", - "description": "Optional. A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. 
Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry." - }, - "logName": { - "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\" or \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "type": "string" - }, - "entries": { - "description": "Required. The log entries to write. Values supplied for the fields log_name, resource, and labels in this entries.write request are added to those log entries that do not provide their own values for the fields.To improve throughput and to avoid exceeding the quota limit for calls to entries.write, you should write multiple log entries at once rather than calling this method for each individual log entry.", + "metrics": { + "description": "A list of logs-based metrics.", "type": "array", "items": { - "$ref": "LogEntry" + "$ref": "LogMetric" } }, - "partialSuccess": { - "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any entry is not written, the response status will be the error associated with one of the failed entries and include error details in the form of WriteLogEntriesPartialErrors.", - "type": "boolean" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Optional. Default labels that are added to the labels field of all log entries in entries. If a log entry already has a label with the same key as a label in this parameter, then the log entry's label is not changed. See LogEntry." + "nextPageToken": { + "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", + "type": "string" } }, - "id": "WriteLogEntriesRequest", - "description": "The parameters to WriteLogEntries." + "id": "ListLogMetricsResponse" }, - "LogSink": { + "LogEntry": { + "description": "An individual entry in a log.", "type": "object", "properties": { - "name": { - "description": "Required. The client-assigned sink identifier, unique within the project. Example: \"my-syslog-errors-to-pubsub\". Sink identifiers are limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, and periods.", + "logName": { + "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
[LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", "type": "string" }, - "filter": { - "description": "Optional. An advanced logs filter. The only exported log entries are those that are in the resource owning the sink and that match the filter. The filter must use the log entry format specified by the output_version_format parameter. For example, in the v2 format:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n", - "type": "string" + "httpRequest": { + "$ref": "HttpRequest", + "description": "Optional. Information about the HTTP request associated with this log entry, if applicable." }, - "destination": { - "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks.", - "type": "string" + "resource": { + "$ref": "MonitoredResource", + "description": "Required. The monitored resource associated with this log entry. Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error." }, - "endTime": { - "description": "Optional. The time at which this sink will stop exporting log entries. Log entries are exported only if their timestamp is earlier than the end time. If this field is not supplied, there is no end time. If both a start time and an end time are provided, then the end time must be later than the start time.", - "format": "google-datetime", - "type": "string" + "jsonPayload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The log entry payload, represented as a structure that is expressed as a JSON object.", + "type": "object" }, - "writerIdentity": { + "insertId": { "type": "string", - "description": "Output only. An IAM identity—a service account or group—under which Stackdriver Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity." + "description": "Optional. A unique ID for the log entry. If you provide this field, the logging service considers other log entries in the same project with the same ID as duplicates which can be removed. If omitted, Stackdriver Logging will generate a unique ID for this log entry." }, - "startTime": { - "description": "Optional. The time at which this sink will begin exporting log entries. 
Log entries are exported only if their timestamp is not earlier than the start time. The default value of this field is the time the sink is created or updated.", - "format": "google-datetime", + "operation": { + "$ref": "LogEntryOperation", + "description": "Optional. Information about an operation associated with the log entry, if applicable." + }, + "textPayload": { + "description": "The log entry payload, represented as a Unicode string (UTF-8).", "type": "string" }, - "outputVersionFormat": { - "enum": [ - "VERSION_FORMAT_UNSPECIFIED", - "V2", - "V1" - ], - "description": "Optional. The log entry format to use for this sink's exported log entries. The v2 format is used by default. The v1 format is deprecated and should be used only as part of a migration effort to v2. See Migration to the v2 API.", + "protoPayload": { + "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + }, + "trace": { + "description": "Optional. Resource name of the trace associated with the log entry, if any. If it contains a relative resource name, the name is assumed to be relative to //tracing.googleapis.com. Example: projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A set of user-defined (key, value) data that provides additional information about the log entry.", + "type": "object" + }, + "severity": { + "description": "Optional. The severity of the log entry. The default value is LogSeverity.DEFAULT.", "type": "string", "enumDescriptions": [ - "An unspecified format version that will default to V2.", - "LogEntry version 2 format.", - "LogEntry version 1 format." + "(0) The log entry has no assigned severity level.", + "(100) Debug or trace information.", + "(200) Routine information, such as ongoing status or performance.", + "(300) Normal but significant events, such as start up, shut down, or a configuration change.", + "(400) Warning events might cause problems.", + "(500) Error events are likely to cause problems.", + "(600) Critical events cause more severe problems or outages.", + "(700) A person must take an action immediately.", + "(800) One or more systems are unusable." + ], + "enum": [ + "DEFAULT", + "DEBUG", + "INFO", + "NOTICE", + "WARNING", + "ERROR", + "CRITICAL", + "ALERT", + "EMERGENCY" ] + }, + "sourceLocation": { + "description": "Optional. Source code location information associated with the log entry, if any.", + "$ref": "LogEntrySourceLocation" + }, + "timestamp": { + "description": "Optional. The time the event described by the log entry occurred. If omitted, Stackdriver Logging will use the time the log entry is received.", + "format": "google-datetime", + "type": "string" } }, - "id": "LogSink", - "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project or organization." + "id": "LogEntry" }, - "ListLogsResponse": { + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. 
A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "SourceLocation": { + "description": "Specifies a location in a source code file.", "type": "object", "properties": { - "logNames": { - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of log names. For example, \"projects/my-project/syslog\" or \"organizations/123/cloudresourcemanager.googleapis.com%2Factivity\"." + "file": { + "description": "Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", + "type": "string" }, - "nextPageToken": { - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", + "functionName": { + "type": "string", + "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information is used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python)." + }, + "line": { + "description": "Line within the source file.", + "format": "int64", "type": "string" } }, - "id": "ListLogsResponse", - "description": "Result returned from ListLogs." + "id": "SourceLocation" }, - "ListSinksResponse": { - "description": "Result returned from ListSinks.", + "ListLogEntriesRequest": { + "description": "The parameters to ListLogEntries.", "type": "object", "properties": { - "nextPageToken": { - "type": "string", - "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken." + "orderBy": { + "description": "Optional. How the results should be sorted. Presently, the only permitted values are \"timestamp asc\" (default) and \"timestamp desc\". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of LogEntry.insertId.", + "type": "string" }, - "sinks": { + "resourceNames": { "type": "array", "items": { - "$ref": "LogSink" + "type": "string" }, - "description": "A list of sinks." + "description": "Required. Names of one or more parent resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nProjects listed in the project_ids field are added to this list." + }, + "projectIds": { + "description": "Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\". If present, these project identifiers are converted to resource name format and added to the list of resources in resource_names.", + "type": "array", + "items": { + "type": "string" + } + }, + "filter": { + "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters. 
Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", + "type": "string" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" } }, - "id": "ListSinksResponse" + "id": "ListLogEntriesRequest" }, - "HttpRequest": { - "description": "A common proto for logging HTTP requests. Only contains semantics defined by the HTTP specification. Product-specific logging information MUST be defined in a separate message.", + "RequestLog": { "type": "object", "properties": { - "latency": { - "type": "string", - "description": "The request processing latency on the server, from the time the request was received until the response was sent.", - "format": "google-duration" + "responseSize": { + "description": "Size in bytes sent back to client by request.", + "format": "int64", + "type": "string" }, - "userAgent": { - "type": "string", - "description": "The user agent sent by the client. Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\"." + "traceId": { + "description": "Stackdriver Trace identifier for this request.", + "type": "string" }, - "cacheFillBytes": { - "description": "The number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.", - "format": "int64", + "line": { + "description": "A list of log lines emitted by the application while serving this request.", + "type": "array", + "items": { + "$ref": "LogLine" + } + }, + "referrer": { + "description": "Referrer URL of request.", "type": "string" }, - "requestMethod": { + "taskQueueName": { "type": "string", - "description": "The request method. Examples: \"GET\", \"HEAD\", \"PUT\", \"POST\"." + "description": "Queue name of the request, in the case of an offline request." }, - "responseSize": { - "description": "The size of the HTTP response message sent back to the client, in bytes, including the response headers and the response body.", - "format": "int64", + "requestId": { + "description": "Globally unique identifier for a request, which is based on the request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier.", "type": "string" }, - "requestSize": { - "type": "string", - "description": "The size of the HTTP request message in bytes, including the request headers and the request body.", - "format": "int64" - }, - "requestUrl": { - "type": "string", - "description": "The scheme (http, https), the host name, the path and the query portion of the URL that was requested. Example: \"http://example.com/some/info?color=red\"." + "nickname": { + "description": "The logged-in user who made the request.Most likely, this is the part of the user's email before the @ sign. 
The field value is the same for different requests from the same user, but different users can have similar names. This information is also available to the application via the App Engine Users API.This field will be populated starting with App Engine 1.9.21.", + "type": "string" }, - "serverIp": { - "type": "string", - "description": "The IP address (IPv4 or IPv6) of the origin server that the request was sent to." + "pendingTime": { + "description": "Time this request spent in the pending request queue.", + "format": "google-duration", + "type": "string" }, - "remoteIp": { - "type": "string", - "description": "The IP address (IPv4 or IPv6) of the client that issued the HTTP request. Examples: \"192.168.1.1\", \"FE80::0202:B3FF:FE1E:8329\"." + "resource": { + "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". The fragment identifier, which is identified by the # character, is not included.", + "type": "string" }, - "cacheLookup": { - "type": "boolean", - "description": "Whether or not a cache lookup was attempted." + "status": { + "description": "HTTP response status code. Example: 200, 404.", + "format": "int32", + "type": "integer" }, - "cacheHit": { - "description": "Whether or not an entity was served from cache (with or without validation).", - "type": "boolean" + "taskName": { + "description": "Task name of the request, in the case of an offline request.", + "type": "string" }, - "cacheValidatedWithOriginServer": { - "description": "Whether or not the response was validated with the origin server before being served from cache. This field is only meaningful if cache_hit is True.", - "type": "boolean" + "urlMapEntry": { + "description": "File or class that handled the request.", + "type": "string" }, - "status": { - "description": "The response code indicating the status of response. Examples: 200, 404.", + "instanceIndex": { + "description": "If the instance processing this request belongs to a manually scaled module, then this is the 0-based index of the instance. Otherwise, this value is -1.", "format": "int32", "type": "integer" }, - "referer": { - "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", + "host": { + "description": "Internet host and port number of the resource being requested.", "type": "string" - } - }, - "id": "HttpRequest" - }, - "LabelDescriptor": { - "type": "object", - "properties": { - "valueType": { - "enum": [ - "STRING", - "BOOL", - "INT64" - ], - "description": "The type of data that can be assigned to the label.", - "type": "string", - "enumDescriptions": [ - "A variable-length string. This is the default.", - "Boolean; true or false.", - "A 64-bit signed integer." - ] }, - "key": { + "finished": { + "description": "Whether this request is finished or active.", + "type": "boolean" + }, + "httpVersion": { "type": "string", - "description": "The label key." + "description": "HTTP version of request. Example: \"HTTP/1.1\"." }, - "description": { - "description": "A human-readable description for the label.", + "startTime": { + "description": "Time when the request started.", + "format": "google-datetime", "type": "string" - } - }, - "id": "LabelDescriptor", - "description": "A description of a label." 
- }, - "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", - "type": "object", - "properties": { - "name": { - "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", + }, + "latency": { + "description": "Latency of the request.", + "format": "google-duration", "type": "string" }, - "displayName": { - "type": "string", - "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\"." + "ip": { + "description": "Origin IP address.", + "type": "string" }, - "description": { - "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", + "appId": { + "description": "Application that handled this request.", "type": "string" }, - "type": { - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", + "appEngineRelease": { + "description": "App Engine release version.", "type": "string" }, - "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", + "method": { + "description": "Request method. Example: \"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\".", + "type": "string" + }, + "cost": { + "description": "An indication of the relative cost of serving this request.", + "format": "double", + "type": "number" + }, + "instanceId": { + "description": "An identifier for the instance that handled the request.", + "type": "string" + }, + "megaCycles": { + "description": "Number of CPU megacycles used to process request.", + "format": "int64", + "type": "string" + }, + "first": { + "description": "Whether this is the first RequestLog entry for this request. 
If an active request has several RequestLog entries written to Stackdriver Logging, then this field will be set for one of them.", + "type": "boolean" + }, + "versionId": { + "description": "Version of the application that handled this request.", + "type": "string" + }, + "moduleId": { + "description": "Module of the application that handled this request.", + "type": "string" + }, + "endTime": { + "description": "Time when the request finished.", + "format": "google-datetime", + "type": "string" + }, + "userAgent": { + "description": "User agent that made the request.", + "type": "string" + }, + "wasLoadingRequest": { + "description": "Whether this was a loading request for the instance.", + "type": "boolean" + }, + "sourceReference": { + "description": "Source code for the application that handled this request. There can be more than one source reference per deployed application if source code is distributed among multiple repositories.", "type": "array", "items": { - "$ref": "LabelDescriptor" + "$ref": "SourceReference" } } }, - "id": "MonitoredResourceDescriptor" + "id": "RequestLog", + "description": "Complete log information about a single HTTP request to an App Engine application." }, - "LogEntrySourceLocation": { - "description": "Additional information about the source code location that produced the log entry.", + "ListMonitoredResourceDescriptorsResponse": { + "description": "Result returned from ListMonitoredResourceDescriptors.", "type": "object", "properties": { - "file": { - "description": "Optional. Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", - "type": "string" - }, - "function": { - "type": "string", - "description": "Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python)." + "resourceDescriptors": { + "description": "A list of resource descriptors.", + "type": "array", + "items": { + "$ref": "MonitoredResourceDescriptor" + } }, - "line": { - "type": "string", - "description": "Optional. Line within the source file. 1-based; 0 indicates no line number available.", - "format": "int64" + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", + "type": "string" } }, - "id": "LogEntrySourceLocation" + "id": "ListMonitoredResourceDescriptorsResponse" }, - "ListLogEntriesResponse": { - "description": "Result returned from ListLogEntries.", + "SourceReference": { + "description": "A reference to a particular snapshot of the source tree used to build and deploy an application.", "type": "object", "properties": { - "nextPageToken": { - "type": "string", - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.If a value for next_page_token appears and the entries field is empty, it means that the search found no log entries so far but it did not have time to search all the possible log entries. Retry the method with this value for page_token to continue the search. 
Alternatively, consider speeding up the search by changing your filter to specify a single log name or resource type, or to narrow the time range of the search." + "repository": { + "description": "Optional. A URI string identifying the repository. Example: \"https://github.com/GoogleCloudPlatform/kubernetes.git\"", + "type": "string" }, - "entries": { - "type": "array", - "items": { - "$ref": "LogEntry" - }, - "description": "A list of log entries." + "revisionId": { + "description": "The canonical and persistent identifier of the deployed revision. Example (git): \"0035781c50ec7aa23385dc841529ce8a4b70db1b\"", + "type": "string" } }, - "id": "ListLogEntriesResponse" + "id": "SourceReference" }, - "LogLine": { - "description": "Application log line emitted while processing a request.", + "LogMetric": { + "description": "Describes a logs-based metric. The value of the metric is the number of log entries that match a logs filter in a given time interval.", "type": "object", "properties": { - "severity": { + "version": { + "enumDescriptions": [ + "Stackdriver Logging API v2.", + "Stackdriver Logging API v1." + ], "enum": [ - "DEFAULT", - "DEBUG", - "INFO", - "NOTICE", - "WARNING", - "ERROR", - "CRITICAL", - "ALERT", - "EMERGENCY" + "V2", + "V1" ], - "description": "Severity of this log entry.", - "type": "string", - "enumDescriptions": [ - "(0) The log entry has no assigned severity level.", - "(100) Debug or trace information.", - "(200) Routine information, such as ongoing status or performance.", - "(300) Normal but significant events, such as start up, shut down, or a configuration change.", - "(400) Warning events might cause problems.", - "(500) Error events are likely to cause problems.", - "(600) Critical events cause more severe problems or outages.", - "(700) A person must take an action immediately.", - "(800) One or more systems are unusable." - ] + "description": "Output only. The API version that created or updated this metric. The version also dictates the syntax of the filter expression. When a value for this field is missing, the default value of V2 should be assumed.", + "type": "string" }, - "logMessage": { - "description": "App-provided log message.", + "filter": { + "description": "Required. An advanced logs filter which is used to match log entries. Example:\n\"resource.type=gae_app AND severity\u003e=ERROR\"\nThe maximum length of the filter is 20000 characters.", "type": "string" }, - "sourceLocation": { - "$ref": "SourceLocation", - "description": "Where in the source code this log message was written." + "name": { + "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".", + "type": "string" }, - "time": { - "description": "Approximate time when this log entry was made.", - "format": "google-datetime", + "description": { + "description": "Optional. 
A description of this metric, which is used in documentation.", "type": "string" } }, - "id": "LogLine" + "id": "LogMetric" }, - "ListLogMetricsResponse": { + "WriteLogEntriesResponse": { + "description": "Result returned from WriteLogEntries. empty", + "type": "object", + "properties": {}, + "id": "WriteLogEntriesResponse" + }, + "LogEntryOperation": { + "description": "Additional information about a potentially long-running operation with which a log entry is associated.", "type": "object", "properties": { - "metrics": { - "description": "A list of logs-based metrics.", - "type": "array", - "items": { - "$ref": "LogMetric" - } + "last": { + "description": "Optional. Set this to True if this is the last log entry in the operation.", + "type": "boolean" }, - "nextPageToken": { - "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", + "id": { + "description": "Optional. An arbitrary operation identifier. Log entries with the same identifier are assumed to be part of the same operation.", "type": "string" + }, + "producer": { + "description": "Optional. An arbitrary producer identifier. The combination of id and producer must be globally unique. Examples for producer: \"MyDivision.MyBigCompany.com\", \"github.com/MyProject/MyApplication\".", + "type": "string" + }, + "first": { + "description": "Optional. Set this to True if this is the first log entry in the operation.", + "type": "boolean" } }, - "id": "ListLogMetricsResponse", - "description": "Result returned from ListLogMetrics." + "id": "LogEntryOperation" }, - "LogEntry": { + "MonitoredResource": { + "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", "type": "object", "properties": { - "logName": { - "type": "string", - "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results." - }, - "resource": { - "$ref": "MonitoredResource", - "description": "Required. The monitored resource associated with this log entry. 
Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error." - }, - "httpRequest": { - "$ref": "HttpRequest", - "description": "Optional. Information about the HTTP request associated with this log entry, if applicable." - }, - "jsonPayload": { - "type": "object", - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" - }, - "description": "The log entry payload, represented as a structure that is expressed as a JSON object." - }, - "operation": { - "$ref": "LogEntryOperation", - "description": "Optional. Information about an operation associated with the log entry, if applicable." - }, - "insertId": { - "type": "string", - "description": "Optional. A unique ID for the log entry. If you provide this field, the logging service considers other log entries in the same project with the same ID as duplicates which can be removed. If omitted, Stackdriver Logging will generate a unique ID for this log entry." - }, - "textPayload": { - "description": "The log entry payload, represented as a Unicode string (UTF-8).", - "type": "string" - }, - "protoPayload": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.", - "type": "object" - }, "labels": { - "type": "object", "additionalProperties": { "type": "string" }, - "description": "Optional. A set of user-defined (key, value) data that provides additional information about the log entry." - }, - "trace": { - "description": "Optional. Resource name of the trace associated with the log entry, if any. If it contains a relative resource name, the name is assumed to be relative to //tracing.googleapis.com. Example: projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824", - "type": "string" - }, - "severity": { - "type": "string", - "enumDescriptions": [ - "(0) The log entry has no assigned severity level.", - "(100) Debug or trace information.", - "(200) Routine information, such as ongoing status or performance.", - "(300) Normal but significant events, such as start up, shut down, or a configuration change.", - "(400) Warning events might cause problems.", - "(500) Error events are likely to cause problems.", - "(600) Critical events cause more severe problems or outages.", - "(700) A person must take an action immediately.", - "(800) One or more systems are unusable." - ], - "enum": [ - "DEFAULT", - "DEBUG", - "INFO", - "NOTICE", - "WARNING", - "ERROR", - "CRITICAL", - "ALERT", - "EMERGENCY" - ], - "description": "Optional. The severity of the log entry. The default value is LogSeverity.DEFAULT." - }, - "sourceLocation": { - "$ref": "LogEntrySourceLocation", - "description": "Optional. Source code location information associated with the log entry, if any." + "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Cloud SQL databases use the labels \"database_id\" and \"zone\".", + "type": "object" }, - "timestamp": { - "description": "Optional. The time the event described by the log entry occurred. If omitted, Stackdriver Logging will use the time the log entry is received.", - "format": "google-datetime", + "type": { + "description": "Required. The monitored resource type. 
This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Cloud SQL database is \"cloudsql_database\".", "type": "string" } }, - "id": "LogEntry", - "description": "An individual entry in a log." - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", - "type": "object", - "properties": {}, - "id": "Empty" + "id": "MonitoredResource" }, - "SourceLocation": { - "description": "Specifies a location in a source code file.", + "LogSink": { + "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.", "type": "object", "properties": { - "file": { + "name": { + "description": "Required. The client-assigned sink identifier, unique within the project. Example: \"my-syslog-errors-to-pubsub\". Sink identifiers are limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, and periods.", + "type": "string" + }, + "filter": { "type": "string", - "description": "Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name." + "description": "Optional. An advanced logs filter. The only exported log entries are those that are in the resource owning the sink and that match the filter. The filter must use the log entry format specified by the output_version_format parameter. For example, in the v2 format:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n" }, - "functionName": { - "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information is used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", + "destination": { + "type": "string", + "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks." + }, + "endTime": { + "description": "Optional. The time at which this sink will stop exporting log entries. Log entries are exported only if their timestamp is earlier than the end time. If this field is not supplied, there is no end time. If both a start time and an end time are provided, then the end time must be later than the start time.", + "format": "google-datetime", "type": "string" }, - "line": { - "description": "Line within the source file.", - "format": "int64", + "startTime": { + "type": "string", + "description": "Optional. The time at which this sink will begin exporting log entries. 
Log entries are exported only if their timestamp is not earlier than the start time. The default value of this field is the time the sink is created or updated.", + "format": "google-datetime" + }, + "writerIdentity": { + "description": "Output only. An IAM identity—a service account or group—under which Stackdriver Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", + "type": "string" + }, + "outputVersionFormat": { + "enumDescriptions": [ + "An unspecified format version that will default to V2.", + "LogEntry version 2 format.", + "LogEntry version 1 format." + ], + "enum": [ + "VERSION_FORMAT_UNSPECIFIED", + "V2", + "V1" + ], + "description": "Optional. The log entry format to use for this sink's exported log entries. The v2 format is used by default. The v1 format is deprecated and should be used only as part of a migration effort to v2. See Migration to the v2 API.", "type": "string" } }, - "id": "SourceLocation" + "id": "LogSink" }, - "ListLogEntriesRequest": { + "WriteLogEntriesRequest": { + "id": "WriteLogEntriesRequest", + "description": "The parameters to WriteLogEntries.", "type": "object", "properties": { - "resourceNames": { - "description": "Required. Names of one or more resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nProjects listed in the project_ids field are added to this list.", + "logName": { + "type": "string", + "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\" or \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry." + }, + "entries": { + "description": "Required. The log entries to write. Values supplied for the fields log_name, resource, and labels in this entries.write request are added to those log entries that do not provide their own values for the fields.To improve throughput and to avoid exceeding the quota limit for calls to entries.write, you should write multiple log entries at once rather than calling this method for each individual log entry.", "type": "array", "items": { - "type": "string" + "$ref": "LogEntry" } }, - "filter": { - "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", + "partialSuccess": { + "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. 
If any entry is not written, the response status will be the error associated with one of the failed entries and include error details in the form of WriteLogEntriesPartialErrors.", + "type": "boolean" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Default labels that are added to the labels field of all log entries in entries. If a log entry already has a label with the same key as a label in this parameter, then the log entry's label is not changed. See LogEntry.", + "type": "object" + }, + "resource": { + "$ref": "MonitoredResource", + "description": "Optional. A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry." + } + } + }, + "ListLogsResponse": { + "description": "Result returned from ListLogs.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", "type": "string" }, - "projectIds": { - "description": "Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\". If present, these project identifiers are converted to resource name format and added to the list of resources in resource_names.", + "logNames": { + "description": "A list of log names. For example, \"projects/my-project/syslog\" or \"organizations/123/cloudresourcemanager.googleapis.com%2Factivity\".", "type": "array", "items": { "type": "string" } - }, - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - }, - "orderBy": { - "type": "string", - "description": "Optional. How the results should be sorted. Presently, the only permitted values are \"timestamp asc\" (default) and \"timestamp desc\". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of LogEntry.insertId." } }, - "id": "ListLogEntriesRequest", - "description": "The parameters to ListLogEntries." + "id": "ListLogsResponse" }, - "RequestLog": { + "HttpRequest": { + "description": "A common proto for logging HTTP requests. Only contains semantics defined by the HTTP specification. 
Product-specific logging information MUST be defined in a separate message.", "type": "object", "properties": { - "moduleId": { - "description": "Module of the application that handled this request.", + "latency": { + "description": "The request processing latency on the server, from the time the request was received until the response was sent.", + "format": "google-duration", "type": "string" }, - "endTime": { - "type": "string", - "description": "Time when the request finished.", - "format": "google-datetime" - }, "userAgent": { - "type": "string", - "description": "User agent that made the request." - }, - "wasLoadingRequest": { - "description": "Whether this was a loading request for the instance.", - "type": "boolean" - }, - "sourceReference": { - "description": "Source code for the application that handled this request. There can be more than one source reference per deployed application if source code is distributed among multiple repositories.", - "type": "array", - "items": { - "$ref": "SourceReference" - } + "description": "The user agent sent by the client. Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\".", + "type": "string" }, - "responseSize": { - "description": "Size in bytes sent back to client by request.", + "cacheFillBytes": { + "description": "The number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.", "format": "int64", "type": "string" }, - "traceId": { - "type": "string", - "description": "Stackdriver Trace identifier for this request." - }, - "line": { - "type": "array", - "items": { - "$ref": "LogLine" - }, - "description": "A list of log lines emitted by the application while serving this request." - }, - "taskQueueName": { - "description": "Queue name of the request, in the case of an offline request.", + "requestMethod": { + "description": "The request method. Examples: \"GET\", \"HEAD\", \"PUT\", \"POST\".", "type": "string" }, - "referrer": { - "type": "string", - "description": "Referrer URL of request." - }, - "requestId": { - "type": "string", - "description": "Globally unique identifier for a request, which is based on the request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier." - }, - "nickname": { - "description": "The logged-in user who made the request.Most likely, this is the part of the user's email before the @ sign. The field value is the same for different requests from the same user, but different users can have similar names. This information is also available to the application via the App Engine Users API.This field will be populated starting with App Engine 1.9.21.", + "responseSize": { + "description": "The size of the HTTP response message sent back to the client, in bytes, including the response headers and the response body.", + "format": "int64", "type": "string" }, - "pendingTime": { - "type": "string", - "description": "Time this request spent in the pending request queue.", - "format": "google-duration" - }, - "resource": { - "type": "string", - "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". The fragment identifier, which is identified by the # character, is not included." - }, - "status": { - "description": "HTTP response status code. 
Example: 200, 404.", - "format": "int32", - "type": "integer" - }, - "taskName": { - "type": "string", - "description": "Task name of the request, in the case of an offline request." - }, - "urlMapEntry": { - "description": "File or class that handled the request.", + "requestSize": { + "description": "The size of the HTTP request message in bytes, including the request headers and the request body.", + "format": "int64", "type": "string" }, - "instanceIndex": { - "description": "If the instance processing this request belongs to a manually scaled module, then this is the 0-based index of the instance. Otherwise, this value is -1.", - "format": "int32", - "type": "integer" - }, - "host": { - "type": "string", - "description": "Internet host and port number of the resource being requested." - }, - "finished": { - "type": "boolean", - "description": "Whether this request is finished or active." - }, - "httpVersion": { - "type": "string", - "description": "HTTP version of request. Example: \"HTTP/1.1\"." - }, - "startTime": { - "type": "string", - "description": "Time when the request started.", - "format": "google-datetime" - }, - "latency": { - "description": "Latency of the request.", - "format": "google-duration", + "requestUrl": { + "description": "The scheme (http, https), the host name, the path and the query portion of the URL that was requested. Example: \"http://example.com/some/info?color=red\".", "type": "string" }, - "ip": { - "type": "string", - "description": "Origin IP address." - }, - "appId": { - "type": "string", - "description": "Application that handled this request." - }, - "appEngineRelease": { - "description": "App Engine release version.", + "serverIp": { + "description": "The IP address (IPv4 or IPv6) of the origin server that the request was sent to.", "type": "string" }, - "method": { - "description": "Request method. Example: \"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\".", + "remoteIp": { + "description": "The IP address (IPv4 or IPv6) of the client that issued the HTTP request. Examples: \"192.168.1.1\", \"FE80::0202:B3FF:FE1E:8329\".", "type": "string" }, - "cost": { - "type": "number", - "description": "An indication of the relative cost of serving this request.", - "format": "double" - }, - "instanceId": { - "type": "string", - "description": "An identifier for the instance that handled the request." + "cacheLookup": { + "description": "Whether or not a cache lookup was attempted.", + "type": "boolean" }, - "megaCycles": { - "type": "string", - "description": "Number of CPU megacycles used to process request.", - "format": "int64" + "cacheHit": { + "description": "Whether or not an entity was served from cache (with or without validation).", + "type": "boolean" }, - "first": { - "description": "Whether this is the first RequestLog entry for this request. If an active request has several RequestLog entries written to Stackdriver Logging, then this field will be set for one of them.", + "cacheValidatedWithOriginServer": { + "description": "Whether or not the response was validated with the origin server before being served from cache. This field is only meaningful if cache_hit is True.", "type": "boolean" }, - "versionId": { - "description": "Version of the application that handled this request.", + "status": { + "description": "The response code indicating the status of response. 
Examples: 200, 404.", + "format": "int32", + "type": "integer" + }, + "referer": { + "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", "type": "string" } }, - "id": "RequestLog", - "description": "Complete log information about a single HTTP request to an App Engine application." + "id": "HttpRequest" }, - "ListMonitoredResourceDescriptorsResponse": { - "description": "Result returned from ListMonitoredResourceDescriptors.", - "type": "object", + "ListSinksResponse": { "properties": { "nextPageToken": { - "type": "string", - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken." + "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", + "type": "string" }, - "resourceDescriptors": { + "sinks": { + "description": "A list of sinks.", "type": "array", "items": { - "$ref": "MonitoredResourceDescriptor" - }, - "description": "A list of resource descriptors." + "$ref": "LogSink" + } } }, - "id": "ListMonitoredResourceDescriptorsResponse" + "id": "ListSinksResponse", + "description": "Result returned from ListSinks.", + "type": "object" }, - "SourceReference": { + "LabelDescriptor": { + "description": "A description of a label.", "type": "object", "properties": { - "repository": { - "type": "string", - "description": "Optional. A URI string identifying the repository. Example: \"https://github.com/GoogleCloudPlatform/kubernetes.git\"" + "key": { + "description": "The label key.", + "type": "string" }, - "revisionId": { - "type": "string", - "description": "The canonical and persistent identifier of the deployed revision. Example (git): \"0035781c50ec7aa23385dc841529ce8a4b70db1b\"" + "description": { + "description": "A human-readable description for the label.", + "type": "string" + }, + "valueType": { + "enumDescriptions": [ + "A variable-length string. This is the default.", + "Boolean; true or false.", + "A 64-bit signed integer." + ], + "enum": [ + "STRING", + "BOOL", + "INT64" + ], + "description": "The type of data that can be assigned to the label.", + "type": "string" } }, - "id": "SourceReference", - "description": "A reference to a particular snapshot of the source tree used to build and deploy an application." + "id": "LabelDescriptor" }, - "LogMetric": { - "description": "Describes a logs-based metric. The value of the metric is the number of log entries that match a logs filter in a given time interval.", + "MonitoredResourceDescriptor": { + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", "type": "object", "properties": { + "labels": { + "type": "array", + "items": { + "$ref": "LabelDescriptor" + }, + "description": "Required. A set of labels used to describe instances of this monitored resource type. 
For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\"." + }, "name": { - "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".", + "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", "type": "string" }, - "description": { - "type": "string", - "description": "Optional. A description of this metric, which is used in documentation." + "displayName": { + "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", + "type": "string" }, - "version": { - "type": "string", - "enumDescriptions": [ - "Stackdriver Logging API v2.", - "Stackdriver Logging API v1." - ], - "enum": [ - "V2", - "V1" - ], - "description": "Output only. The API version that created or updated this metric. The version also dictates the syntax of the filter expression. When a value for this field is missing, the default value of V2 should be assumed." + "description": { + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", + "type": "string" }, - "filter": { - "type": "string", - "description": "Required. An advanced logs filter which is used to match log entries. Example:\n\"resource.type=gae_app AND severity\u003e=ERROR\"\nThe maximum length of the filter is 20000 characters." + "type": { + "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", + "type": "string" } }, - "id": "LogMetric" + "id": "MonitoredResourceDescriptor" }, - "LogEntryOperation": { - "description": "Additional information about a potentially long-running operation with which a log entry is associated.", + "LogEntrySourceLocation": { + "description": "Additional information about the source code location that produced the log entry.", "type": "object", "properties": { - "last": { - "type": "boolean", - "description": "Optional. Set this to True if this is the last log entry in the operation." - }, - "id": { - "description": "Optional. An arbitrary operation identifier. Log entries with the same identifier are assumed to be part of the same operation.", + "file": { + "description": "Optional. Source file name. 
Depending on the runtime environment, this might be a simple name or a fully-qualified name.", "type": "string" }, - "producer": { - "description": "Optional. An arbitrary producer identifier. The combination of id and producer must be globally unique. Examples for producer: \"MyDivision.MyBigCompany.com\", \"github.com/MyProject/MyApplication\".", + "function": { + "description": "Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", "type": "string" }, - "first": { - "type": "boolean", - "description": "Optional. Set this to True if this is the first log entry in the operation." + "line": { + "description": "Optional. Line within the source file. 1-based; 0 indicates no line number available.", + "format": "int64", + "type": "string" } }, - "id": "LogEntryOperation" + "id": "LogEntrySourceLocation" }, - "WriteLogEntriesResponse": { - "description": "Result returned from WriteLogEntries. empty", - "type": "object", - "properties": {}, - "id": "WriteLogEntriesResponse" + "ListLogEntriesResponse": { + "properties": { + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.If a value for next_page_token appears and the entries field is empty, it means that the search found no log entries so far but it did not have time to search all the possible log entries. Retry the method with this value for page_token to continue the search. Alternatively, consider speeding up the search by changing your filter to specify a single log name or resource type, or to narrow the time range of the search.", + "type": "string" + }, + "entries": { + "description": "A list of log entries.", + "type": "array", + "items": { + "$ref": "LogEntry" + } + } + }, + "id": "ListLogEntriesResponse", + "description": "Result returned from ListLogEntries.", + "type": "object" } }, "icons": { @@ -800,56 +809,12 @@ "title": "Stackdriver Logging API", "ownerName": "Google", "resources": { - "entries": { - "methods": { - "list": { - "response": { - "$ref": "ListLogEntriesResponse" - }, - "parameterOrder": [], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": {}, - "flatPath": "v2/entries:list", - "path": "v2/entries:list", - "id": "logging.entries.list", - "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. For ways to export log entries, see Exporting Logs.", - "request": { - "$ref": "ListLogEntriesRequest" - } - }, - "write": { - "flatPath": "v2/entries:write", - "path": "v2/entries:write", - "id": "logging.entries.write", - "description": "Writes log entries to Stackdriver Logging. 
All log entries are written by this method.", - "request": { - "$ref": "WriteLogEntriesRequest" - }, - "response": { - "$ref": "WriteLogEntriesResponse" - }, - "parameterOrder": [], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "parameters": {} - } - } - }, - "projects": { + "organizations": { "resources": { "logs": { "methods": { "delete": { + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", "response": { "$ref": "Empty" }, @@ -859,48 +824,50 @@ "httpMethod": "DELETE", "parameters": { "logName": { - "location": "path", - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", "required": true, "type": "string", - "pattern": "^projects/[^/]+/logs/[^/]+$" + "pattern": "^organizations/[^/]+/logs/[^/]+$", + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], - "flatPath": "v2/projects/{projectsId}/logs/{logsId}", + "flatPath": "v2/organizations/{organizationsId}/logs/{logsId}", "path": "v2/{+logName}", - "id": "logging.projects.logs.delete", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries." + "id": "logging.organizations.logs.delete" }, "list": { + "id": "logging.organizations.logs.list", + "path": "v2/{+parent}/logs", + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], "response": { "$ref": "ListLogsResponse" }, + "parameterOrder": [ + "parent" + ], "parameters": { + "parent": { + "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$", + "location": "path" + }, "pageToken": { - "location": "query", "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" + "type": "string", + "location": "query" }, "pageSize": { + "type": "integer", "location": "query", "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - }, - "parent": { - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n" + "format": "int32" } }, "scopes": [ @@ -909,24 +876,50 @@ "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read" ], - "flatPath": "v2/projects/{projectsId}/logs", - "id": "logging.projects.logs.list", - "path": "v2/{+parent}/logs", - "description": "Lists the logs in projects or organizations. Only logs that have entries are listed." + "flatPath": "v2/organizations/{organizationsId}/logs" } } }, "sinks": { "methods": { + "delete": { + "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", + "parameterOrder": [ + "sinkName" + ], + "response": { + "$ref": "Empty" + }, + "httpMethod": "DELETE", + "parameters": { + "sinkName": { + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+/sinks/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", + "path": "v2/{+sinkName}", + "id": "logging.organizations.sinks.delete" + }, "list": { + "flatPath": "v2/organizations/{organizationsId}/sinks", + "id": "logging.organizations.sinks.list", + "path": "v2/{+parent}/sinks", "description": "Lists sinks.", "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], "response": { "$ref": "ListSinksResponse" }, + "parameterOrder": [ + "parent" + ], "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", @@ -935,204 +928,293 @@ ], "parameters": { "parent": { + "pattern": "^organizations/[^/]+$", "location": "path", - "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", "required": true, - "type": "string", - "pattern": "^projects/[^/]+$" + "type": "string" }, "pageToken": { - "type": "string", "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. 
The values of other method parameters should be identical to those in the previous call.", + "type": "string" }, "pageSize": { - "type": "integer", - "location": "query", "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" + "format": "int32", + "type": "integer", + "location": "query" } - }, - "flatPath": "v2/projects/{projectsId}/sinks", - "id": "logging.projects.sinks.list", - "path": "v2/{+parent}/sinks" + } }, "get": { - "httpMethod": "GET", - "parameterOrder": [ - "sinkName" - ], + "description": "Gets a sink.", "response": { "$ref": "LogSink" }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" + "parameterOrder": [ + "sinkName" ], + "httpMethod": "GET", "parameters": { "sinkName": { "location": "path", - "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, "type": "string", - "pattern": "^projects/[^/]+/sinks/[^/]+$" + "pattern": "^organizations/[^/]+/sinks/[^/]+$" } }, - "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}", - "id": "logging.projects.sinks.get", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", "path": "v2/{+sinkName}", - "description": "Gets a sink." + "id": "logging.organizations.sinks.get" }, "update": { - "response": { + "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", + "request": { "$ref": "LogSink" }, + "httpMethod": "PUT", "parameterOrder": [ "sinkName" ], - "httpMethod": "PUT", + "response": { + "$ref": "LogSink" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], "parameters": { "sinkName": { "location": "path", - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, "type": "string", - "pattern": "^projects/[^/]+/sinks/[^/]+$" + "pattern": "^organizations/[^/]+/sinks/[^/]+$" }, "uniqueWriterIdentity": { - "location": "query", - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", - "type": "boolean" + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", + "type": "boolean", + "location": "query" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.projects.sinks.update", - "request": { - "$ref": "LogSink" - }, - "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field." + "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", + "id": "logging.organizations.sinks.update", + "path": "v2/{+sinkName}" }, "create": { - "request": { - "$ref": "LogSink" - }, - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", - "response": { - "$ref": "LogSink" - }, + "httpMethod": "POST", "parameterOrder": [ "parent" ], - "httpMethod": "POST", + "response": { + "$ref": "LogSink" + }, "parameters": { "parent": { + "pattern": "^organizations/[^/]+$", "location": "path", - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. 
The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", "required": true, - "type": "string", - "pattern": "^projects/[^/]+$" + "type": "string" }, "uniqueWriterIdentity": { - "location": "query", - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", - "type": "boolean" + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "type": "boolean", + "location": "query" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], - "flatPath": "v2/projects/{projectsId}/sinks", + "flatPath": "v2/organizations/{organizationsId}/sinks", + "id": "logging.organizations.sinks.create", "path": "v2/{+parent}/sinks", - "id": "logging.projects.sinks.create" - }, + "request": { + "$ref": "LogSink" + }, + "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink." + } + } + } + } + }, + "entries": { + "methods": { + "list": { + "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. For ways to export log entries, see Exporting Logs.", + "request": { + "$ref": "ListLogEntriesRequest" + }, + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "ListLogEntriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "parameters": {}, + "flatPath": "v2/entries:list", + "id": "logging.entries.list", + "path": "v2/entries:list" + }, + "write": { + "description": "Writes log entries to Stackdriver Logging. 
All log entries are written by this method.", + "request": { + "$ref": "WriteLogEntriesRequest" + }, + "response": { + "$ref": "WriteLogEntriesResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "parameters": {}, + "flatPath": "v2/entries:write", + "path": "v2/entries:write", + "id": "logging.entries.write" + } + } + }, + "projects": { + "resources": { + "logs": { + "methods": { "delete": { - "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", + "flatPath": "v2/projects/{projectsId}/logs/{logsId}", + "id": "logging.projects.logs.delete", + "path": "v2/{+logName}", + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", + "httpMethod": "DELETE", "response": { "$ref": "Empty" }, "parameterOrder": [ - "sinkName" + "logName" ], - "httpMethod": "DELETE", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], "parameters": { - "sinkName": { + "logName": { + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/logs/[^/]+$", + "location": "path" + } + } + }, + "list": { + "parameters": { + "parent": { "location": "path", - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", + "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", "required": true, "type": "string", - "pattern": "^projects/[^/]+/sinks/[^/]+$" + "pattern": "^projects/[^/]+$" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" } }, - "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.projects.sinks.delete" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2/projects/{projectsId}/logs", + "path": "v2/{+parent}/logs", + "id": "logging.projects.logs.list", + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListLogsResponse" + }, + "httpMethod": "GET" } } }, - "metrics": { + "sinks": { "methods": { "delete": { + "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}", + "path": "v2/{+sinkName}", + "id": "logging.projects.sinks.delete", + "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", "response": { "$ref": "Empty" }, "parameterOrder": [ - "metricName" + "sinkName" ], "httpMethod": "DELETE", "parameters": { - "metricName": { - "location": "path", - "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + "sinkName": { + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, "type": "string", - "pattern": "^projects/[^/]+/metrics/[^/]+$" + "pattern": "^projects/[^/]+/sinks/[^/]+$", + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}", - "path": "v2/{+metricName}", - "id": "logging.projects.metrics.delete", - "description": "Deletes a logs-based metric." + "https://www.googleapis.com/auth/logging.admin" + ] }, "list": { - "httpMethod": "GET", + "description": "Lists sinks.", + "response": { + "$ref": "ListSinksResponse" + }, "parameterOrder": [ "parent" ], - "response": { - "$ref": "ListLogMetricsResponse" - }, + "httpMethod": "GET", "parameters": { "pageToken": { + "type": "string", "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." }, "pageSize": { - "location": "query", "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", - "type": "integer" + "type": "integer", + "location": "query" }, "parent": { "location": "path", - "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", + "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", "required": true, "type": "string", "pattern": "^projects/[^/]+$" @@ -1144,253 +1226,185 @@ "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read" ], - "flatPath": "v2/projects/{projectsId}/metrics", - "id": "logging.projects.metrics.list", - "path": "v2/{+parent}/metrics", - "description": "Lists logs-based metrics." + "flatPath": "v2/projects/{projectsId}/sinks", + "path": "v2/{+parent}/sinks", + "id": "logging.projects.sinks.list" }, "get": { - "description": "Gets a logs-based metric.", + "httpMethod": "GET", "response": { - "$ref": "LogMetric" + "$ref": "LogSink" }, "parameterOrder": [ - "metricName" + "sinkName" ], - "httpMethod": "GET", - "parameters": { - "metricName": { - "location": "path", - "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/metrics/[^/]+$" - } - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read" ], - "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}", - "path": "v2/{+metricName}", - "id": "logging.projects.metrics.get" - }, - "update": { - "request": { - "$ref": "LogMetric" - }, - "description": "Creates or updates a logs-based metric.", - "httpMethod": "PUT", - "parameterOrder": [ - "metricName" - ], - "response": { - "$ref": "LogMetric" - }, - "parameters": { - "metricName": { - "location": "path", - "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/metrics/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}", - "id": "logging.projects.metrics.update", - "path": "v2/{+metricName}" - }, - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "LogMetric" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], "parameters": { - "parent": { + "sinkName": { + "pattern": "^projects/[^/]+/sinks/[^/]+$", "location": "path", - "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", + "description": "Required. 
The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, - "type": "string", - "pattern": "^projects/[^/]+$" + "type": "string" } }, - "flatPath": "v2/projects/{projectsId}/metrics", - "id": "logging.projects.metrics.create", - "path": "v2/{+parent}/metrics", - "description": "Creates a logs-based metric.", - "request": { - "$ref": "LogMetric" - } - } - } - } - } - }, - "billingAccounts": { - "resources": { - "logs": { - "methods": { - "delete": { - "flatPath": "v2/billingAccounts/{billingAccountsId}/logs/{logsId}", - "path": "v2/{+logName}", - "id": "logging.billingAccounts.logs.delete", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", - "response": { - "$ref": "Empty" - }, + "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}", + "id": "logging.projects.sinks.get", + "path": "v2/{+sinkName}", + "description": "Gets a sink." + }, + "update": { + "id": "logging.projects.sinks.update", + "path": "v2/{+sinkName}", + "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", + "request": { + "$ref": "LogSink" + }, + "httpMethod": "PUT", "parameterOrder": [ - "logName" + "sinkName" ], - "httpMethod": "DELETE", + "response": { + "$ref": "LogSink" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], "parameters": { - "logName": { + "sinkName": { + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, "type": "string", - "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", - "location": "path", - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry." + "pattern": "^projects/[^/]+/sinks/[^/]+$", + "location": "path" + }, + "uniqueWriterIdentity": { + "location": "query", + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", + "type": "boolean" } - } - }, - "list": { - "flatPath": "v2/billingAccounts/{billingAccountsId}/logs", - "path": "v2/{+parent}/logs", - "id": "logging.billingAccounts.logs.list", - "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", - "response": { - "$ref": "ListLogsResponse" }, + "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}" + }, + "create": { + "httpMethod": "POST", "parameterOrder": [ "parent" ], - "httpMethod": "GET", + "response": { + "$ref": "LogSink" + }, "parameters": { - "pageToken": { - "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." - }, - "pageSize": { - "type": "integer", - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" - }, "parent": { + "pattern": "^projects/[^/]+$", "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", "required": true, - "type": "string", - "pattern": "^billingAccounts/[^/]+$" + "type": "string" + }, + "uniqueWriterIdentity": { + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "type": "boolean", + "location": "query" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ] + "https://www.googleapis.com/auth/logging.admin" + ], + "flatPath": "v2/projects/{projectsId}/sinks", + "id": "logging.projects.sinks.create", + "path": "v2/{+parent}/sinks", + "request": { + "$ref": "LogSink" + }, + "description": "Creates a sink that exports specified log entries to a destination. 
The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink." } } }, - "sinks": { + "metrics": { "methods": { "delete": { + "httpMethod": "DELETE", + "parameterOrder": [ + "metricName" + ], "response": { "$ref": "Empty" }, - "parameterOrder": [ - "sinkName" - ], - "httpMethod": "DELETE", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" ], "parameters": { - "sinkName": { + "metricName": { + "location": "path", + "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", "required": true, "type": "string", - "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", - "location": "path", - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist." + "pattern": "^projects/[^/]+/metrics/[^/]+$" } }, - "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.billingAccounts.sinks.delete", - "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted." + "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}", + "id": "logging.projects.metrics.delete", + "path": "v2/{+metricName}", + "description": "Deletes a logs-based metric." }, "list": { - "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", - "id": "logging.billingAccounts.sinks.list", - "path": "v2/{+parent}/sinks", - "description": "Lists sinks.", - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListSinksResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], "parameters": { "parent": { - "required": true, - "type": "string", - "pattern": "^billingAccounts/[^/]+$", + "pattern": "^projects/[^/]+$", "location": "path", - "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\"." + "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", + "required": true, + "type": "string" }, "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." 
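[Editor's note — usage sketch, not part of the vendored change] The hunk above covers logging.projects.sinks.create/update and their uniqueWriterIdentity query parameter. As a rough illustration only: the generated Go client in google.golang.org/api/logging/v2 (vendored by this change) is expected to expose that method roughly as below. The project ID, sink name, destination bucket, and filter are placeholder values, and the scope/field names are assumed to follow the standard generated-client naming.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the logging.admin scope,
	// one of the scopes the discovery document lists for sinks.create.
	client, err := google.DefaultClient(ctx, logging.LoggingAdminScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := logging.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder parent and destination values for illustration only.
	parent := "projects/my-project"
	sink := &logging.LogSink{
		Name:        "my-sink-id",
		Destination: "storage.googleapis.com/my-export-bucket",
		Filter:      "severity>=ERROR",
	}

	// uniqueWriterIdentity=true asks the API to mint a dedicated service
	// account for this sink (see the writer_identity discussion above).
	created, err := svc.Projects.Sinks.Create(parent, sink).
		UniqueWriterIdentity(true).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("writer identity:", created.WriterIdentity)
}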
+ "location": "query" }, "pageSize": { - "type": "integer", "location": "query", "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" + "format": "int32", + "type": "integer" } - } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2/projects/{projectsId}/metrics", + "id": "logging.projects.metrics.list", + "path": "v2/{+parent}/metrics", + "description": "Lists logs-based metrics.", + "httpMethod": "GET", + "response": { + "$ref": "ListLogMetricsResponse" + }, + "parameterOrder": [ + "parent" + ] }, "get": { - "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.billingAccounts.sinks.get", - "description": "Gets a sink.", + "description": "Gets a logs-based metric.", + "httpMethod": "GET", "response": { - "$ref": "LogSink" + "$ref": "LogMetric" }, "parameterOrder": [ - "sinkName" + "metricName" ], - "httpMethod": "GET", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", @@ -1398,155 +1412,150 @@ "https://www.googleapis.com/auth/logging.read" ], "parameters": { - "sinkName": { + "metricName": { + "pattern": "^projects/[^/]+/metrics/[^/]+$", "location": "path", - "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", "required": true, - "type": "string", - "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$" + "type": "string" } - } + }, + "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}", + "id": "logging.projects.metrics.get", + "path": "v2/{+metricName}" }, "update": { - "request": { - "$ref": "LogSink" + "response": { + "$ref": "LogMetric" }, - "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", - "httpMethod": "PUT", "parameterOrder": [ - "sinkName" + "metricName" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" ], - "response": { - "$ref": "LogSink" - }, "parameters": { - "sinkName": { + "metricName": { + "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", "required": true, "type": "string", - "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", - "location": "path", - "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\"." - }, - "uniqueWriterIdentity": { - "location": "query", - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", - "type": "boolean" + "pattern": "^projects/[^/]+/metrics/[^/]+$", + "location": "path" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", - "id": "logging.billingAccounts.sinks.update", - "path": "v2/{+sinkName}" + "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}", + "path": "v2/{+metricName}", + "id": "logging.projects.metrics.update", + "description": "Creates or updates a logs-based metric.", + "request": { + "$ref": "LogMetric" + } }, "create": { - "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", - "path": "v2/{+parent}/sinks", - "id": "logging.billingAccounts.sinks.create", - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", - "request": { - "$ref": "LogSink" - }, "response": { - "$ref": "LogSink" + "$ref": "LogMetric" }, "parameterOrder": [ "parent" ], "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], "parameters": { "parent": { "location": "path", - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", "required": true, "type": "string", - "pattern": "^billingAccounts/[^/]+$" - }, - "uniqueWriterIdentity": { - "location": "query", - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. 
For more information, see writer_identity in LogSink.", - "type": "boolean" + "pattern": "^projects/[^/]+$" } - } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "flatPath": "v2/projects/{projectsId}/metrics", + "path": "v2/{+parent}/metrics", + "id": "logging.projects.metrics.create", + "request": { + "$ref": "LogMetric" + }, + "description": "Creates a logs-based metric." } } } } }, - "folders": { + "billingAccounts": { "resources": { "logs": { "methods": { "delete": { - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + "httpMethod": "DELETE", "response": { "$ref": "Empty" }, "parameterOrder": [ "logName" ], - "httpMethod": "DELETE", - "parameters": { - "logName": { - "location": "path", - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "required": true, - "type": "string", - "pattern": "^folders/[^/]+/logs/[^/]+$" - } - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], - "flatPath": "v2/folders/{foldersId}/logs/{logsId}", + "parameters": { + "logName": { + "required": true, + "type": "string", + "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", + "location": "path", + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry." + } + }, + "flatPath": "v2/billingAccounts/{billingAccountsId}/logs/{logsId}", + "id": "logging.billingAccounts.logs.delete", "path": "v2/{+logName}", - "id": "logging.folders.logs.delete" + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted." }, "list": { - "flatPath": "v2/folders/{foldersId}/logs", - "id": "logging.folders.logs.list", - "path": "v2/{+parent}/logs", - "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", - "httpMethod": "GET", "parameterOrder": [ "parent" ], "response": { "$ref": "ListLogsResponse" }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], + "httpMethod": "GET", "parameters": { + "parent": { + "pattern": "^billingAccounts/[^/]+$", + "location": "path", + "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string" + }, "pageToken": { + "description": "Optional. 
If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." + "location": "query" }, "pageSize": { - "type": "integer", "location": "query", "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" - }, - "parent": { - "required": true, - "type": "string", - "pattern": "^folders/[^/]+$", - "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n" + "format": "int32", + "type": "integer" } - } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2/billingAccounts/{billingAccountsId}/logs", + "path": "v2/{+parent}/logs", + "id": "logging.billingAccounts.logs.list", + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed." } } }, @@ -1554,105 +1563,98 @@ "methods": { "delete": { "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", - "httpMethod": "DELETE", - "parameterOrder": [ - "sinkName" - ], "response": { "$ref": "Empty" }, + "parameterOrder": [ + "sinkName" + ], + "httpMethod": "DELETE", "parameters": { "sinkName": { "required": true, "type": "string", - "pattern": "^folders/[^/]+/sinks/[^/]+$", + "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", "location": "path", - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist." + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\"." 
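[Editor's note — usage sketch, not part of the vendored change] The preceding hunks describe the logging.projects.metrics methods (create/update/list of logs-based metrics). A minimal sketch of metrics.create through the same generated client follows; the project ID, metric name, and filter are assumed placeholder values.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()

	// logging.write is one of the scopes the discovery document lists
	// for metrics.create.
	client, err := google.DefaultClient(ctx, logging.LoggingWriteScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := logging.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder project; this metric would count entries at ERROR or above.
	parent := "projects/my-project"
	metric := &logging.LogMetric{
		Name:        "error_count",
		Description: "Number of log entries at severity ERROR or higher",
		Filter:      "severity>=ERROR",
	}

	created, err := svc.Projects.Metrics.Create(parent, metric).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created metric:", created.Name)
}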
} }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], - "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", - "id": "logging.folders.sinks.delete", - "path": "v2/{+sinkName}" + "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", + "path": "v2/{+sinkName}", + "id": "logging.billingAccounts.sinks.delete" }, "list": { + "description": "Lists sinks.", + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], "response": { "$ref": "ListSinksResponse" }, - "parameterOrder": [ - "parent" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" ], - "httpMethod": "GET", "parameters": { "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." + "location": "query" }, "pageSize": { - "type": "integer", - "location": "query", "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" + "format": "int32", + "type": "integer", + "location": "query" }, "parent": { - "required": true, - "type": "string", - "pattern": "^folders/[^/]+$", + "pattern": "^billingAccounts/[^/]+$", "location": "path", - "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\"." + "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2/folders/{foldersId}/sinks", - "path": "v2/{+parent}/sinks", - "id": "logging.folders.sinks.list", - "description": "Lists sinks." + "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", + "id": "logging.billingAccounts.sinks.list", + "path": "v2/{+parent}/sinks" }, "get": { - "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.folders.sinks.get", - "description": "Gets a sink.", "response": { "$ref": "LogSink" }, + "httpMethod": "GET", "parameterOrder": [ "sinkName" ], - "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], "parameters": { "sinkName": { + "description": "Required. 
The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, "type": "string", - "pattern": "^folders/[^/]+/sinks/[^/]+$", - "location": "path", - "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\"." + "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", + "location": "path" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ] + "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", + "id": "logging.billingAccounts.sinks.get", + "path": "v2/{+sinkName}", + "description": "Gets a sink." }, "update": { - "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.folders.sinks.update", - "request": { - "$ref": "LogSink" - }, - "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", "response": { "$ref": "LogSink" }, @@ -1660,177 +1662,82 @@ "sinkName" ], "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], "parameters": { "sinkName": { + "location": "path", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, "type": "string", - "pattern": "^folders/[^/]+/sinks/[^/]+$", - "location": "path", - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\"." + "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$" }, "uniqueWriterIdentity": { "location": "query", - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", "type": "boolean" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ] + "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", + "path": "v2/{+sinkName}", + "id": "logging.billingAccounts.sinks.update", + "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", + "request": { + "$ref": "LogSink" + } }, "create": { - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], "response": { "$ref": "LogSink" }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], "parameters": { "parent": { - "location": "path", - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", "required": true, "type": "string", - "pattern": "^folders/[^/]+$" + "pattern": "^billingAccounts/[^/]+$", + "location": "path" }, "uniqueWriterIdentity": { + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", "type": "boolean", - "location": "query", - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. 
For more information, see writer_identity in LogSink." + "location": "query" } }, - "flatPath": "v2/folders/{foldersId}/sinks", - "id": "logging.folders.sinks.create", + "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", "path": "v2/{+parent}/sinks", + "id": "logging.billingAccounts.sinks.create", "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", "request": { "$ref": "LogSink" } } } - } - } - }, - "monitoredResourceDescriptors": { - "methods": { - "list": { - "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging.", - "response": { - "$ref": "ListMonitoredResourceDescriptorsResponse" - }, - "parameterOrder": [], - "httpMethod": "GET", - "parameters": { - "pageToken": { - "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." - }, - "pageSize": { - "type": "integer", - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2/monitoredResourceDescriptors", - "path": "v2/monitoredResourceDescriptors", - "id": "logging.monitoredResourceDescriptors.list" - } - } - }, - "organizations": { - "resources": { - "logs": { - "methods": { - "list": { - "flatPath": "v2/organizations/{organizationsId}/logs", - "id": "logging.organizations.logs.list", - "path": "v2/{+parent}/logs", - "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListLogsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "pageToken": { - "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." - }, - "pageSize": { - "type": "integer", - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" - }, - "parent": { - "required": true, - "type": "string", - "pattern": "^organizations/[^/]+$", - "location": "path", - "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n" - } - } - }, - "delete": { - "flatPath": "v2/organizations/{organizationsId}/logs/{logsId}", - "id": "logging.organizations.logs.delete", - "path": "v2/{+logName}", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", - "httpMethod": "DELETE", - "parameterOrder": [ - "logName" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "logName": { - "location": "path", - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "required": true, - "type": "string", - "pattern": "^organizations/[^/]+/logs/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ] - } - } - }, - "sinks": { + } + } + }, + "folders": { + "resources": { + "logs": { "methods": { "list": { - "response": { - "$ref": "ListSinksResponse" - }, + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", "parameterOrder": [ "parent" ], + "response": { + "$ref": "ListLogsResponse" + }, "httpMethod": "GET", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -1840,11 +1747,11 @@ ], "parameters": { "parent": { + "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", "required": true, "type": "string", - "pattern": "^organizations/[^/]+$", - "location": "path", - "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\"." + "pattern": "^folders/[^/]+$", + "location": "path" }, "pageToken": { "location": "query", @@ -1852,50 +1759,51 @@ "type": "string" }, "pageSize": { - "type": "integer", - "location": "query", "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32" + "format": "int32", + "type": "integer", + "location": "query" } }, - "flatPath": "v2/organizations/{organizationsId}/sinks", - "path": "v2/{+parent}/sinks", - "id": "logging.organizations.sinks.list", - "description": "Lists sinks." + "flatPath": "v2/folders/{foldersId}/logs", + "path": "v2/{+parent}/logs", + "id": "logging.folders.logs.list" }, - "get": { + "delete": { + "flatPath": "v2/folders/{foldersId}/logs/{logsId}", + "path": "v2/{+logName}", + "id": "logging.folders.logs.delete", + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", "response": { - "$ref": "LogSink" + "$ref": "Empty" }, "parameterOrder": [ - "sinkName" + "logName" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" ], - "httpMethod": "GET", "parameters": { - "sinkName": { - "location": "path", - "description": "Required. 
The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "logName": { + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", "required": true, "type": "string", - "pattern": "^organizations/[^/]+/sinks/[^/]+$" + "pattern": "^folders/[^/]+/logs/[^/]+$", + "location": "path" } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.organizations.sinks.get", - "description": "Gets a sink." - }, + } + } + } + }, + "sinks": { + "methods": { "update": { + "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", "request": { "$ref": "LogSink" }, - "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", "response": { "$ref": "LogSink" }, @@ -1903,27 +1811,27 @@ "sinkName" ], "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], "parameters": { "sinkName": { + "pattern": "^folders/[^/]+/sinks/[^/]+$", "location": "path", - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, - "type": "string", - "pattern": "^organizations/[^/]+/sinks/[^/]+$" + "type": "string" }, "uniqueWriterIdentity": { + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", "type": "boolean", - "location": "query", - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false." + "location": "query" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", + "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", "path": "v2/{+sinkName}", - "id": "logging.organizations.sinks.update" + "id": "logging.folders.sinks.update" }, "create": { "request": { @@ -1939,86 +1847,202 @@ "httpMethod": "POST", "parameters": { "parent": { - "location": "path", - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", "required": true, "type": "string", - "pattern": "^organizations/[^/]+$" + "pattern": "^folders/[^/]+$", + "location": "path" }, "uniqueWriterIdentity": { "type": "boolean", "location": "query", - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink." + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. 
For more information, see writer_identity in LogSink." } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ], - "flatPath": "v2/organizations/{organizationsId}/sinks", + "flatPath": "v2/folders/{foldersId}/sinks", "path": "v2/{+parent}/sinks", - "id": "logging.organizations.sinks.create" + "id": "logging.folders.sinks.create" }, "delete": { - "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", - "path": "v2/{+sinkName}", - "id": "logging.organizations.sinks.delete", "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", + "httpMethod": "DELETE", + "parameterOrder": [ + "sinkName" + ], "response": { "$ref": "Empty" }, + "parameters": { + "sinkName": { + "location": "path", + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^folders/[^/]+/sinks/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", + "id": "logging.folders.sinks.delete", + "path": "v2/{+sinkName}" + }, + "list": { + "description": "Lists sinks.", + "response": { + "$ref": "ListSinksResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "parameters": { + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "parent": { + "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^folders/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2/folders/{foldersId}/sinks", + "path": "v2/{+parent}/sinks", + "id": "logging.folders.sinks.list" + }, + "get": { + "response": { + "$ref": "LogSink" + }, "parameterOrder": [ "sinkName" ], - "httpMethod": "DELETE", + "httpMethod": "GET", "parameters": { "sinkName": { "location": "path", - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. 
Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", + "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", "required": true, "type": "string", - "pattern": "^organizations/[^/]+/sinks/[^/]+$" + "pattern": "^folders/[^/]+/sinks/[^/]+$" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ] + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", + "path": "v2/{+sinkName}", + "id": "logging.folders.sinks.get", + "description": "Gets a sink." } } } } + }, + "monitoredResourceDescriptors": { + "methods": { + "list": { + "httpMethod": "GET", + "response": { + "$ref": "ListMonitoredResourceDescriptorsResponse" + }, + "parameterOrder": [], + "parameters": { + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2/monitoredResourceDescriptors", + "id": "logging.monitoredResourceDescriptors.list", + "path": "v2/monitoredResourceDescriptors", + "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging." + } + } } }, "parameters": { - "upload_protocol": { + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "bearer_token": { + "type": "string", "location": "query", + "description": "OAuth bearer token." + }, + "upload_protocol": { "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string" + "type": "string", + "location": "query" }, "prettyPrint": { - "location": "query", "description": "Returns response with indentations and line breaks.", "type": "boolean", - "default": "true" + "default": "true", + "location": "query" }, "fields": { - "type": "string", "location": "query", - "description": "Selector specifying which fields to include in a partial response." 
+ "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, "uploadType": { - "type": "string", - "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\")." - }, - "callback": { "location": "query", - "description": "JSONP", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", "type": "string" }, "$.xgafv": { - "location": "query", "enum": [ "1", "2" @@ -2028,15 +2052,15 @@ "enumDescriptions": [ "v1 error format", "v2 error format" - ] - }, - "alt": { - "enum": [ - "json", - "media", - "proto" ], + "location": "query" + }, + "callback": { + "description": "JSONP", "type": "string", + "location": "query" + }, + "alt": { "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", @@ -2044,47 +2068,23 @@ ], "location": "query", "description": "Data format for response.", - "default": "json" - }, - "key": { - "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], "type": "string" }, "access_token": { - "type": "string", - "location": "query", - "description": "OAuth access token." - }, - "quotaUser": { - "type": "string", - "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." - }, - "pp": { - "location": "query", - "description": "Pretty-print response.", - "type": "boolean", - "default": "true" - }, - "bearer_token": { "location": "query", - "description": "OAuth bearer token.", + "description": "OAuth access token.", "type": "string" }, - "oauth_token": { - "type": "string", + "key": { "location": "query", - "description": "OAuth 2.0 token for the current user." + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "type": "string" } - }, - "version": "v2", - "baseUrl": "https://logging.googleapis.com/", - "description": "Writes log entries and manages your Stackdriver Logging configuration.", - "kind": "discovery#restDescription", - "servicePath": "", - "basePath": "", - "documentationLink": "https://cloud.google.com/logging/docs/", - "revision": "20170114", - "id": "logging:v2" + } } diff --git a/vendor/google.golang.org/api/logging/v2/logging-gen.go b/vendor/google.golang.org/api/logging/v2/logging-gen.go index 36604524d..9924b6344 100644 --- a/vendor/google.golang.org/api/logging/v2/logging-gen.go +++ b/vendor/google.golang.org/api/logging/v2/logging-gen.go @@ -78,9 +78,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BillingAccounts *BillingAccountsService @@ -102,6 +103,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBillingAccountsService(s *Service) *BillingAccountsService { rs := &BillingAccountsService{s: s} rs.Logs = NewBillingAccountsLogsService(s) @@ -443,13 +448,15 @@ type ListLogEntriesRequest struct { // list of resources in resource_names. ProjectIds []string `json:"projectIds,omitempty"` - // ResourceNames: Required. Names of one or more resources from which to - // retrieve log + // ResourceNames: Required. Names of one or more parent resources from + // which to retrieve log // entries: // "projects/[PROJECT_ID]" // "organizations/[ORGANIZATION_ID]" - // Pro - // jects listed in the project_ids field are added to this list. + // "bi + // llingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + // Projects listed in the project_ids field are added to this list. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "Filter") to @@ -701,6 +708,9 @@ type LogEntry struct { // "projects/[PROJECT_ID]/logs/[LOG_ID]" // "organizations/[ORGANIZ // ATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[L + // OG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" // [LOG_ID] must be URL-encoded within log_name. Example: // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa // ctivity". [LOG_ID] must be less than 512 characters long and can only @@ -993,7 +1003,7 @@ func (s *LogMetric) MarshalJSON() ([]byte, error) { // following destinations in any project: a Cloud Storage bucket, a // BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls // which log entries are exported. The sink must be created within a -// project or organization. +// project, organization, billing account, or folder. type LogSink struct { // Destination: Required. The export // destination: @@ -1454,6 +1464,9 @@ type WriteLogEntriesRequest struct { // "projects/[PROJECT_ID]/logs/[LOG_ID]" // "organizations/[ORGANI // ZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[ + // LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" // [LOG_ID] must be URL-encoded. 
For example, // "projects/my-project-id/logs/syslog" or // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa @@ -1519,7 +1532,8 @@ type BillingAccountsLogsDeleteCall struct { } // Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. func (r *BillingAccountsLogsService) Delete(logName string) *BillingAccountsLogsDeleteCall { c := &BillingAccountsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.logName = logName @@ -1557,6 +1571,7 @@ func (c *BillingAccountsLogsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") @@ -1607,7 +1622,7 @@ func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", // "flatPath": "v2/billingAccounts/{billingAccountsId}/logs/{logsId}", // "httpMethod": "DELETE", // "id": "logging.billingAccounts.logs.delete", @@ -1616,7 +1631,7 @@ func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", // "required": true, @@ -1646,8 +1661,8 @@ type BillingAccountsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects or organizations. Only logs that -// have entries are listed. +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. 
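A minimal sketch (not part of this change set) of how a caller might use the regenerated logging/v2 client after this update to list logs under a billing-account parent, now that the logs.list methods document billingAccounts/... and folders/... parents. The parent value, scope choice, and error handling are illustrative only, and ListLogsResponse.LogNames is assumed to match the upstream package; it is not shown in this hunk.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with a read-only logging scope
	// (one of the scopes listed for the logs.list methods in this diff).
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/logging.read")
	if err != nil {
		log.Fatal(err)
	}

	svc, err := logging.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// "billingAccounts/012345-ABCDEF-678901" is a placeholder parent;
	// "projects/..." or "folders/..." parents work the same way.
	resp, err := svc.BillingAccounts.Logs.List("billingAccounts/012345-ABCDEF-678901").Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range resp.LogNames { // LogNames assumed from the upstream ListLogsResponse
		fmt.Println(name)
	}
}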
func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall { c := &BillingAccountsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -1714,6 +1729,7 @@ func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1767,7 +1783,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog } return ret, nil // { - // "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", // "flatPath": "v2/billingAccounts/{billingAccountsId}/logs", // "httpMethod": "GET", // "id": "logging.billingAccounts.logs.list", @@ -1787,7 +1803,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -1857,11 +1873,11 @@ func (r *BillingAccountsSinksService) Create(parent string, logsink *LogSink) *B // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set // to false, and if the sink's parent is a project, then the value -// returned as writer_identity is cloud-logs@google.com, the same -// identity used before the addition of writer identities to this API. -// The sink's destination must be in the same project as the sink -// itself.If this field is set to true, or if the sink is owned by a -// non-project resource such as an organization, then the value of +// returned as writer_identity is the same group or service account used +// by Stackdriver Logging before the addition of writer identities to +// this API. The sink's destination must be in the same project as the +// sink itself.If this field is set to true, or if the sink is owned by +// a non-project resource such as an organization, then the value of // writer_identity will be a unique service account used only for // exports from the new sink. For more information, see writer_identity // in LogSink. @@ -1901,6 +1917,7 @@ func (c *BillingAccountsSinksCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -1965,14 +1982,14 @@ func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogS // ], // "parameters": { // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. 
The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", // "location": "query", // "type": "boolean" // } @@ -2041,6 +2058,7 @@ func (c *BillingAccountsSinksDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") @@ -2100,7 +2118,7 @@ func (c *BillingAccountsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empt // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", + // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", // "required": true, @@ -2178,6 +2196,7 @@ func (c *BillingAccountsSinksGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2240,7 +2259,7 @@ func (c *BillingAccountsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink // ], // "parameters": { // "sinkName": { - // "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", // "required": true, @@ -2339,6 +2358,7 @@ func (c *BillingAccountsSinksListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2412,7 +2432,7 @@ func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSi // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -2486,9 +2506,9 @@ func (r *BillingAccountsSinksService) Update(sinkNameid string, logsink *LogSink // values of this field: // If the old and new values of this field are both false or both true, // then there is no change to the sink's writer_identity. -// If the old value was false and the new value is true, then +// If the old value is false and the new value is true, then // writer_identity is changed to a unique service account. -// It is an error if the old value was true and the new value is false. +// It is an error if the old value is true and the new value is false. 
func (c *BillingAccountsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksUpdateCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -2525,6 +2545,7 @@ func (c *BillingAccountsSinksUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -2589,14 +2610,14 @@ func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogS // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", // "location": "query", // "type": "boolean" // } @@ -2666,6 +2687,7 @@ func (c *EntriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.listlogentriesrequest) if err != nil { @@ -2811,6 +2833,7 @@ func (c *EntriesWriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest) if err != nil { @@ -2896,7 +2919,8 @@ type FoldersLogsDeleteCall struct { } // Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. +// receives new entries. 
Log entries written shortly before the delete +// operation might not be deleted. func (r *FoldersLogsService) Delete(logName string) *FoldersLogsDeleteCall { c := &FoldersLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.logName = logName @@ -2934,6 +2958,7 @@ func (c *FoldersLogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") @@ -2984,7 +3009,7 @@ func (c *FoldersLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", // "flatPath": "v2/folders/{foldersId}/logs/{logsId}", // "httpMethod": "DELETE", // "id": "logging.folders.logs.delete", @@ -2993,7 +3018,7 @@ func (c *FoldersLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^folders/[^/]+/logs/[^/]+$", // "required": true, @@ -3023,8 +3048,8 @@ type FoldersLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects or organizations. Only logs that -// have entries are listed. +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. func (r *FoldersLogsService) List(parent string) *FoldersLogsListCall { c := &FoldersLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3091,6 +3116,7 @@ func (c *FoldersLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3144,7 +3170,7 @@ func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespons } return ret, nil // { - // "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. 
Only logs that have entries are listed.", // "flatPath": "v2/folders/{foldersId}/logs", // "httpMethod": "GET", // "id": "logging.folders.logs.list", @@ -3164,7 +3190,7 @@ func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespons // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -3234,11 +3260,11 @@ func (r *FoldersSinksService) Create(parent string, logsink *LogSink) *FoldersSi // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set // to false, and if the sink's parent is a project, then the value -// returned as writer_identity is cloud-logs@google.com, the same -// identity used before the addition of writer identities to this API. -// The sink's destination must be in the same project as the sink -// itself.If this field is set to true, or if the sink is owned by a -// non-project resource such as an organization, then the value of +// returned as writer_identity is the same group or service account used +// by Stackdriver Logging before the addition of writer identities to +// this API. The sink's destination must be in the same project as the +// sink itself.If this field is set to true, or if the sink is owned by +// a non-project resource such as an organization, then the value of // writer_identity will be a unique service account used only for // exports from the new sink. For more information, see writer_identity // in LogSink. @@ -3278,6 +3304,7 @@ func (c *FoldersSinksCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -3342,14 +3369,14 @@ func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, err // ], // "parameters": { // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. 
The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", // "location": "query", // "type": "boolean" // } @@ -3418,6 +3445,7 @@ func (c *FoldersSinksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") @@ -3477,7 +3505,7 @@ func (c *FoldersSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", + // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^folders/[^/]+/sinks/[^/]+$", // "required": true, @@ -3555,6 +3583,7 @@ func (c *FoldersSinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3617,7 +3646,7 @@ func (c *FoldersSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) // ], // "parameters": { // "sinkName": { - // "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. 
The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^folders/[^/]+/sinks/[^/]+$", // "required": true, @@ -3716,6 +3745,7 @@ func (c *FoldersSinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3789,7 +3819,7 @@ func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksRespo // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -3863,9 +3893,9 @@ func (r *FoldersSinksService) Update(sinkNameid string, logsink *LogSink) *Folde // values of this field: // If the old and new values of this field are both false or both true, // then there is no change to the sink's writer_identity. -// If the old value was false and the new value is true, then +// If the old value is false and the new value is true, then // writer_identity is changed to a unique service account. -// It is an error if the old value was true and the new value is false. +// It is an error if the old value is true and the new value is false. func (c *FoldersSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksUpdateCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -3902,6 +3932,7 @@ func (c *FoldersSinksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -3966,14 +3997,14 @@ func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, err // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^folders/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", // "location": "query", // "type": "boolean" // } @@ -4070,6 +4101,7 @@ func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4186,7 +4218,8 @@ type OrganizationsLogsDeleteCall struct { } // Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. func (r *OrganizationsLogsService) Delete(logName string) *OrganizationsLogsDeleteCall { c := &OrganizationsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.logName = logName @@ -4224,6 +4257,7 @@ func (c *OrganizationsLogsDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") @@ -4274,7 +4308,7 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", // "flatPath": "v2/organizations/{organizationsId}/logs/{logsId}", // "httpMethod": "DELETE", // "id": "logging.organizations.logs.delete", @@ -4283,7 +4317,7 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. 
For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^organizations/[^/]+/logs/[^/]+$", // "required": true, @@ -4313,8 +4347,8 @@ type OrganizationsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects or organizations. Only logs that -// have entries are listed. +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall { c := &OrganizationsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -4381,6 +4415,7 @@ func (c *OrganizationsLogsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4434,7 +4469,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR } return ret, nil // { - // "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", // "flatPath": "v2/organizations/{organizationsId}/logs", // "httpMethod": "GET", // "id": "logging.organizations.logs.list", @@ -4454,7 +4489,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -4524,11 +4559,11 @@ func (r *OrganizationsSinksService) Create(parent string, logsink *LogSink) *Org // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set // to false, and if the sink's parent is a project, then the value -// returned as writer_identity is cloud-logs@google.com, the same -// identity used before the addition of writer identities to this API. -// The sink's destination must be in the same project as the sink -// itself.If this field is set to true, or if the sink is owned by a -// non-project resource such as an organization, then the value of +// returned as writer_identity is the same group or service account used +// by Stackdriver Logging before the addition of writer identities to +// this API. The sink's destination must be in the same project as the +// sink itself.If this field is set to true, or if the sink is owned by +// a non-project resource such as an organization, then the value of // writer_identity will be a unique service account used only for // exports from the new sink. For more information, see writer_identity // in LogSink. 
@@ -4568,6 +4603,7 @@ func (c *OrganizationsSinksCreateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -4632,14 +4668,14 @@ func (c *OrganizationsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSin // ], // "parameters": { // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", // "location": "query", // "type": "boolean" // } @@ -4708,6 +4744,7 @@ func (c *OrganizationsSinksDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") @@ -4767,7 +4804,7 @@ func (c *OrganizationsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", + // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^organizations/[^/]+/sinks/[^/]+$", // "required": true, @@ -4845,6 +4882,7 @@ func (c *OrganizationsSinksGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4907,7 +4945,7 @@ func (c *OrganizationsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, // ], // "parameters": { // "sinkName": { - // "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^organizations/[^/]+/sinks/[^/]+$", // "required": true, @@ -5006,6 +5044,7 @@ func (c *OrganizationsSinksListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5079,7 +5118,7 @@ func (c *OrganizationsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSink // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -5153,9 +5192,9 @@ func (r *OrganizationsSinksService) Update(sinkNameid string, logsink *LogSink) // values of this field: // If the old and new values of this field are both false or both true, // then there is no change to the sink's writer_identity. -// If the old value was false and the new value is true, then +// If the old value is false and the new value is true, then // writer_identity is changed to a unique service account. -// It is an error if the old value was true and the new value is false. +// It is an error if the old value is true and the new value is false. 
func (c *OrganizationsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksUpdateCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -5192,6 +5231,7 @@ func (c *OrganizationsSinksUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -5256,14 +5296,14 @@ func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSin // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^organizations/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", // "location": "query", // "type": "boolean" // } @@ -5294,7 +5334,8 @@ type ProjectsLogsDeleteCall struct { } // Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. 
func (r *ProjectsLogsService) Delete(logName string) *ProjectsLogsDeleteCall { c := &ProjectsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.logName = logName @@ -5332,6 +5373,7 @@ func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") @@ -5382,7 +5424,7 @@ func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", // "flatPath": "v2/projects/{projectsId}/logs/{logsId}", // "httpMethod": "DELETE", // "id": "logging.projects.logs.delete", @@ -5391,7 +5433,7 @@ func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^projects/[^/]+/logs/[^/]+$", // "required": true, @@ -5421,8 +5463,8 @@ type ProjectsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects or organizations. Only logs that -// have entries are listed. +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. func (r *ProjectsLogsService) List(parent string) *ProjectsLogsListCall { c := &ProjectsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -5489,6 +5531,7 @@ func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5542,7 +5585,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon } return ret, nil // { - // "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. 
Only logs that have entries are listed.", // "flatPath": "v2/projects/{projectsId}/logs", // "httpMethod": "GET", // "id": "logging.projects.logs.list", @@ -5562,7 +5605,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -5654,6 +5697,7 @@ func (c *ProjectsMetricsCreateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric) if err != nil { @@ -5789,6 +5833,7 @@ func (c *ProjectsMetricsDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+metricName}") @@ -5927,6 +5972,7 @@ func (c *ProjectsMetricsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6088,6 +6134,7 @@ func (c *ProjectsMetricsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6253,6 +6300,7 @@ func (c *ProjectsMetricsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric) if err != nil { @@ -6368,11 +6416,11 @@ func (r *ProjectsSinksService) Create(parent string, logsink *LogSink) *Projects // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set // to false, and if the sink's parent is a project, then the value -// returned as writer_identity is cloud-logs@google.com, the same -// identity used before the addition of writer identities to this API. -// The sink's destination must be in the same project as the sink -// itself.If this field is set to true, or if the sink is owned by a -// non-project resource such as an organization, then the value of +// returned as writer_identity is the same group or service account used +// by Stackdriver Logging before the addition of writer identities to +// this API. The sink's destination must be in the same project as the +// sink itself.If this field is set to true, or if the sink is owned by +// a non-project resource such as an organization, then the value of // writer_identity will be a unique service account used only for // exports from the new sink. For more information, see writer_identity // in LogSink. 
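Editorial aside for reviewers (not part of the vendored diff): a minimal sketch of creating a sink with uniqueWriterIdentity=true, matching the revised doc comment above. It assumes Application Default Credentials; the project, bucket, and sink names are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, logging.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := logging.New(client)
	if err != nil {
		log.Fatal(err)
	}

	sink := &logging.LogSink{
		Name:        "my-errors-to-gcs",
		Destination: "storage.googleapis.com/my-export-bucket",
		Filter:      `severity>=ERROR`,
	}

	// With UniqueWriterIdentity(true) the returned sink carries a dedicated
	// service account in WriterIdentity; that identity still needs write
	// access on the destination before exports succeed.
	created, err := svc.Projects.Sinks.Create("projects/my-project", sink).
		UniqueWriterIdentity(true).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("writer identity:", created.WriterIdentity)
}
```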
@@ -6412,6 +6460,7 @@ func (c *ProjectsSinksCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -6476,14 +6525,14 @@ func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // ], // "parameters": { // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", // "location": "query", // "type": "boolean" // } @@ -6552,6 +6601,7 @@ func (c *ProjectsSinksDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") @@ -6611,7 +6661,7 @@ func (c *ProjectsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, erro // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", + // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, @@ -6689,6 +6739,7 @@ func (c *ProjectsSinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6751,7 +6802,7 @@ func (c *ProjectsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error // ], // "parameters": { // "sinkName": { - // "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, @@ -6850,6 +6901,7 @@ func (c *ProjectsSinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6923,7 +6975,7 @@ func (c *ProjectsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResp // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -6997,9 +7049,9 @@ func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *Proj // values of this field: // If the old and new values of this field are both false or both true, // then there is no change to the sink's writer_identity. -// If the old value was false and the new value is true, then +// If the old value is false and the new value is true, then // writer_identity is changed to a unique service account. -// It is an error if the old value was true and the new value is false. +// It is an error if the old value is true and the new value is false. 
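Editorial aside for reviewers (not part of the vendored diff): a minimal sketch of updating an existing sink while switching it to a unique writer identity, following the old/new value rules in the doc comment above (false to true upgrades the identity; true to false is rejected). It assumes Application Default Credentials and placeholder resource names.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, logging.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := logging.New(client)
	if err != nil {
		log.Fatal(err)
	}

	sinkName := "projects/my-project/sinks/my-errors-to-gcs"
	sink := &logging.LogSink{
		Name:        "my-errors-to-gcs",
		Destination: "storage.googleapis.com/my-export-bucket",
		Filter:      `severity>=ERROR AND resource.type="gce_instance"`,
	}

	// false -> true: writer_identity is replaced with a unique service account.
	// Requesting true -> false on a sink that already has a unique identity
	// returns an error, per the documentation above.
	updated, err := svc.Projects.Sinks.Update(sinkName, sink).
		UniqueWriterIdentity(true).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("writer identity:", updated.WriterIdentity)
}
```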
func (c *ProjectsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksUpdateCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -7036,6 +7088,7 @@ func (c *ProjectsSinksUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -7100,14 +7153,14 @@ func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", + // "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", // "location": "query", // "type": "boolean" // } diff --git a/vendor/google.golang.org/api/logging/v2beta1/logging-api.json b/vendor/google.golang.org/api/logging/v2beta1/logging-api.json index b8fff7239..7cd4d8b2f 100644 --- a/vendor/google.golang.org/api/logging/v2beta1/logging-api.json +++ b/vendor/google.golang.org/api/logging/v2beta1/logging-api.json @@ -1,771 +1,532 @@ { - "canonicalName": "Logging", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" + "version": "v2beta1", + "baseUrl": "https://logging.googleapis.com/", + "servicePath": "", + "description": "Writes log entries and manages your Stackdriver Logging configuration.", + "kind": "discovery#restDescription", + "basePath": "", + "id": "logging:v2beta1", + "documentationLink": "https://cloud.google.com/logging/docs/", + "revision": "20170220", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "ListLogEntriesRequest": { + "description": "The parameters to ListLogEntries.", + "type": "object", + "properties": { + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" }, - "https://www.googleapis.com/auth/logging.admin": { - "description": "Administrate log data for your projects" + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" }, - "https://www.googleapis.com/auth/logging.read": { - "description": "View log data for your projects" + "orderBy": { + "description": "Optional. How the results should be sorted. Presently, the only permitted values are \"timestamp asc\" (default) and \"timestamp desc\". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of LogEntry.insertId.", + "type": "string" }, - "https://www.googleapis.com/auth/logging.write": { - "description": "Submit log data for your projects" + "resourceNames": { + "description": "Required. Names of one or more parent resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nProjects listed in the project_ids field are added to this list.", + "type": "array", + "items": { + "type": "string" + } }, - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "projectIds": { + "description": "Deprecated. 
Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\". If present, these project identifiers are converted to resource name format and added to the list of resources in resource_names.", + "type": "array", + "items": { + "type": "string" + } + }, + "filter": { + "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", + "type": "string" } - } - } - }, - "rootUrl": "https://logging.googleapis.com/", - "ownerDomain": "google.com", - "name": "logging", - "batchPath": "batch", - "title": "Stackdriver Logging API", - "ownerName": "Google", - "resources": { - "projects": { - "resources": { - "logs": { - "methods": { - "delete": { - "path": "v2beta1/{+logName}", - "id": "logging.projects.logs.delete", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", - "parameterOrder": [ - "logName" - ], - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "parameters": { - "logName": { - "pattern": "^projects/[^/]+/logs/[^/]+$", - "location": "path", - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/projects/{projectsId}/logs/{logsId}" - }, - "list": { - "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", - "httpMethod": "GET", - "response": { - "$ref": "ListLogsResponse" - }, - "parameterOrder": [ - "parent" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "parent": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", - "required": true, - "type": "string" - }, - "pageToken": { - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/logs", - "id": "logging.projects.logs.list", - "path": "v2beta1/{+parent}/logs" - } + }, + "id": "ListLogEntriesRequest" + }, + "RequestLog": { + "description": "Complete log information about a single HTTP request to an App Engine application.", + "type": "object", + "properties": { + "userAgent": { + "description": "User agent that made the request.", + "type": "string" + }, + "wasLoadingRequest": { + "description": "Whether this was a loading request for the instance.", + "type": "boolean" + }, + "sourceReference": { + "description": "Source code for the application that handled this request. There can be more than one source reference per deployed application if source code is distributed among multiple repositories.", + "type": "array", + "items": { + "$ref": "SourceReference" } }, - "sinks": { - "methods": { - "delete": { - "path": "v2beta1/{+sinkName}", - "id": "logging.projects.sinks.delete", - "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", - "response": { - "$ref": "Empty" - }, - "httpMethod": "DELETE", - "parameterOrder": [ - "sinkName" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "parameters": { - "sinkName": { - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path", - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", - "required": true, - "type": "string" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}" - }, - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListSinksResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - }, - "parent": { - "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks", - "id": "logging.projects.sinks.list", - "path": "v2beta1/{+parent}/sinks", - "description": "Lists sinks." 
- }, - "get": { - "description": "Gets a sink.", - "response": { - "$ref": "LogSink" - }, - "parameterOrder": [ - "sinkName" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "sinkName": { - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path", - "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "required": true, - "type": "string" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - "path": "v2beta1/{+sinkName}", - "id": "logging.projects.sinks.get" - }, - "update": { - "httpMethod": "PUT", - "parameterOrder": [ - "sinkName" - ], - "response": { - "$ref": "LogSink" - }, - "parameters": { - "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path" - }, - "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", - "type": "boolean", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - "id": "logging.projects.sinks.update", - "path": "v2beta1/{+sinkName}", - "request": { - "$ref": "LogSink" - }, - "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field." - }, - "create": { - "response": { - "$ref": "LogSink" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "parameters": { - "parent": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", - "required": true, - "type": "string" - }, - "uniqueWriterIdentity": { - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. 
If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", - "type": "boolean", - "location": "query" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks", - "path": "v2beta1/{+parent}/sinks", - "id": "logging.projects.sinks.create", - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", - "request": { - "$ref": "LogSink" - } - } - } + "responseSize": { + "description": "Size in bytes sent back to client by request.", + "format": "int64", + "type": "string" }, - "metrics": { - "methods": { - "create": { - "response": { - "$ref": "LogMetric" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "parameters": { - "parent": { - "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics", - "path": "v2beta1/{+parent}/metrics", - "id": "logging.projects.metrics.create", - "request": { - "$ref": "LogMetric" - }, - "description": "Creates a logs-based metric." - }, - "delete": { - "path": "v2beta1/{+metricName}", - "id": "logging.projects.metrics.delete", - "description": "Deletes a logs-based metric.", - "parameterOrder": [ - "metricName" - ], - "response": { - "$ref": "Empty" - }, - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "parameters": { - "metricName": { - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path", - "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - "required": true, - "type": "string" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}" - }, - "list": { - "id": "logging.projects.metrics.list", - "path": "v2beta1/{+parent}/metrics", - "description": "Lists logs-based metrics.", - "httpMethod": "GET", - "response": { - "$ref": "ListLogMetricsResponse" - }, - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", - "required": true, - "type": "string" - }, - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. 
pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics" - }, - "get": { - "response": { - "$ref": "LogMetric" - }, - "parameterOrder": [ - "metricName" - ], - "httpMethod": "GET", - "parameters": { - "metricName": { - "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - "path": "v2beta1/{+metricName}", - "id": "logging.projects.metrics.get", - "description": "Gets a logs-based metric." - }, - "update": { - "parameters": { - "metricName": { - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path", - "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - "id": "logging.projects.metrics.update", - "path": "v2beta1/{+metricName}", - "request": { - "$ref": "LogMetric" - }, - "description": "Creates or updates a logs-based metric.", - "httpMethod": "PUT", - "parameterOrder": [ - "metricName" - ], - "response": { - "$ref": "LogMetric" - } - } - } - } - } - }, - "billingAccounts": { - "resources": { - "logs": { - "methods": { - "delete": { - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "logName" - ], - "parameters": { - "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", - "required": true, - "type": "string", - "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs/{logsId}", - "id": "logging.billingAccounts.logs.delete", - "path": "v2beta1/{+logName}" - }, - "list": { - "parameters": { - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - }, - "parent": { - "pattern": "^billingAccounts/[^/]+$", - "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs", - "id": "logging.billingAccounts.logs.list", - "path": "v2beta1/{+parent}/logs", - "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", - "httpMethod": "GET", - "response": { - "$ref": "ListLogsResponse" - }, - "parameterOrder": [ - "parent" - ] - } - } - } - } - }, - "monitoredResourceDescriptors": { - "methods": { - "list": { - "parameterOrder": [], - "response": { - "$ref": "ListMonitoredResourceDescriptorsResponse" - }, - "httpMethod": "GET", - "parameters": { - "pageToken": { - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/monitoredResourceDescriptors", - "path": "v2beta1/monitoredResourceDescriptors", - "id": "logging.monitoredResourceDescriptors.list", - "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging." - } - } - }, - "organizations": { - "resources": { - "logs": { - "methods": { - "delete": { - "description": "Deletes all the log entries in a log. 
The log reappears if it receives new entries.", - "parameterOrder": [ - "logName" - ], - "response": { - "$ref": "Empty" - }, - "httpMethod": "DELETE", - "parameters": { - "logName": { - "pattern": "^organizations/[^/]+/logs/[^/]+$", - "location": "path", - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/organizations/{organizationsId}/logs/{logsId}", - "path": "v2beta1/{+logName}", - "id": "logging.organizations.logs.delete" - }, - "list": { - "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", - "httpMethod": "GET", - "response": { - "$ref": "ListLogsResponse" - }, - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", - "required": true, - "type": "string", - "pattern": "^organizations/[^/]+$", - "location": "path" - }, - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/organizations/{organizationsId}/logs", - "id": "logging.organizations.logs.list", - "path": "v2beta1/{+parent}/logs" - } + "traceId": { + "description": "Stackdriver Trace identifier for this request.", + "type": "string" + }, + "line": { + "description": "A list of log lines emitted by the application while serving this request.", + "type": "array", + "items": { + "$ref": "LogLine" } + }, + "taskQueueName": { + "description": "Queue name of the request, in the case of an offline request.", + "type": "string" + }, + "referrer": { + "description": "Referrer URL of request.", + "type": "string" + }, + "requestId": { + "description": "Globally unique identifier for a request, which is based on the request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier.", + "type": "string" + }, + "nickname": { + "description": "The logged-in user who made the request.Most likely, this is the part of the user's email before the @ sign. The field value is the same for different requests from the same user, but different users can have similar names. 
This information is also available to the application via the App Engine Users API.This field will be populated starting with App Engine 1.9.21.", + "type": "string" + }, + "pendingTime": { + "description": "Time this request spent in the pending request queue.", + "format": "google-duration", + "type": "string" + }, + "resource": { + "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". The fragment identifier, which is identified by the # character, is not included.", + "type": "string" + }, + "status": { + "type": "integer", + "description": "HTTP response status code. Example: 200, 404.", + "format": "int32" + }, + "taskName": { + "description": "Task name of the request, in the case of an offline request.", + "type": "string" + }, + "urlMapEntry": { + "description": "File or class that handled the request.", + "type": "string" + }, + "instanceIndex": { + "description": "If the instance processing this request belongs to a manually scaled module, then this is the 0-based index of the instance. Otherwise, this value is -1.", + "format": "int32", + "type": "integer" + }, + "host": { + "description": "Internet host and port number of the resource being requested.", + "type": "string" + }, + "finished": { + "description": "Whether this request is finished or active.", + "type": "boolean" + }, + "httpVersion": { + "description": "HTTP version of request. Example: \"HTTP/1.1\".", + "type": "string" + }, + "startTime": { + "description": "Time when the request started.", + "format": "google-datetime", + "type": "string" + }, + "latency": { + "description": "Latency of the request.", + "format": "google-duration", + "type": "string" + }, + "ip": { + "description": "Origin IP address.", + "type": "string" + }, + "appId": { + "description": "Application that handled this request.", + "type": "string" + }, + "appEngineRelease": { + "description": "App Engine release version.", + "type": "string" + }, + "method": { + "description": "Request method. Example: \"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\".", + "type": "string" + }, + "cost": { + "description": "An indication of the relative cost of serving this request.", + "format": "double", + "type": "number" + }, + "instanceId": { + "description": "An identifier for the instance that handled the request.", + "type": "string" + }, + "megaCycles": { + "description": "Number of CPU megacycles used to process request.", + "format": "int64", + "type": "string" + }, + "first": { + "type": "boolean", + "description": "Whether this is the first RequestLog entry for this request. If an active request has several RequestLog entries written to Stackdriver Logging, then this field will be set for one of them." + }, + "versionId": { + "description": "Version of the application that handled this request.", + "type": "string" + }, + "moduleId": { + "description": "Module of the application that handled this request.", + "type": "string" + }, + "endTime": { + "description": "Time when the request finished.", + "format": "google-datetime", + "type": "string" } - } + }, + "id": "RequestLog" }, - "entries": { - "methods": { - "list": { - "request": { - "$ref": "ListLogEntriesRequest" - }, - "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. 
For ways to export log entries, see Exporting Logs.", - "response": { - "$ref": "ListLogEntriesResponse" - }, - "parameterOrder": [], - "httpMethod": "POST", - "parameters": {}, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/entries:list", - "path": "v2beta1/entries:list", - "id": "logging.entries.list" + "ListMonitoredResourceDescriptorsResponse": { + "description": "Result returned from ListMonitoredResourceDescriptors.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", + "type": "string" }, - "write": { - "id": "logging.entries.write", - "path": "v2beta1/entries:write", - "request": { - "$ref": "WriteLogEntriesRequest" - }, - "description": "Writes log entries to Stackdriver Logging. All log entries are written by this method.", - "httpMethod": "POST", - "parameterOrder": [], - "response": { - "$ref": "WriteLogEntriesResponse" - }, - "parameters": {}, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2beta1/entries:write" + "resourceDescriptors": { + "description": "A list of resource descriptors.", + "type": "array", + "items": { + "$ref": "MonitoredResourceDescriptor" + } } - } - } - }, - "parameters": { - "key": { - "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string" - }, - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" - }, - "pp": { - "description": "Pretty-print response.", - "type": "boolean", - "default": "true", - "location": "query" + }, + "id": "ListMonitoredResourceDescriptorsResponse" }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" + "SourceReference": { + "description": "A reference to a particular snapshot of the source tree used to build and deploy an application.", + "type": "object", + "properties": { + "revisionId": { + "description": "The canonical and persistent identifier of the deployed revision. Example (git): \"0035781c50ec7aa23385dc841529ce8a4b70db1b\"", + "type": "string" + }, + "repository": { + "description": "Optional. A URI string identifying the repository. Example: \"https://github.com/GoogleCloudPlatform/kubernetes.git\"", + "type": "string" + } + }, + "id": "SourceReference" }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" + "LogEntryOperation": { + "description": "Additional information about a potentially long-running operation with which a log entry is associated.", + "type": "object", + "properties": { + "last": { + "description": "Optional. 
Set this to True if this is the last log entry in the operation.", + "type": "boolean" + }, + "id": { + "description": "Optional. An arbitrary operation identifier. Log entries with the same identifier are assumed to be part of the same operation.", + "type": "string" + }, + "producer": { + "description": "Optional. An arbitrary producer identifier. The combination of id and producer must be globally unique. Examples for producer: \"MyDivision.MyBigCompany.com\", \"github.com/MyProject/MyApplication\".", + "type": "string" + }, + "first": { + "description": "Optional. Set this to True if this is the first log entry in the operation.", + "type": "boolean" + } + }, + "id": "LogEntryOperation" }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "LogMetric": { + "description": "Describes a logs-based metric. The value of the metric is the number of log entries that match a logs filter in a given time interval.", + "type": "object", + "properties": { + "name": { + "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".", + "type": "string" + }, + "description": { + "description": "Optional. A description of this metric, which is used in documentation.", + "type": "string" + }, + "version": { + "description": "Output only. The API version that created or updated this metric. The version also dictates the syntax of the filter expression. When a value for this field is missing, the default value of V2 should be assumed.", + "type": "string", + "enumDescriptions": [ + "Stackdriver Logging API v2.", + "Stackdriver Logging API v1." + ], + "enum": [ + "V2", + "V1" + ] + }, + "filter": { + "type": "string", + "description": "Required. An advanced logs filter which is used to match log entries. Example:\n\"resource.type=gae_app AND severity\u003e=ERROR\"\nThe maximum length of the filter is 20000 characters." + } + }, + "id": "LogMetric" }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true", - "location": "query" + "WriteLogEntriesResponse": { + "description": "Result returned from WriteLogEntries. empty", + "type": "object", + "properties": {}, + "id": "WriteLogEntriesResponse" }, - "fields": { - "location": "query", - "description": "Selector specifying which fields to include in a partial response.", - "type": "string" + "MonitoredResource": { + "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. 
For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", + "type": "object", + "properties": { + "type": { + "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Cloud SQL database is \"cloudsql_database\".", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Cloud SQL databases use the labels \"database_id\" and \"zone\".", + "type": "object" + } + }, + "id": "MonitoredResource" }, - "uploadType": { - "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string" + "WriteLogEntriesRequest": { + "description": "The parameters to WriteLogEntries.", + "type": "object", + "properties": { + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Default labels that are added to the labels field of all log entries in entries. If a log entry already has a label with the same key as a label in this parameter, then the log entry's label is not changed. See LogEntry.", + "type": "object" + }, + "resource": { + "$ref": "MonitoredResource", + "description": "Optional. A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry." + }, + "logName": { + "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\" or \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "type": "string" + }, + "entries": { + "description": "Required. The log entries to write. Values supplied for the fields log_name, resource, and labels in this entries.write request are added to those log entries that do not provide their own values for the fields.To improve throughput and to avoid exceeding the quota limit for calls to entries.write, you should write multiple log entries at once rather than calling this method for each individual log entry.", + "type": "array", + "items": { + "$ref": "LogEntry" + } + }, + "partialSuccess": { + "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. 
If any entry is not written, the response status will be the error associated with one of the failed entries and include error details in the form of WriteLogEntriesPartialErrors.", + "type": "boolean" + } + }, + "id": "WriteLogEntriesRequest" }, - "$.xgafv": { - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ] + "LogSink": { + "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.", + "type": "object", + "properties": { + "destination": { + "type": "string", + "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks." + }, + "filter": { + "description": "Optional. An advanced logs filter. The only exported log entries are those that are in the resource owning the sink and that match the filter. The filter must use the log entry format specified by the output_version_format parameter. For example, in the v2 format:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n", + "type": "string" + }, + "endTime": { + "description": "Optional. The time at which this sink will stop exporting log entries. Log entries are exported only if their timestamp is earlier than the end time. If this field is not supplied, there is no end time. If both a start time and an end time are provided, then the end time must be later than the start time.", + "format": "google-datetime", + "type": "string" + }, + "writerIdentity": { + "description": "Output only. An IAM identity—a service account or group—under which Stackdriver Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", + "type": "string" + }, + "startTime": { + "type": "string", + "description": "Optional. The time at which this sink will begin exporting log entries. Log entries are exported only if their timestamp is not earlier than the start time. The default value of this field is the time the sink is created or updated.", + "format": "google-datetime" + }, + "outputVersionFormat": { + "enum": [ + "VERSION_FORMAT_UNSPECIFIED", + "V2", + "V1" + ], + "description": "Optional. The log entry format to use for this sink's exported log entries. The v2 format is used by default. The v1 format is deprecated and should be used only as part of a migration effort to v2. See Migration to the v2 API.", + "type": "string", + "enumDescriptions": [ + "An unspecified format version that will default to V2.", + "LogEntry version 2 format.", + "LogEntry version 1 format." 
+ ] + }, + "name": { + "description": "Required. The client-assigned sink identifier, unique within the project. Example: \"my-syslog-errors-to-pubsub\". Sink identifiers are limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, and periods.", + "type": "string" + } + }, + "id": "LogSink" }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" + "ListLogsResponse": { + "description": "Result returned from ListLogs.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", + "type": "string" + }, + "logNames": { + "description": "A list of log names. For example, \"projects/my-project/syslog\" or \"organizations/123/cloudresourcemanager.googleapis.com%2Factivity\".", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "ListLogsResponse" }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" - } - }, - "version": "v2beta1", - "baseUrl": "https://logging.googleapis.com/", - "servicePath": "", - "description": "Writes log entries and manages your Stackdriver Logging configuration.", - "kind": "discovery#restDescription", - "basePath": "", - "revision": "20170114", - "documentationLink": "https://cloud.google.com/logging/docs/", - "id": "logging:v2beta1", - "discoveryVersion": "v1", - "version_module": "True", - "schemas": { - "MonitoredResourceDescriptor": { - "id": "MonitoredResourceDescriptor", - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", + "ListSinksResponse": { + "description": "Result returned from ListSinks.", "type": "object", "properties": { - "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", + "sinks": { + "description": "A list of sinks.", "type": "array", "items": { - "$ref": "LabelDescriptor" + "$ref": "LogSink" } }, - "name": { - "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", + "nextPageToken": { + "description": "If there might be more results than appear in this response, then nextPageToken is included. 
To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", + "type": "string" + } + }, + "id": "ListSinksResponse" + }, + "HttpRequest": { + "type": "object", + "properties": { + "cacheFillBytes": { + "type": "string", + "description": "The number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.", + "format": "int64" + }, + "requestMethod": { + "description": "The request method. Examples: \"GET\", \"HEAD\", \"PUT\", \"POST\".", + "type": "string" + }, + "requestSize": { + "description": "The size of the HTTP request message in bytes, including the request headers and the request body.", + "format": "int64", + "type": "string" + }, + "responseSize": { + "description": "The size of the HTTP response message sent back to the client, in bytes, including the response headers and the response body.", + "format": "int64", + "type": "string" + }, + "requestUrl": { + "type": "string", + "description": "The scheme (http, https), the host name, the path and the query portion of the URL that was requested. Example: \"http://example.com/some/info?color=red\"." + }, + "remoteIp": { + "description": "The IP address (IPv4 or IPv6) of the client that issued the HTTP request. Examples: \"192.168.1.1\", \"FE80::0202:B3FF:FE1E:8329\".", + "type": "string" + }, + "serverIp": { + "type": "string", + "description": "The IP address (IPv4 or IPv6) of the origin server that the request was sent to." + }, + "cacheLookup": { + "description": "Whether or not a cache lookup was attempted.", + "type": "boolean" + }, + "cacheHit": { + "description": "Whether or not an entity was served from cache (with or without validation).", + "type": "boolean" + }, + "cacheValidatedWithOriginServer": { + "description": "Whether or not the response was validated with the origin server before being served from cache. This field is only meaningful if cache_hit is True.", + "type": "boolean" + }, + "status": { + "description": "The response code indicating the status of response. Examples: 200, 404.", + "format": "int32", + "type": "integer" + }, + "referer": { + "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", + "type": "string" + }, + "latency": { + "description": "The request processing latency on the server, from the time the request was received until the response was sent.", + "format": "google-duration", + "type": "string" + }, + "userAgent": { + "description": "The user agent sent by the client. Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\".", + "type": "string" + } + }, + "id": "HttpRequest", + "description": "A common proto for logging HTTP requests. Only contains semantics defined by the HTTP specification. Product-specific logging information MUST be defined in a separate message." + }, + "LabelDescriptor": { + "description": "A description of a label.", + "type": "object", + "properties": { + "key": { + "description": "The label key.", "type": "string" }, + "description": { + "type": "string", + "description": "A human-readable description for the label." + }, + "valueType": { + "enumDescriptions": [ + "A variable-length string. This is the default.", + "Boolean; true or false.", + "A 64-bit signed integer." 
+ ], + "enum": [ + "STRING", + "BOOL", + "INT64" + ], + "description": "The type of data that can be assigned to the label.", + "type": "string" + } + }, + "id": "LabelDescriptor" + }, + "MonitoredResourceDescriptor": { + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", + "type": "object", + "properties": { "displayName": { "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", "type": "string" @@ -777,13 +538,30 @@ "type": { "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", "type": "string" + }, + "labels": { + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } + }, + "name": { + "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", + "type": "string" } - } + }, + "id": "MonitoredResourceDescriptor" }, "LogEntrySourceLocation": { + "id": "LogEntrySourceLocation", "description": "Additional information about the source code location that produced the log entry.", "type": "object", "properties": { + "function": { + "description": "Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", + "type": "string" + }, "line": { "description": "Optional. Line within the source file. 1-based; 0 indicates no line number available.", "format": "int64", @@ -792,37 +570,48 @@ "file": { "description": "Optional. Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", "type": "string" - }, - "function": { - "description": "Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. 
For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", - "type": "string" } - }, - "id": "LogEntrySourceLocation" + } }, "ListLogEntriesResponse": { - "id": "ListLogEntriesResponse", "description": "Result returned from ListLogEntries.", "type": "object", "properties": { + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.If a value for next_page_token appears and the entries field is empty, it means that the search found no log entries so far but it did not have time to search all the possible log entries. Retry the method with this value for page_token to continue the search. Alternatively, consider speeding up the search by changing your filter to specify a single log name or resource type, or to narrow the time range of the search.", + "type": "string" + }, "entries": { "description": "A list of log entries.", "type": "array", "items": { "$ref": "LogEntry" } - }, - "nextPageToken": { - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.If a value for next_page_token appears and the entries field is empty, it means that the search found no log entries so far but it did not have time to search all the possible log entries. Retry the method with this value for page_token to continue the search. Alternatively, consider speeding up the search by changing your filter to specify a single log name or resource type, or to narrow the time range of the search.", - "type": "string" } - } + }, + "id": "ListLogEntriesResponse" }, "LogLine": { "description": "Application log line emitted while processing a request.", "type": "object", "properties": { + "time": { + "description": "Approximate time when this log entry was made.", + "format": "google-datetime", + "type": "string" + }, "severity": { + "enum": [ + "DEFAULT", + "DEBUG", + "INFO", + "NOTICE", + "WARNING", + "ERROR", + "CRITICAL", + "ALERT", + "EMERGENCY" + ], "description": "Severity of this log entry.", "type": "string", "enumDescriptions": [ @@ -835,17 +624,6 @@ "(600) Critical events cause more severe problems or outages.", "(700) A person must take an action immediately.", "(800) One or more systems are unusable." - ], - "enum": [ - "DEFAULT", - "DEBUG", - "INFO", - "NOTICE", - "WARNING", - "ERROR", - "CRITICAL", - "ALERT", - "EMERGENCY" ] }, "logMessage": { @@ -853,13 +631,8 @@ "type": "string" }, "sourceLocation": { - "description": "Where in the source code this log message was written.", - "$ref": "SourceLocation" - }, - "time": { - "description": "Approximate time when this log entry was made.", - "format": "google-datetime", - "type": "string" + "$ref": "SourceLocation", + "description": "Where in the source code this log message was written." } }, "id": "LogLine" @@ -876,8 +649,8 @@ } }, "nextPageToken": { - "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", - "type": "string" + "type": "string", + "description": "If there might be more results than appear in this response, then nextPageToken is included. 
To get the next set of results, call this method again using the value of nextPageToken as pageToken." } }, "id": "ListLogMetricsResponse" @@ -889,64 +662,10 @@ "id": "Empty" }, "LogEntry": { - "id": "LogEntry", "description": "An individual entry in a log.", "type": "object", "properties": { - "logName": { - "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", - "type": "string" - }, - "httpRequest": { - "$ref": "HttpRequest", - "description": "Optional. Information about the HTTP request associated with this log entry, if applicable." - }, - "resource": { - "$ref": "MonitoredResource", - "description": "Required. The monitored resource associated with this log entry. Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error." - }, - "jsonPayload": { - "description": "The log entry payload, represented as a structure that is expressed as a JSON object.", - "type": "object", - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" - } - }, - "insertId": { - "description": "Optional. A unique ID for the log entry. If you provide this field, the logging service considers other log entries in the same project with the same ID as duplicates which can be removed. If omitted, Stackdriver Logging will generate a unique ID for this log entry.", - "type": "string" - }, - "operation": { - "description": "Optional. Information about an operation associated with the log entry, if applicable.", - "$ref": "LogEntryOperation" - }, - "textPayload": { - "description": "The log entry payload, represented as a Unicode string (UTF-8).", - "type": "string" - }, - "protoPayload": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.", - "type": "object" - }, - "trace": { - "description": "Optional. Resource name of the trace associated with the log entry, if any. If it contains a relative resource name, the name is assumed to be relative to //tracing.googleapis.com. Example: projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. A set of user-defined (key, value) data that provides additional information about the log entry.", - "type": "object" - }, "severity": { - "description": "Optional. The severity of the log entry. 
The default value is LogSeverity.DEFAULT.", - "type": "string", "enumDescriptions": [ "(0) The log entry has no assigned severity level.", "(100) Debug or trace information.", @@ -968,7 +687,9 @@ "CRITICAL", "ALERT", "EMERGENCY" - ] + ], + "description": "Optional. The severity of the log entry. The default value is LogSeverity.DEFAULT.", + "type": "string" }, "sourceLocation": { "description": "Optional. Source code location information associated with the log entry, if any.", @@ -978,545 +699,824 @@ "description": "Optional. The time the event described by the log entry occurred. If omitted, Stackdriver Logging will use the time the log entry is received.", "format": "google-datetime", "type": "string" - } - } - }, - "SourceLocation": { - "description": "Specifies a location in a source code file.", - "type": "object", - "properties": { - "file": { - "description": "Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", - "type": "string" - }, - "functionName": { - "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information is used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", - "type": "string" - }, - "line": { - "description": "Line within the source file.", - "format": "int64", - "type": "string" - } - }, - "id": "SourceLocation" - }, - "ListLogEntriesRequest": { - "description": "The parameters to ListLogEntries.", - "type": "object", - "properties": { - "orderBy": { - "description": "Optional. How the results should be sorted. Presently, the only permitted values are \"timestamp asc\" (default) and \"timestamp desc\". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of LogEntry.insertId.", - "type": "string" - }, - "resourceNames": { - "description": "Required. Names of one or more resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nProjects listed in the project_ids field are added to this list.", - "type": "array", - "items": { - "type": "string" - } - }, - "projectIds": { - "description": "Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\". If present, these project identifiers are converted to resource name format and added to the list of resources in resource_names.", - "type": "array", - "items": { - "type": "string" - } - }, - "filter": { - "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", - "type": "string" - }, - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. 
The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - } - }, - "id": "ListLogEntriesRequest" - }, - "RequestLog": { - "id": "RequestLog", - "description": "Complete log information about a single HTTP request to an App Engine application.", - "type": "object", - "properties": { - "ip": { - "description": "Origin IP address.", - "type": "string" - }, - "appId": { - "description": "Application that handled this request.", - "type": "string" - }, - "appEngineRelease": { - "description": "App Engine release version.", - "type": "string" - }, - "method": { - "description": "Request method. Example: \"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\".", - "type": "string" - }, - "cost": { - "description": "An indication of the relative cost of serving this request.", - "format": "double", - "type": "number" - }, - "instanceId": { - "description": "An identifier for the instance that handled the request.", - "type": "string" - }, - "megaCycles": { - "description": "Number of CPU megacycles used to process request.", - "format": "int64", - "type": "string" - }, - "first": { - "description": "Whether this is the first RequestLog entry for this request. If an active request has several RequestLog entries written to Stackdriver Logging, then this field will be set for one of them.", - "type": "boolean" - }, - "versionId": { - "description": "Version of the application that handled this request.", - "type": "string" - }, - "moduleId": { - "description": "Module of the application that handled this request.", - "type": "string" - }, - "endTime": { - "description": "Time when the request finished.", - "format": "google-datetime", - "type": "string" }, - "userAgent": { - "description": "User agent that made the request.", - "type": "string" - }, - "wasLoadingRequest": { - "description": "Whether this was a loading request for the instance.", - "type": "boolean" - }, - "sourceReference": { - "description": "Source code for the application that handled this request. There can be more than one source reference per deployed application if source code is distributed among multiple repositories.", - "type": "array", - "items": { - "$ref": "SourceReference" - } - }, - "responseSize": { - "description": "Size in bytes sent back to client by request.", - "format": "int64", - "type": "string" - }, - "traceId": { - "description": "Stackdriver Trace identifier for this request.", - "type": "string" - }, - "line": { - "description": "A list of log lines emitted by the application while serving this request.", - "type": "array", - "items": { - "$ref": "LogLine" - } - }, - "taskQueueName": { - "description": "Queue name of the request, in the case of an offline request.", - "type": "string" - }, - "referrer": { - "description": "Referrer URL of request.", - "type": "string" - }, - "requestId": { - "description": "Globally unique identifier for a request, which is based on the request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier.", + "logName": { + "description": "Required. 
The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", "type": "string" }, - "nickname": { - "description": "The logged-in user who made the request.Most likely, this is the part of the user's email before the @ sign. The field value is the same for different requests from the same user, but different users can have similar names. This information is also available to the application via the App Engine Users API.This field will be populated starting with App Engine 1.9.21.", - "type": "string" + "httpRequest": { + "$ref": "HttpRequest", + "description": "Optional. Information about the HTTP request associated with this log entry, if applicable." }, - "status": { - "description": "HTTP response status code. Example: 200, 404.", - "format": "int32", - "type": "integer" + "resource": { + "$ref": "MonitoredResource", + "description": "Required. The monitored resource associated with this log entry. Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error." }, - "pendingTime": { - "description": "Time this request spent in the pending request queue.", - "format": "google-duration", - "type": "string" + "jsonPayload": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The log entry payload, represented as a structure that is expressed as a JSON object." }, - "resource": { - "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". The fragment identifier, which is identified by the # character, is not included.", + "insertId": { + "description": "Optional. A unique ID for the log entry. If you provide this field, the logging service considers other log entries in the same project with the same ID as duplicates which can be removed. If omitted, Stackdriver Logging will generate a unique ID for this log entry.", "type": "string" }, - "taskName": { - "description": "Task name of the request, in the case of an offline request.", - "type": "string" + "operation": { + "$ref": "LogEntryOperation", + "description": "Optional. Information about an operation associated with the log entry, if applicable." }, - "urlMapEntry": { - "description": "File or class that handled the request.", + "textPayload": { + "description": "The log entry payload, represented as a Unicode string (UTF-8).", "type": "string" }, - "instanceIndex": { - "description": "If the instance processing this request belongs to a manually scaled module, then this is the 0-based index of the instance. 
Otherwise, this value is -1.", - "format": "int32", - "type": "integer" - }, - "finished": { - "description": "Whether this request is finished or active.", - "type": "boolean" + "protoPayload": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.", + "type": "object" }, - "host": { - "description": "Internet host and port number of the resource being requested.", + "trace": { + "description": "Optional. Resource name of the trace associated with the log entry, if any. If it contains a relative resource name, the name is assumed to be relative to //tracing.googleapis.com. Example: projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824", "type": "string" }, - "httpVersion": { - "description": "HTTP version of request. Example: \"HTTP/1.1\".", + "labels": { + "description": "Optional. A set of user-defined (key, value) data that provides additional information about the log entry.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "id": "LogEntry" + }, + "SourceLocation": { + "description": "Specifies a location in a source code file.", + "type": "object", + "properties": { + "file": { + "description": "Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", "type": "string" }, - "startTime": { - "description": "Time when the request started.", - "format": "google-datetime", + "functionName": { + "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information is used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", "type": "string" }, - "latency": { - "description": "Latency of the request.", - "format": "google-duration", + "line": { + "description": "Line within the source file.", + "format": "int64", "type": "string" } + }, + "id": "SourceLocation" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "canonicalName": "Logging", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/logging.admin": { + "description": "Administrate log data for your projects" + }, + "https://www.googleapis.com/auth/logging.read": { + "description": "View log data for your projects" + }, + "https://www.googleapis.com/auth/logging.write": { + "description": "Submit log data for your projects" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://logging.googleapis.com/", + "ownerDomain": "google.com", + "name": "logging", + "batchPath": "batch", + "title": "Stackdriver Logging API", + "ownerName": "Google", + "resources": { + "billingAccounts": { + "resources": { + "logs": { + "methods": { + "delete": { + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. 
Log entries written shortly before the delete operation might not be deleted.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "logName" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "logName": { + "required": true, + "type": "string", + "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", + "location": "path", + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry." + } + }, + "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs/{logsId}", + "path": "v2beta1/{+logName}", + "id": "logging.billingAccounts.logs.delete" + }, + "list": { + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "response": { + "$ref": "ListLogsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "parameters": { + "parent": { + "location": "path", + "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^billingAccounts/[^/]+$" + }, + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs", + "path": "v2beta1/{+parent}/logs", + "id": "logging.billingAccounts.logs.list" + } + } + } } }, - "ListMonitoredResourceDescriptorsResponse": { - "id": "ListMonitoredResourceDescriptorsResponse", - "description": "Result returned from ListMonitoredResourceDescriptors.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. 
To get the next set of results, call this method again using the value of nextPageToken as pageToken.", - "type": "string" - }, - "resourceDescriptors": { - "description": "A list of resource descriptors.", - "type": "array", - "items": { - "$ref": "MonitoredResourceDescriptor" + "monitoredResourceDescriptors": { + "methods": { + "list": { + "flatPath": "v2beta1/monitoredResourceDescriptors", + "id": "logging.monitoredResourceDescriptors.list", + "path": "v2beta1/monitoredResourceDescriptors", + "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging.", + "httpMethod": "GET", + "response": { + "$ref": "ListMonitoredResourceDescriptorsResponse" + }, + "parameterOrder": [], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "parameters": { + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" + } + } + } + } + }, + "organizations": { + "resources": { + "logs": { + "methods": { + "list": { + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListLogsResponse" + }, + "parameters": { + "pageToken": { + "type": "string", + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." + }, + "pageSize": { + "type": "integer", + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32" + }, + "parent": { + "location": "path", + "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/organizations/{organizationsId}/logs", + "id": "logging.organizations.logs.list", + "path": "v2beta1/{+parent}/logs" + }, + "delete": { + "httpMethod": "DELETE", + "parameterOrder": [ + "logName" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "logName": { + "pattern": "^organizations/[^/]+/logs/[^/]+$", + "location": "path", + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "required": true, + "type": "string" + } + }, + "flatPath": "v2beta1/organizations/{organizationsId}/logs/{logsId}", + "id": "logging.organizations.logs.delete", + "path": "v2beta1/{+logName}", + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted." + } } } } }, - "SourceReference": { - "id": "SourceReference", - "description": "A reference to a particular snapshot of the source tree used to build and deploy an application.", - "type": "object", - "properties": { - "repository": { - "description": "Optional. A URI string identifying the repository. Example: \"https://github.com/GoogleCloudPlatform/kubernetes.git\"", - "type": "string" - }, - "revisionId": { - "description": "The canonical and persistent identifier of the deployed revision. Example (git): \"0035781c50ec7aa23385dc841529ce8a4b70db1b\"", - "type": "string" - } - } - }, - "LogMetric": { - "id": "LogMetric", - "description": "Describes a logs-based metric. The value of the metric is the number of log entries that match a logs filter in a given time interval.", - "type": "object", - "properties": { - "name": { - "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".", - "type": "string" - }, - "description": { - "description": "Optional. 
A description of this metric, which is used in documentation.", - "type": "string" - }, - "version": { - "description": "Output only. The API version that created or updated this metric. The version also dictates the syntax of the filter expression. When a value for this field is missing, the default value of V2 should be assumed.", - "type": "string", - "enumDescriptions": [ - "Stackdriver Logging API v2.", - "Stackdriver Logging API v1." + "entries": { + "methods": { + "list": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" ], - "enum": [ - "V2", - "V1" - ] + "parameters": {}, + "flatPath": "v2beta1/entries:list", + "path": "v2beta1/entries:list", + "id": "logging.entries.list", + "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. For ways to export log entries, see Exporting Logs.", + "request": { + "$ref": "ListLogEntriesRequest" + }, + "response": { + "$ref": "ListLogEntriesResponse" + }, + "parameterOrder": [], + "httpMethod": "POST" }, - "filter": { - "description": "Required. An advanced logs filter which is used to match log entries. Example:\n\"resource.type=gae_app AND severity\u003e=ERROR\"\nThe maximum length of the filter is 20000 characters.", - "type": "string" + "write": { + "path": "v2beta1/entries:write", + "id": "logging.entries.write", + "request": { + "$ref": "WriteLogEntriesRequest" + }, + "description": "Writes log entries to Stackdriver Logging. All log entries are written by this method.", + "response": { + "$ref": "WriteLogEntriesResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "flatPath": "v2beta1/entries:write" } } }, - "LogEntryOperation": { - "description": "Additional information about a potentially long-running operation with which a log entry is associated.", - "type": "object", - "properties": { - "id": { - "description": "Optional. An arbitrary operation identifier. Log entries with the same identifier are assumed to be part of the same operation.", - "type": "string" - }, - "producer": { - "description": "Optional. An arbitrary producer identifier. The combination of id and producer must be globally unique. Examples for producer: \"MyDivision.MyBigCompany.com\", \"github.com/MyProject/MyApplication\".", - "type": "string" - }, - "first": { - "description": "Optional. Set this to True if this is the first log entry in the operation.", - "type": "boolean" - }, - "last": { - "description": "Optional. Set this to True if this is the last log entry in the operation.", - "type": "boolean" - } - }, - "id": "LogEntryOperation" - }, - "WriteLogEntriesResponse": { - "id": "WriteLogEntriesResponse", - "description": "Result returned from WriteLogEntries. empty", - "type": "object", - "properties": {} - }, - "MonitoredResource": { - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. 
Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", - "type": "object", - "properties": { - "type": { - "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Cloud SQL database is \"cloudsql_database\".", - "type": "string" - }, - "labels": { - "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Cloud SQL databases use the labels \"database_id\" and \"zone\".", - "type": "object", - "additionalProperties": { - "type": "string" + "projects": { + "resources": { + "metrics": { + "methods": { + "delete": { + "httpMethod": "DELETE", + "parameterOrder": [ + "metricName" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "parameters": { + "metricName": { + "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metrics/[^/]+$", + "location": "path" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", + "id": "logging.projects.metrics.delete", + "path": "v2beta1/{+metricName}", + "description": "Deletes a logs-based metric." + }, + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListLogMetricsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "parameters": { + "parent": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n" + }, + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "type": "integer", + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/metrics", + "id": "logging.projects.metrics.list", + "path": "v2beta1/{+parent}/metrics", + "description": "Lists logs-based metrics." 
+ }, + "get": { + "response": { + "$ref": "LogMetric" + }, + "parameterOrder": [ + "metricName" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "parameters": { + "metricName": { + "location": "path", + "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metrics/[^/]+$" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", + "path": "v2beta1/{+metricName}", + "id": "logging.projects.metrics.get", + "description": "Gets a logs-based metric." + }, + "update": { + "description": "Creates or updates a logs-based metric.", + "request": { + "$ref": "LogMetric" + }, + "response": { + "$ref": "LogMetric" + }, + "parameterOrder": [ + "metricName" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "parameters": { + "metricName": { + "pattern": "^projects/[^/]+/metrics/[^/]+$", + "location": "path", + "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", + "required": true, + "type": "string" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", + "path": "v2beta1/{+metricName}", + "id": "logging.projects.metrics.update" + }, + "create": { + "flatPath": "v2beta1/projects/{projectsId}/metrics", + "id": "logging.projects.metrics.create", + "path": "v2beta1/{+parent}/metrics", + "request": { + "$ref": "LogMetric" + }, + "description": "Creates a logs-based metric.", + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "LogMetric" + }, + "parameters": { + "parent": { + "location": "path", + "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ] + } } - } - }, - "id": "MonitoredResource" - }, - "LogSink": { - "id": "LogSink", - "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project or organization.", - "type": "object", - "properties": { - "name": { - "description": "Required. The client-assigned sink identifier, unique within the project. Example: \"my-syslog-errors-to-pubsub\". Sink identifiers are limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, and periods.", - "type": "string" - }, - "destination": { - "description": "Required. 
The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks.", - "type": "string" - }, - "filter": { - "description": "Optional. An advanced logs filter. The only exported log entries are those that are in the resource owning the sink and that match the filter. The filter must use the log entry format specified by the output_version_format parameter. For example, in the v2 format:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n", - "type": "string" - }, - "endTime": { - "description": "Optional. The time at which this sink will stop exporting log entries. Log entries are exported only if their timestamp is earlier than the end time. If this field is not supplied, there is no end time. If both a start time and an end time are provided, then the end time must be later than the start time.", - "format": "google-datetime", - "type": "string" - }, - "startTime": { - "description": "Optional. The time at which this sink will begin exporting log entries. Log entries are exported only if their timestamp is not earlier than the start time. The default value of this field is the time the sink is created or updated.", - "format": "google-datetime", - "type": "string" - }, - "writerIdentity": { - "description": "Output only. An IAM identity—a service account or group—under which Stackdriver Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", - "type": "string" - }, - "outputVersionFormat": { - "description": "Optional. The log entry format to use for this sink's exported log entries. The v2 format is used by default. The v1 format is deprecated and should be used only as part of a migration effort to v2. See Migration to the v2 API.", - "type": "string", - "enumDescriptions": [ - "An unspecified format version that will default to V2.", - "LogEntry version 2 format.", - "LogEntry version 1 format." - ], - "enum": [ - "VERSION_FORMAT_UNSPECIFIED", - "V2", - "V1" - ] - } - } - }, - "WriteLogEntriesRequest": { - "id": "WriteLogEntriesRequest", - "description": "The parameters to WriteLogEntries.", - "type": "object", - "properties": { - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. Default labels that are added to the labels field of all log entries in entries. If a log entry already has a label with the same key as a label in this parameter, then the log entry's label is not changed. See LogEntry.", - "type": "object" - }, - "resource": { - "$ref": "MonitoredResource", - "description": "Optional. A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry." 
}, - "logName": { - "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\" or \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "type": "string" + "logs": { + "methods": { + "delete": { + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "logName" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "logName": { + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/logs/[^/]+$", + "location": "path" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/logs/{logsId}", + "path": "v2beta1/{+logName}", + "id": "logging.projects.logs.delete" + }, + "list": { + "response": { + "$ref": "ListLogsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "parameters": { + "parent": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/logs", + "path": "v2beta1/{+parent}/logs", + "id": "logging.projects.logs.list", + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed." + } + } }, - "entries": { - "description": "Required. The log entries to write. 
Values supplied for the fields log_name, resource, and labels in this entries.write request are added to those log entries that do not provide their own values for the fields.To improve throughput and to avoid exceeding the quota limit for calls to entries.write, you should write multiple log entries at once rather than calling this method for each individual log entry.", - "type": "array", - "items": { - "$ref": "LogEntry" + "sinks": { + "methods": { + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListSinksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "parameters": { + "parent": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n" + }, + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "type": "integer", + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/sinks", + "id": "logging.projects.sinks.list", + "path": "v2beta1/{+parent}/sinks", + "description": "Lists sinks." + }, + "get": { + "description": "Gets a sink.", + "response": { + "$ref": "LogSink" + }, + "parameterOrder": [ + "sinkName" + ], + "httpMethod": "GET", + "parameters": { + "sinkName": { + "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/sinks/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", + "path": "v2beta1/{+sinkName}", + "id": "logging.projects.sinks.get" + }, + "update": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "sinkName": { + "location": "path", + "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/sinks/[^/]+$" + }, + "uniqueWriterIdentity": { + "type": "boolean", + "location": "query", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false." + } + }, + "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", + "path": "v2beta1/{+sinkName}", + "id": "logging.projects.sinks.update", + "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", + "request": { + "$ref": "LogSink" + }, + "response": { + "$ref": "LogSink" + }, + "parameterOrder": [ + "sinkName" + ], + "httpMethod": "PUT" + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "LogSink" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "parent": { + "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "uniqueWriterIdentity": { + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "type": "boolean", + "location": "query" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/sinks", + "id": "logging.projects.sinks.create", + "path": "v2beta1/{+parent}/sinks", + "description": "Creates a sink that exports specified log entries to a destination. 
The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + "request": { + "$ref": "LogSink" + } + }, + "delete": { + "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", + "httpMethod": "DELETE", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "sinkName" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "sinkName": { + "location": "path", + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/sinks/[^/]+$" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", + "id": "logging.projects.sinks.delete", + "path": "v2beta1/{+sinkName}" + } } - }, - "partialSuccess": { - "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any entry is not written, the response status will be the error associated with one of the failed entries and include error details in the form of WriteLogEntriesPartialErrors.", - "type": "boolean" } } + } + }, + "parameters": { + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" }, - "ListLogsResponse": { - "description": "Result returned from ListLogs.", - "type": "object", - "properties": { - "logNames": { - "description": "A list of log names. For example, \"projects/my-project/syslog\" or \"organizations/123/cloudresourcemanager.googleapis.com%2Factivity\".", - "type": "array", - "items": { - "type": "string" - } - }, - "nextPageToken": { - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", - "type": "string" - } - }, - "id": "ListLogsResponse" + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" }, - "HttpRequest": { - "id": "HttpRequest", - "description": "A common proto for logging HTTP requests. Only contains semantics defined by the HTTP specification. Product-specific logging information MUST be defined in a separate message.", - "type": "object", - "properties": { - "latency": { - "description": "The request processing latency on the server, from the time the request was received until the response was sent.", - "format": "google-duration", - "type": "string" - }, - "userAgent": { - "description": "The user agent sent by the client. 
Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\".", - "type": "string" - }, - "cacheFillBytes": { - "description": "The number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.", - "format": "int64", - "type": "string" - }, - "requestMethod": { - "description": "The request method. Examples: \"GET\", \"HEAD\", \"PUT\", \"POST\".", - "type": "string" - }, - "responseSize": { - "description": "The size of the HTTP response message sent back to the client, in bytes, including the response headers and the response body.", - "format": "int64", - "type": "string" - }, - "requestSize": { - "description": "The size of the HTTP request message in bytes, including the request headers and the request body.", - "format": "int64", - "type": "string" - }, - "requestUrl": { - "description": "The scheme (http, https), the host name, the path and the query portion of the URL that was requested. Example: \"http://example.com/some/info?color=red\".", - "type": "string" - }, - "remoteIp": { - "description": "The IP address (IPv4 or IPv6) of the client that issued the HTTP request. Examples: \"192.168.1.1\", \"FE80::0202:B3FF:FE1E:8329\".", - "type": "string" - }, - "serverIp": { - "description": "The IP address (IPv4 or IPv6) of the origin server that the request was sent to.", - "type": "string" - }, - "cacheLookup": { - "description": "Whether or not a cache lookup was attempted.", - "type": "boolean" - }, - "cacheHit": { - "description": "Whether or not an entity was served from cache (with or without validation).", - "type": "boolean" - }, - "cacheValidatedWithOriginServer": { - "description": "Whether or not the response was validated with the origin server before being served from cache. This field is only meaningful if cache_hit is True.", - "type": "boolean" - }, - "status": { - "description": "The response code indicating the status of response. Examples: 200, 404.", - "format": "int32", - "type": "integer" - }, - "referer": { - "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", - "type": "string" - } - } + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" }, - "ListSinksResponse": { - "description": "Result returned from ListSinks.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", - "type": "string" - }, - "sinks": { - "description": "A list of sinks.", - "type": "array", - "items": { - "$ref": "LogSink" - } - } - }, - "id": "ListSinksResponse" + "quotaUser": { + "type": "string", + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." 
}, - "LabelDescriptor": { - "id": "LabelDescriptor", - "description": "A description of a label.", - "type": "object", - "properties": { - "key": { - "description": "The label key.", - "type": "string" - }, - "description": { - "description": "A human-readable description for the label.", - "type": "string" - }, - "valueType": { - "description": "The type of data that can be assigned to the label.", - "type": "string", - "enumDescriptions": [ - "A variable-length string. This is the default.", - "Boolean; true or false.", - "A 64-bit signed integer." - ], - "enum": [ - "STRING", - "BOOL", - "INT64" - ] - } - } + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "uploadType": { + "type": "string", + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\")." + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] } - }, - "protocol": "rest", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" } } diff --git a/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go b/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go index cb0fde112..78555ead6 100644 --- a/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go +++ b/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go @@ -77,9 +77,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BillingAccounts *BillingAccountsService @@ -99,6 +100,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBillingAccountsService(s *Service) *BillingAccountsService { rs := &BillingAccountsService{s: s} rs.Logs = NewBillingAccountsLogsService(s) @@ -383,13 +388,15 @@ type ListLogEntriesRequest struct { // list of resources in resource_names. ProjectIds []string `json:"projectIds,omitempty"` - // ResourceNames: Required. Names of one or more resources from which to - // retrieve log + // ResourceNames: Required. 
Names of one or more parent resources from + // which to retrieve log // entries: // "projects/[PROJECT_ID]" // "organizations/[ORGANIZATION_ID]" - // Pro - // jects listed in the project_ids field are added to this list. + // "bi + // llingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + // Projects listed in the project_ids field are added to this list. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "Filter") to @@ -641,6 +648,9 @@ type LogEntry struct { // "projects/[PROJECT_ID]/logs/[LOG_ID]" // "organizations/[ORGANIZ // ATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[L + // OG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" // [LOG_ID] must be URL-encoded within log_name. Example: // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa // ctivity". [LOG_ID] must be less than 512 characters long and can only @@ -933,7 +943,7 @@ func (s *LogMetric) MarshalJSON() ([]byte, error) { // following destinations in any project: a Cloud Storage bucket, a // BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls // which log entries are exported. The sink must be created within a -// project or organization. +// project, organization, billing account, or folder. type LogSink struct { // Destination: Required. The export // destination: @@ -1394,6 +1404,9 @@ type WriteLogEntriesRequest struct { // "projects/[PROJECT_ID]/logs/[LOG_ID]" // "organizations/[ORGANI // ZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[ + // LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" // [LOG_ID] must be URL-encoded. For example, // "projects/my-project-id/logs/syslog" or // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa @@ -1459,7 +1472,8 @@ type BillingAccountsLogsDeleteCall struct { } // Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. func (r *BillingAccountsLogsService) Delete(logName string) *BillingAccountsLogsDeleteCall { c := &BillingAccountsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.logName = logName @@ -1497,6 +1511,7 @@ func (c *BillingAccountsLogsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}") @@ -1547,7 +1562,7 @@ func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", // "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs/{logsId}", // "httpMethod": "DELETE", // "id": "logging.billingAccounts.logs.delete", @@ -1556,7 +1571,7 @@ func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. 
For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", // "required": true, @@ -1586,8 +1601,8 @@ type BillingAccountsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects or organizations. Only logs that -// have entries are listed. +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall { c := &BillingAccountsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -1654,6 +1669,7 @@ func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1707,7 +1723,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog } return ret, nil // { - // "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", // "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs", // "httpMethod": "GET", // "id": "logging.billingAccounts.logs.list", @@ -1727,7 +1743,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + // "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -1819,6 +1835,7 @@ func (c *EntriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.listlogentriesrequest) if err != nil { @@ -1964,6 +1981,7 @@ func (c *EntriesWriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest) if err != nil { @@ -2115,6 +2133,7 @@ func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2231,7 +2250,8 @@ type OrganizationsLogsDeleteCall struct { } // Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. func (r *OrganizationsLogsService) Delete(logName string) *OrganizationsLogsDeleteCall { c := &OrganizationsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.logName = logName @@ -2269,6 +2289,7 @@ func (c *OrganizationsLogsDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}") @@ -2319,7 +2340,7 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", // "flatPath": "v2beta1/organizations/{organizationsId}/logs/{logsId}", // "httpMethod": "DELETE", // "id": "logging.organizations.logs.delete", @@ -2328,7 +2349,7 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. 
For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^organizations/[^/]+/logs/[^/]+$", // "required": true, @@ -2358,8 +2379,8 @@ type OrganizationsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects or organizations. Only logs that -// have entries are listed. +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall { c := &OrganizationsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -2426,6 +2447,7 @@ func (c *OrganizationsLogsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2479,7 +2501,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR } return ret, nil // { - // "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", // "flatPath": "v2beta1/organizations/{organizationsId}/logs", // "httpMethod": "GET", // "id": "logging.organizations.logs.list", @@ -2499,7 +2521,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -2552,7 +2574,8 @@ type ProjectsLogsDeleteCall struct { } // Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. func (r *ProjectsLogsService) Delete(logName string) *ProjectsLogsDeleteCall { c := &ProjectsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.logName = logName @@ -2590,6 +2613,7 @@ func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}") @@ -2640,7 +2664,7 @@ func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries.", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. 
Log entries written shortly before the delete operation might not be deleted.", // "flatPath": "v2beta1/projects/{projectsId}/logs/{logsId}", // "httpMethod": "DELETE", // "id": "logging.projects.logs.delete", @@ -2649,7 +2673,7 @@ func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^projects/[^/]+/logs/[^/]+$", // "required": true, @@ -2679,8 +2703,8 @@ type ProjectsLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects or organizations. Only logs that -// have entries are listed. +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. func (r *ProjectsLogsService) List(parent string) *ProjectsLogsListCall { c := &ProjectsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -2747,6 +2771,7 @@ func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2800,7 +2825,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon } return ret, nil // { - // "description": "Lists the logs in projects or organizations. Only logs that have entries are listed.", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", // "flatPath": "v2beta1/projects/{projectsId}/logs", // "httpMethod": "GET", // "id": "logging.projects.logs.list", @@ -2820,7 +2845,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n", + // "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -2912,6 +2937,7 @@ func (c *ProjectsMetricsCreateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric) if err != nil { @@ -3047,6 +3073,7 @@ func (c *ProjectsMetricsDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+metricName}") @@ -3185,6 +3212,7 @@ func (c *ProjectsMetricsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3346,6 +3374,7 @@ func (c *ProjectsMetricsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3511,6 +3540,7 @@ func (c *ProjectsMetricsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric) if err != nil { @@ -3626,11 +3656,11 @@ func (r *ProjectsSinksService) Create(parent string, logsink *LogSink) *Projects // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set // to false, and if the sink's parent is a project, then the value -// returned as writer_identity is cloud-logs@google.com, the same -// identity used before the addition of writer identities to this API. -// The sink's destination must be in the same project as the sink -// itself.If this field is set to true, or if the sink is owned by a -// non-project resource such as an organization, then the value of +// returned as writer_identity is the same group or service account used +// by Stackdriver Logging before the addition of writer identities to +// this API. The sink's destination must be in the same project as the +// sink itself.If this field is set to true, or if the sink is owned by +// a non-project resource such as an organization, then the value of // writer_identity will be a unique service account used only for // exports from the new sink. For more information, see writer_identity // in LogSink. 
@@ -3670,6 +3700,7 @@ func (c *ProjectsSinksCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -3734,14 +3765,14 @@ func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // ], // "parameters": { // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is cloud-logs@google.com, the same identity used before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", // "location": "query", // "type": "boolean" // } @@ -3810,6 +3841,7 @@ func (c *ProjectsSinksDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+sinkName}") @@ -3869,7 +3901,7 @@ func (c *ProjectsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, erro // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nIt is an error if the sink does not exist. Example: \"projects/my-project-id/sinks/my-sink-id\". It is an error if the sink does not exist.", + // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, @@ -3947,6 +3979,7 @@ func (c *ProjectsSinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4009,7 +4042,7 @@ func (c *ProjectsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error // ], // "parameters": { // "sinkName": { - // "description": "Required. The parent resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, @@ -4108,6 +4141,7 @@ func (c *ProjectsSinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4181,7 +4215,7 @@ func (c *ProjectsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResp // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed. Examples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -4255,9 +4289,9 @@ func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *Proj // values of this field: // If the old and new values of this field are both false or both true, // then there is no change to the sink's writer_identity. -// If the old value was false and the new value is true, then +// If the old value is false and the new value is true, then // writer_identity is changed to a unique service account. -// It is an error if the old value was true and the new value is false. +// It is an error if the old value is true and the new value is false. 
func (c *ProjectsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksUpdateCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -4294,6 +4328,7 @@ func (c *ProjectsSinksUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { @@ -4358,14 +4393,14 @@ func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value was false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value was true and the new value is false.", + // "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", // "location": "query", // "type": "boolean" // } diff --git a/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go b/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go index 891c35bb8..020a7633a 100644 --- a/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go +++ b/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Products = NewAccountsProductsService(s) @@ -435,6 +440,7 @@ func (c *AccountsProductsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -598,6 +604,7 @@ func (c *AccountsProductsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/mirror/v1/mirror-gen.go b/vendor/google.golang.org/api/mirror/v1/mirror-gen.go index fb63974ed..198052a0c 100644 --- a/vendor/google.golang.org/api/mirror/v1/mirror-gen.go +++ b/vendor/google.golang.org/api/mirror/v1/mirror-gen.go @@ -69,9 +69,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} return rs @@ -1305,6 +1310,7 @@ func (c *AccountsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body 
io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -1449,6 +1455,7 @@ func (c *ContactsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "contacts/{id}") @@ -1555,6 +1562,7 @@ func (c *ContactsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1681,6 +1689,7 @@ func (c *ContactsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contact) if err != nil { @@ -1807,6 +1816,7 @@ func (c *ContactsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1922,6 +1932,7 @@ func (c *ContactsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contact) if err != nil { @@ -2055,6 +2066,7 @@ func (c *ContactsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.contact) if err != nil { @@ -2197,6 +2209,7 @@ func (c *LocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2333,6 +2346,7 @@ func (c *LocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2457,6 +2471,7 @@ func (c *SettingsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2583,6 +2598,7 @@ func (c *SubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "subscriptions/{id}") @@ -2678,6 +2694,7 @@ func (c *SubscriptionsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { @@ -2805,6 +2822,7 @@ func (c *SubscriptionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2919,6 +2937,7 @@ func (c *SubscriptionsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { @@ -3050,6 +3069,7 @@ func (c *TimelineDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "timeline/{id}") @@ -3157,6 +3177,7 @@ func (c *TimelineGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3337,6 +3358,7 @@ func (c *TimelineInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.timelineitem) if err != nil { @@ -3586,6 +3608,7 @@ func (c *TimelineListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3769,6 +3792,7 @@ func (c *TimelinePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.timelineitem) if err != nil { @@ -3956,6 +3980,7 @@ func (c *TimelineUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.timelineitem) if err != nil { @@ -4157,6 +4182,7 @@ func (c *TimelineAttachmentsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "timeline/{itemId}/attachments/{attachmentId}") @@ -4274,6 +4300,7 @@ func (c *TimelineAttachmentsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4478,6 +4505,7 @@ func (c *TimelineAttachmentsInsertCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var 
body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "timeline/{itemId}/attachments") @@ -4679,6 +4707,7 @@ func (c *TimelineAttachmentsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/ml/v1beta1/ml-api.json b/vendor/google.golang.org/api/ml/v1beta1/ml-api.json index b790eca0a..a8071c772 100644 --- a/vendor/google.golang.org/api/ml/v1beta1/ml-api.json +++ b/vendor/google.golang.org/api/ml/v1beta1/ml-api.json @@ -17,86 +17,200 @@ "ownerName": "Google", "resources": { "projects": { + "methods": { + "getConfig": { + "description": "Get the service account information associated with your project. You need\nthis information in order to grant the service account persmissions for\nthe Google Cloud Storage location where you put your model training code\nfor training the model with Google Cloud Machine Learning.", + "httpMethod": "GET", + "response": { + "$ref": "GoogleCloudMlV1beta1__GetConfigResponse" + }, + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The project name.\n\nAuthorization: requires `Viewer` role on the specified project.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}:getConfig", + "id": "ml.projects.getConfig", + "path": "v1beta1/{+name}:getConfig" + }, + "predict": { + "description": "Performs prediction on the data in the request.\n\n**** REMOVE FROM GENERATED DOCUMENTATION", + "request": { + "$ref": "GoogleCloudMlV1beta1__PredictRequest" + }, + "response": { + "$ref": "GoogleApi__HttpBody" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "pattern": "^projects/.+$", + "location": "path", + "description": "Required. The resource name of a model or a version.\n\nAuthorization: requires `Viewer` role on the parent project.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}:predict", + "path": "v1beta1/{+name}:predict", + "id": "ml.projects.predict" + } + }, "resources": { - "operations": { + "jobs": { "methods": { - "list": { - "flatPath": "v1beta1/projects/{projectsId}/operations", - "id": "ml.projects.operations.list", - "path": "v1beta1/{+name}/operations", - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", - "httpMethod": "GET", + "cancel": { + "response": { + "$ref": "GoogleProtobuf__Empty" + }, "parameterOrder": [ "name" ], + "httpMethod": "POST", + "parameters": { + "name": { + "pattern": "^projects/[^/]+/jobs/[^/]+$", + "location": "path", + "description": "Required. 
The name of the job to cancel.\n\nAuthorization: requires `Editor` role on the parent project.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/jobs/{jobsId}:cancel", + "path": "v1beta1/{+name}:cancel", + "id": "ml.projects.jobs.cancel", + "request": { + "$ref": "GoogleCloudMlV1beta1__CancelJobRequest" + }, + "description": "Cancels a running job." + }, + "list": { + "description": "Lists the jobs in the project.", + "httpMethod": "GET", "response": { - "$ref": "GoogleLongrunning__ListOperationsResponse" + "$ref": "GoogleCloudMlV1beta1__ListJobsResponse" }, + "parameterOrder": [ + "parent" + ], "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { - "filter": { + "pageToken": { + "description": "Optional. A page token to request the next page of results.\n\nYou get the token from the `next_page_token` field of the response from\nthe previous call.", + "type": "string", + "location": "query" + }, + "pageSize": { "location": "query", - "description": "The standard list filter.", - "type": "string" + "description": "Optional. The number of jobs to retrieve per \"page\" of results. If there\nare more remaining results than this number, the response message will\ncontain a valid value in the `next_page_token` field.\n\nThe default value is 20, and the maximum page size is 100.", + "format": "int32", + "type": "integer" }, - "name": { + "parent": { + "pattern": "^projects/[^/]+$", "location": "path", - "description": "The name of the operation collection.", + "description": "Required. The name of the project for which to list jobs.\n\nAuthorization: requires `Viewer` role on the specified project.", "required": true, - "type": "string", - "pattern": "^projects/[^/]+$" + "type": "string" }, - "pageToken": { + "filter": { "location": "query", - "description": "The standard list page token.", + "description": "Optional. Specifies the subset of jobs to retrieve.", "type": "string" - }, - "pageSize": { - "description": "The standard list page size.", - "format": "int32", - "type": "integer", - "location": "query" } - } + }, + "flatPath": "v1beta1/projects/{projectsId}/jobs", + "id": "ml.projects.jobs.list", + "path": "v1beta1/{+parent}/jobs" }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "response": { - "$ref": "GoogleLongrunning__Operation" - }, - "httpMethod": "GET", + "path": "v1beta1/{+name}", + "id": "ml.projects.jobs.get", + "description": "Describes a job.", "parameterOrder": [ "name" ], + "httpMethod": "GET", + "response": { + "$ref": "GoogleCloudMlV1beta1__Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { "name": { - "description": "The name of the operation resource.", + "pattern": "^projects/[^/]+/jobs/[^/]+$", + "location": "path", + "description": "Required. 
The name of the job to get the description of.\n\nAuthorization: requires `Viewer` role on the parent project.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/jobs/{jobsId}" + }, + "create": { + "id": "ml.projects.jobs.create", + "path": "v1beta1/{+parent}/jobs", + "request": { + "$ref": "GoogleCloudMlV1beta1__Job" + }, + "description": "Creates a training or a batch prediction job.", + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "GoogleCloudMlV1beta1__Job" + }, + "parameters": { + "parent": { + "description": "Required. The project name.\n\nAuthorization: requires `Editor` role on the specified project.", "required": true, "type": "string", - "pattern": "^projects/[^/]+/operations/[^/]+$", + "pattern": "^projects/[^/]+$", "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1beta1/projects/{projectsId}/operations/{operationsId}", - "path": "v1beta1/{+name}", - "id": "ml.projects.operations.get" - }, + "flatPath": "v1beta1/projects/{projectsId}/jobs" + } + } + }, + "operations": { + "methods": { "cancel": { + "id": "ml.projects.operations.cancel", + "path": "v1beta1/{+name}:cancel", "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", - "parameterOrder": [ - "name" - ], + "httpMethod": "POST", "response": { "$ref": "GoogleProtobuf__Empty" }, - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" + "parameterOrder": [ + "name" ], "parameters": { "name": { @@ -107,25 +221,92 @@ "location": "path" } }, - "flatPath": "v1beta1/projects/{projectsId}/operations/{operationsId}:cancel", - "path": "v1beta1/{+name}:cancel", - "id": "ml.projects.operations.cancel" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/operations/{operationsId}:cancel" }, "delete": { - "parameterOrder": [ - "name" - ], + "id": "ml.projects.operations.delete", + "path": "v1beta1/{+name}", + "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. 
If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", "httpMethod": "DELETE", "response": { "$ref": "GoogleProtobuf__Empty" }, + "parameterOrder": [ + "name" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { "name": { + "pattern": "^projects/[^/]+/operations/[^/]+$", + "location": "path", "description": "The name of the operation resource to be deleted.", "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/operations/{operationsId}" + }, + "list": { + "response": { + "$ref": "GoogleLongrunning__ListOperationsResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The name of the operation collection.", + "required": true, + "type": "string" + }, + "pageToken": { + "description": "The standard list page token.", "type": "string", + "location": "query" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "filter": { + "location": "query", + "description": "The standard list filter.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/projects/{projectsId}/operations", + "path": "v1beta1/{+name}/operations", + "id": "ml.projects.operations.list", + "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`." + }, + "get": { + "response": { + "$ref": "GoogleLongrunning__Operation" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { "pattern": "^projects/[^/]+/operations/[^/]+$", - "location": "path" + "location": "path", + "description": "The name of the operation resource.", + "required": true, + "type": "string" } }, "scopes": [ @@ -133,8 +314,8 @@ ], "flatPath": "v1beta1/projects/{projectsId}/operations/{operationsId}", "path": "v1beta1/{+name}", - "id": "ml.projects.operations.delete", - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`." + "id": "ml.projects.operations.get", + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice." } } }, @@ -153,11 +334,11 @@ ], "parameters": { "name": { + "pattern": "^projects/[^/]+/models/[^/]+$", "location": "path", "description": "Required. The name of the model.\n\nAuthorization: requires `Editor` role on the parent project.", "required": true, - "type": "string", - "pattern": "^projects/[^/]+/models/[^/]+$" + "type": "string" } }, "flatPath": "v1beta1/projects/{projectsId}/models/{modelsId}", @@ -166,10 +347,27 @@ "description": "Deletes a model.\n\nYou can only delete a model if there are no versions in it. You can delete\nversions by calling\n[projects.models.versions.delete](/ml/reference/rest/v1beta1/projects.models.versions/delete)." 
}, "list": { + "path": "v1beta1/{+parent}/models", + "id": "ml.projects.models.list", + "description": "Lists the models in a project.\n\nEach project can contain multiple models, and each model can have multiple\nversions.", + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "response": { + "$ref": "GoogleCloudMlV1beta1__ListModelsResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { + "parent": { + "description": "Required. The name of the project whose models are to be listed.\n\nAuthorization: requires `Viewer` role on the specified project.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, "pageToken": { "description": "Optional. A page token to request the next page of results.\n\nYou get the token from the `next_page_token` field of the response from\nthe previous call.", "type": "string", @@ -180,26 +378,9 @@ "description": "Optional. The number of models to retrieve per \"page\" of results. If there\nare more remaining results than this number, the response message will\ncontain a valid value in the `next_page_token` field.\n\nThe default value is 20, and the maximum page size is 100.", "format": "int32", "type": "integer" - }, - "parent": { - "description": "Required. The name of the project whose models are to be listed.\n\nAuthorization: requires `Viewer` role on the specified project.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" } }, - "flatPath": "v1beta1/projects/{projectsId}/models", - "path": "v1beta1/{+parent}/models", - "id": "ml.projects.models.list", - "description": "Lists the models in a project.\n\nEach project can contain multiple models, and each model can have multiple\nversions.", - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "response": { - "$ref": "GoogleCloudMlV1beta1__ListModelsResponse" - } + "flatPath": "v1beta1/projects/{projectsId}/models" }, "get": { "httpMethod": "GET", @@ -209,18 +390,18 @@ "parameterOrder": [ "name" ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], "parameters": { "name": { - "required": true, - "type": "string", "pattern": "^projects/[^/]+/models/[^/]+$", "location": "path", - "description": "Required. The name of the model.\n\nAuthorization: requires `Viewer` role on the parent project." + "description": "Required. The name of the model.\n\nAuthorization: requires `Viewer` role on the parent project.", + "required": true, + "type": "string" } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "flatPath": "v1beta1/projects/{projectsId}/models/{modelsId}", "id": "ml.projects.models.get", "path": "v1beta1/{+name}", @@ -239,11 +420,11 @@ ], "parameters": { "parent": { - "location": "path", "description": "Required. The project name.\n\nAuthorization: requires `Editor` role on the specified project.", "required": true, "type": "string", - "pattern": "^projects/[^/]+$" + "pattern": "^projects/[^/]+$", + "location": "path" } }, "flatPath": "v1beta1/projects/{projectsId}/models", @@ -260,20 +441,20 @@ "methods": { "delete": { "description": "Deletes a model version.\n\nEach model can have multiple versions deployed and in use at any given\ntime. 
Use this method to remove a single version.\n\nNote: You cannot delete the version that is set as the default version\nof the model unless it is the only remaining version.", - "response": { - "$ref": "GoogleLongrunning__Operation" - }, "parameterOrder": [ "name" ], "httpMethod": "DELETE", + "response": { + "$ref": "GoogleLongrunning__Operation" + }, "parameters": { "name": { + "pattern": "^projects/[^/]+/models/[^/]+/versions/[^/]+$", "location": "path", "description": "Required. The name of the version. You can get the names of all the\nversions of a model by calling\n[projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).\n\nAuthorization: requires `Editor` role on the parent project.", "required": true, - "type": "string", - "pattern": "^projects/[^/]+/models/[^/]+/versions/[^/]+$" + "type": "string" } }, "scopes": [ @@ -284,16 +465,14 @@ "id": "ml.projects.models.versions.delete" }, "list": { - "path": "v1beta1/{+parent}/versions", - "id": "ml.projects.models.versions.list", "description": "Gets basic information about all the versions of a model.\n\nIf you expect that a model has a lot of versions, or if you need to handle\nonly a limited number of results at a time, you can request that the list\nbe retrieved in batches (called pages):", - "parameterOrder": [ - "parent" - ], "httpMethod": "GET", "response": { "$ref": "GoogleCloudMlV1beta1__ListVersionsResponse" }, + "parameterOrder": [ + "parent" + ], "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], @@ -304,26 +483,29 @@ "type": "string" }, "pageSize": { + "location": "query", "description": "Optional. The number of versions to retrieve per \"page\" of results. If\nthere are more remaining results than this number, the response message\nwill contain a valid value in the `next_page_token` field.\n\nThe default value is 20, and the maximum page size is 100.", "format": "int32", - "type": "integer", - "location": "query" + "type": "integer" }, "parent": { - "location": "path", "description": "Required. The name of the model for which to list the version.\n\nAuthorization: requires `Viewer` role on the parent project.", "required": true, "type": "string", - "pattern": "^projects/[^/]+/models/[^/]+$" + "pattern": "^projects/[^/]+/models/[^/]+$", + "location": "path" } }, - "flatPath": "v1beta1/projects/{projectsId}/models/{modelsId}/versions" + "flatPath": "v1beta1/projects/{projectsId}/models/{modelsId}/versions", + "id": "ml.projects.models.versions.list", + "path": "v1beta1/{+parent}/versions" }, "get": { - "httpMethod": "GET", + "description": "Gets information about a model version.\n\nModels can have multiple versions. You can call\n[projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list)\nto get the same information that this method returns for all of the\nversions of a model.", "parameterOrder": [ "name" ], + "httpMethod": "GET", "response": { "$ref": "GoogleCloudMlV1beta1__Version" }, @@ -332,23 +514,22 @@ ], "parameters": { "name": { + "pattern": "^projects/[^/]+/models/[^/]+/versions/[^/]+$", "location": "path", "description": "Required. 
The name of the version.\n\nAuthorization: requires `Viewer` role on the parent project.", "required": true, - "type": "string", - "pattern": "^projects/[^/]+/models/[^/]+/versions/[^/]+$" + "type": "string" } }, "flatPath": "v1beta1/projects/{projectsId}/models/{modelsId}/versions/{versionsId}", - "id": "ml.projects.models.versions.get", "path": "v1beta1/{+name}", - "description": "Gets information about a model version.\n\nModels can have multiple versions. You can call\n[projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list)\nto get the same information that this method returns for all of the\nversions of a model." + "id": "ml.projects.models.versions.get" }, "create": { - "description": "Creates a new version of a model from a trained TensorFlow model.\n\nIf the version created in the cloud by this call is the first deployed\nversion of the specified model, it will be made the default version of the\nmodel. When you add a version to a model that already has one or more\nversions, the default version does not automatically change. If you want a\nnew version to be the default, you must call\n[projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).", "request": { "$ref": "GoogleCloudMlV1beta1__Version" }, + "description": "Creates a new version of a model from a trained TensorFlow model.\n\nIf the version created in the cloud by this call is the first deployed\nversion of the specified model, it will be made the default version of the\nmodel. When you add a version to a model that already has one or more\nversions, the default version does not automatically change. If you want a\nnew version to be the default, you must call\n[projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).", "httpMethod": "POST", "parameterOrder": [ "parent" @@ -356,9 +537,6 @@ "response": { "$ref": "GoogleLongrunning__Operation" }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], "parameters": { "parent": { "pattern": "^projects/[^/]+/models/[^/]+$", @@ -368,33 +546,36 @@ "type": "string" } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "flatPath": "v1beta1/projects/{projectsId}/models/{modelsId}/versions", "id": "ml.projects.models.versions.create", "path": "v1beta1/{+parent}/versions" }, "setDefault": { - "httpMethod": "POST", - "parameterOrder": [ - "name" - ], "response": { "$ref": "GoogleCloudMlV1beta1__Version" }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { "name": { - "pattern": "^projects/[^/]+/models/[^/]+/versions/[^/]+$", - "location": "path", "description": "Required. The name of the version to make the default for the model. 
You\ncan get the names of all the versions of a model by calling\n[projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).\n\nAuthorization: requires `Editor` role on the parent project.", "required": true, - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/models/[^/]+/versions/[^/]+$", + "location": "path" } }, "flatPath": "v1beta1/projects/{projectsId}/models/{modelsId}/versions/{versionsId}:setDefault", - "id": "ml.projects.models.versions.setDefault", "path": "v1beta1/{+name}:setDefault", + "id": "ml.projects.models.versions.setDefault", "description": "Designates a version to be the default for the model.\n\nThe default version is used for prediction requests made against the model\nthat don't specify a version.\n\nThe first version to be created for a model is automatically set as the\ndefault. You must make any subsequent changes to the default version\nsetting manually using this method.", "request": { "$ref": "GoogleCloudMlV1beta1__SetDefaultVersionRequest" @@ -403,196 +584,14 @@ } } } - }, - "jobs": { - "methods": { - "cancel": { - "flatPath": "v1beta1/projects/{projectsId}/jobs/{jobsId}:cancel", - "id": "ml.projects.jobs.cancel", - "path": "v1beta1/{+name}:cancel", - "description": "Cancels a running job.", - "request": { - "$ref": "GoogleCloudMlV1beta1__CancelJobRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "GoogleProtobuf__Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "description": "Required. The name of the job to cancel.\n\nAuthorization: requires `Editor` role on the parent project.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/jobs/[^/]+$", - "location": "path" - } - } - }, - "list": { - "httpMethod": "GET", - "response": { - "$ref": "GoogleCloudMlV1beta1__ListJobsResponse" - }, - "parameterOrder": [ - "parent" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The name of the project for which to list jobs.\n\nAuthorization: requires `Viewer` role on the specified project.", - "required": true, - "type": "string" - }, - "filter": { - "location": "query", - "description": "Optional. Specifies the subset of jobs to retrieve.", - "type": "string" - }, - "pageToken": { - "type": "string", - "location": "query", - "description": "Optional. A page token to request the next page of results.\n\nYou get the token from the `next_page_token` field of the response from\nthe previous call." - }, - "pageSize": { - "description": "Optional. The number of jobs to retrieve per \"page\" of results. If there\nare more remaining results than this number, the response message will\ncontain a valid value in the `next_page_token` field.\n\nThe default value is 20, and the maximum page size is 100.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/jobs", - "id": "ml.projects.jobs.list", - "path": "v1beta1/{+parent}/jobs", - "description": "Lists the jobs in the project." - }, - "get": { - "description": "Describes a job.", - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "GoogleCloudMlV1beta1__Job" - }, - "parameters": { - "name": { - "location": "path", - "description": "Required. 
The name of the job to get the description of.\n\nAuthorization: requires `Viewer` role on the parent project.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/jobs/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/jobs/{jobsId}", - "id": "ml.projects.jobs.get", - "path": "v1beta1/{+name}" - }, - "create": { - "id": "ml.projects.jobs.create", - "path": "v1beta1/{+parent}/jobs", - "request": { - "$ref": "GoogleCloudMlV1beta1__Job" - }, - "description": "Creates a training or a batch prediction job.", - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "GoogleCloudMlV1beta1__Job" - }, - "parameters": { - "parent": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The project name.\n\nAuthorization: requires `Editor` role on the specified project.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1beta1/projects/{projectsId}/jobs" - } - } - } - }, - "methods": { - "predict": { - "response": { - "$ref": "GoogleApi__HttpBody" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "location": "path", - "description": "Required. The resource name of a model or a version.\n\nAuthorization: requires `Viewer` role on the parent project.", - "required": true, - "type": "string", - "pattern": "^projects/.+$" - } - }, - "flatPath": "v1beta1/projects/{projectsId}:predict", - "path": "v1beta1/{+name}:predict", - "id": "ml.projects.predict", - "description": "Performs prediction on the data in the request.\n\nResponses are very similar to requests. There are two top-level fields,\neach of which are JSON lists:\n\n\u003cdl\u003e\n \u003cdt\u003epredictions\u003c/dt\u003e\n \u003cdd\u003eThe list of predictions, one per instance in the request.\u003c/dd\u003e\n \u003cdt\u003eerror\u003c/dt\u003e\n \u003cdd\u003eAn error message returned instead of a prediction list if any\n instance produced an error.\u003c/dd\u003e\n\u003c/dl\u003e\n\nIf the call is successful, the response body will contain one prediction\nentry per instance in the request body. If prediction fails for any\ninstance, the response body will contain no predictions and will contian\na single error entry instead.\n\nEven though there is one prediction per instance, the format of a\nprediction is not directly related to the format of an instance.\nPredictions take whatever format is specified in the outputs collection\ndefined in the model. The collection of predictions is returned in a JSON\nlist. Each member of the list can be a simple value, a list, or a JSON\nobject of any complexity. If your model has more than one output tensor,\neach prediction will be a JSON object containing a name/value pair for each\noutput. 
The names identify the output aliases in the graph.\n\nThe following examples show some possible responses:\n\nA simple set of predictions for three input instances, where each\nprediction is an integer value:\n\u003cpre\u003e\n{\"predictions\": [5, 4, 3]}\n\u003c/pre\u003e\nA more complex set of predictions, each containing two named values that\ncorrespond to output tensors, named **label** and **scores** respectively.\nThe value of **label** is the predicted category (\"car\" or \"beach\") and\n**scores** contains a list of probabilities for that instance across the\npossible categories.\n\u003cpre\u003e\n{\"predictions\": [{\"label\": \"beach\", \"scores\": [0.1, 0.9]},\n {\"label\": \"car\", \"scores\": [0.75, 0.25]}]}\n\u003c/pre\u003e\nA response when there is an error processing an input instance:\n\u003cpre\u003e\n{\"error\": \"Divide by zero\"}\n\u003c/pre\u003e", - "request": { - "$ref": "GoogleCloudMlV1beta1__PredictRequest" - } - }, - "getConfig": { - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "GoogleCloudMlV1beta1__GetConfigResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "location": "path", - "description": "Required. The project name.\n\nAuthorization: requires `Viewer` role on the specified project.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$" - } - }, - "flatPath": "v1beta1/projects/{projectsId}:getConfig", - "id": "ml.projects.getConfig", - "path": "v1beta1/{+name}:getConfig", - "description": "Get the service account information associated with your project. You need\nthis information in order to grant the service account persmissions for\nthe Google Cloud Storage location where you put your model training code\nfor training the model with Google Cloud Machine Learning." } } } }, "parameters": { - "pp": { - "description": "Pretty-print response.", - "type": "boolean", - "default": "true", + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", "location": "query" }, "oauth_token": { @@ -600,35 +599,33 @@ "description": "OAuth 2.0 token for the current user.", "type": "string" }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" - }, "upload_protocol": { "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "type": "string", "location": "query" }, "prettyPrint": { - "location": "query", "description": "Returns response with indentations and line breaks.", "type": "boolean", - "default": "true" + "default": "true", + "location": "query" }, "uploadType": { + "location": "query", "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", "type": "string", "location": "query" }, - "fields": { + "callback": { "location": "query", - "description": "Selector specifying which fields to include in a partial response.", + "description": "JSONP", "type": "string" }, "$.xgafv": { - "description": "V1 error format.", - "type": "string", "enumDescriptions": [ "v1 error format", "v2 error format" @@ -637,15 +634,11 @@ "enum": [ "1", "2" - ] - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" + ], + "description": "V1 error format.", + "type": "string" }, "alt": { - "type": "string", "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", @@ -658,161 +651,193 @@ "json", "media", "proto" - ] - }, - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" + ], + "type": "string" }, "key": { + "location": "query", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", "type": "string", "location": "query" }, "quotaUser": { - "type": "string", "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" } }, "version": "v1beta1", "baseUrl": "https://ml.googleapis.com/", - "kind": "discovery#restDescription", "servicePath": "", "description": "An API to enable creating and using machine learning models.", + "kind": "discovery#restDescription", "basePath": "", - "revision": "20170111", + "revision": "20170216", "documentationLink": "https://cloud.google.com/ml/", "id": "ml:v1beta1", "discoveryVersion": "v1", "version_module": "True", "schemas": { - "GoogleCloudMlV1beta1__TrainingOutput": { - "type": "object", + "GoogleCloudMlV1beta1__PredictRequest": { "properties": { - "isHyperparameterTuningJob": { - "description": "Whether this job is a hyperparameter tuning job.", - "type": "boolean" + "httpBody": { + "$ref": "GoogleApi__HttpBody", + "description": "\nRequired. The prediction request body." + } + }, + "id": "GoogleCloudMlV1beta1__PredictRequest", + "description": "Request for predictions to be issued against a trained model.\n\nThe body of the request is a single JSON object with a single top-level\nfield:\n\n\u003cdl\u003e\n \u003cdt\u003einstances\u003c/dt\u003e\n \u003cdd\u003eA JSON array containing values representing the instances to use for\n prediction.\u003c/dd\u003e\n\u003c/dl\u003e\n\nThe structure of each element of the instances list is determined by your\nmodel's input definition. Instances can include named inputs or can contain\nonly unlabeled values.\n\nNot all data includes named inputs. Some instances will be simple\nJSON values (boolean, number, or string). However, instances are often lists\nof simple values, or complex nested lists. 
Here are some examples of request\nbodies:\n\nCSV data with each row encoded as a string value:\n\u003cpre\u003e\n{\"instances\": [\"1.0,true,\\\\\"x\\\\\"\", \"-2.0,false,\\\\\"y\\\\\"\"]}\n\u003c/pre\u003e\nPlain text:\n\u003cpre\u003e\n{\"instances\": [\"the quick brown fox\", \"la bruja le dio\"]}\n\u003c/pre\u003e\nSentences encoded as lists of words (vectors of strings):\n\u003cpre\u003e\n{\n \"instances\": [\n [\"the\",\"quick\",\"brown\"],\n [\"la\",\"bruja\",\"le\"],\n ...\n ]\n}\n\u003c/pre\u003e\nFloating point scalar values:\n\u003cpre\u003e\n{\"instances\": [0.0, 1.1, 2.2]}\n\u003c/pre\u003e\nVectors of integers:\n\u003cpre\u003e\n{\n \"instances\": [\n [0, 1, 2],\n [3, 4, 5],\n ...\n ]\n}\n\u003c/pre\u003e\nTensors (in this case, two-dimensional tensors):\n\u003cpre\u003e\n{\n \"instances\": [\n [\n [0, 1, 2],\n [3, 4, 5]\n ],\n ...\n ]\n}\n\u003c/pre\u003e\nImages can be represented different ways. In this encoding scheme the first\ntwo dimensions represent the rows and columns of the image, and the third\ncontains lists (vectors) of the R, G, and B values for each pixel.\n\u003cpre\u003e\n{\n \"instances\": [\n [\n [\n [138, 30, 66],\n [130, 20, 56],\n ...\n ],\n [\n [126, 38, 61],\n [122, 24, 57],\n ...\n ],\n ...\n ],\n ...\n ]\n}\n\u003c/pre\u003e\nJSON strings must be encoded as UTF-8. To send binary data, you must\nbase64-encode the data and mark it as binary. To mark a JSON string\nas binary, replace it with a JSON object with a single attribute named `b64`:\n\u003cpre\u003e{\"b64\": \"...\"} \u003c/pre\u003e\nFor example:\n\nTwo Serialized tf.Examples (fake data, for illustrative purposes only):\n\u003cpre\u003e\n{\"instances\": [{\"b64\": \"X5ad6u\"}, {\"b64\": \"IA9j4nx\"}]}\n\u003c/pre\u003e\nTwo JPEG image byte strings (fake data, for illustrative purposes only):\n\u003cpre\u003e\n{\"instances\": [{\"b64\": \"ASa8asdf\"}, {\"b64\": \"JLK7ljk3\"}]}\n\u003c/pre\u003e\nIf your data includes named references, format each instance as a JSON object\nwith the named references as the keys:\n\nJSON input data to be preprocessed:\n\u003cpre\u003e\n{\n \"instances\": [\n {\n \"a\": 1.0,\n \"b\": true,\n \"c\": \"x\"\n },\n {\n \"a\": -2.0,\n \"b\": false,\n \"c\": \"y\"\n }\n ]\n}\n\u003c/pre\u003e\nSome models have an underlying TensorFlow graph that accepts multiple input\ntensors. In this case, you should use the names of JSON name/value pairs to\nidentify the input tensors, as shown in the following exmaples:\n\nFor a graph with input tensor aliases \"tag\" (string) and \"image\"\n(base64-encoded string):\n\u003cpre\u003e\n{\n \"instances\": [\n {\n \"tag\": \"beach\",\n \"image\": {\"b64\": \"ASa8asdf\"}\n },\n {\n \"tag\": \"car\",\n \"image\": {\"b64\": \"JLK7ljk3\"}\n }\n ]\n}\n\u003c/pre\u003e\nFor a graph with input tensor aliases \"tag\" (string) and \"image\"\n(3-dimensional array of 8-bit ints):\n\u003cpre\u003e\n{\n \"instances\": [\n {\n \"tag\": \"beach\",\n \"image\": [\n [\n [138, 30, 66],\n [130, 20, 56],\n ...\n ],\n [\n [126, 38, 61],\n [122, 24, 57],\n ...\n ],\n ...\n ]\n },\n {\n \"tag\": \"car\",\n \"image\": [\n [\n [255, 0, 102],\n [255, 0, 97],\n ...\n ],\n [\n [254, 1, 101],\n [254, 2, 93],\n ...\n ],\n ...\n ]\n },\n ...\n ]\n}\n\u003c/pre\u003e\nIf the call is successful, the response body will contain one prediction\nentry per instance in the request body. 
If prediction fails for any\ninstance, the response body will contain no predictions and will contian\na single error entry instead.", + "type": "object" + }, + "GoogleCloudMlV1beta1__PredictionInput": { + "properties": { + "versionName": { + "description": "Use this field if you want to specify a version of the model to use. The\nstring is formatted the same way as `model_version`, with the addition\nof the version information:\n\n`\"projects/\u003cvar\u003e[YOUR_PROJECT]\u003c/var\u003e/models/\u003cvar\u003eYOUR_MODEL/versions/\u003cvar\u003e[YOUR_VERSION]\u003c/var\u003e\"`", + "type": "string" }, - "consumedMLUnits": { - "description": "The amount of ML units consumed by the job.", - "format": "double", - "type": "number" + "modelName": { + "description": "Use this field if you want to use the default version for the specified\nmodel. The string must use the following format:\n\n`\"projects/\u003cvar\u003e[YOUR_PROJECT]\u003c/var\u003e/models/\u003cvar\u003e[YOUR_MODEL]\u003c/var\u003e\"`", + "type": "string" }, - "trials": { - "description": "Results for individual Hyperparameter trials.\nOnly set for hyperparameter tuning jobs.", + "outputPath": { + "description": "Required. The output Google Cloud Storage location.", + "type": "string" + }, + "uri": { + "description": "Use this field if you want to specify a Google Cloud Storage path for\nthe model to use.", + "type": "string" + }, + "maxWorkerCount": { + "description": "Optional. The maximum number of workers to be used for parallel processing.\nDefaults to 10 if not specified.", + "format": "int64", + "type": "string" + }, + "dataFormat": { + "enumDescriptions": [ + "Unspecified format.", + "The source file is a text file with instances separated by the\nnew-line character.", + "The source file is a TFRecord file.", + "The source file is a GZIP-compressed TFRecord file." + ], + "enum": [ + "DATA_FORMAT_UNSPECIFIED", + "TEXT", + "TF_RECORD", + "TF_RECORD_GZIP" + ], + "description": "Required. The format of the input data files.", + "type": "string" + }, + "runtimeVersion": { + "description": "Optional. The Google Cloud ML runtime version to use for this batch\nprediction. If not set, Google Cloud ML will pick the runtime version used\nduring the CreateVersion request for this model version, or choose the\nlatest stable version when model version information is not available\nsuch as when the model is specified by uri.", + "type": "string" + }, + "inputPaths": { + "description": "Required. The Google Cloud Storage location of the input data files.\nMay contain wildcards.", "type": "array", "items": { - "$ref": "GoogleCloudMlV1beta1__HyperparameterOutput" + "type": "string" } }, - "completedTrialCount": { - "description": "The number of hyperparameter tuning trials that completed successfully.\nOnly set for hyperparameter tuning jobs.", - "format": "int64", + "region": { + "description": "Required. The Google Compute Engine region to run the prediction job in.", "type": "string" } }, - "id": "GoogleCloudMlV1beta1__TrainingOutput", - "description": "Represents results of a training job. Output only." + "id": "GoogleCloudMlV1beta1__PredictionInput", + "description": "Represents input parameters for a prediction job.", + "type": "object" }, - "GoogleCloudMlV1beta1__HyperparameterSpec": { - "description": "Represents a set of hyperparameters to optimize.", + "GoogleApi__HttpBody": { + "description": "Message that represents an arbitrary HTTP body. 
It should only be used for\npayload formats that can't be represented as JSON, such as raw binary or\nan HTML page.\n\n\nThis message can be used both in streaming and non-streaming API methods in\nthe request as well as the response.\n\nIt can be used as a top-level request field, which is convenient if one\nwants to extract parameters from either the URL or HTTP template into the\nrequest fields and also want access to the raw HTTP body.\n\nExample:\n\n message GetResourceRequest {\n // A unique request id.\n string request_id = 1;\n\n // The raw HTTP body is bound to this field.\n google.api.HttpBody http_body = 2;\n }\n\n service ResourceService {\n rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);\n rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty);\n }\n\nExample with streaming methods:\n\n service CaldavService {\n rpc GetCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n rpc UpdateCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n }\n\nUse of this type only changes how the request and response bodies are\nhandled, all other features will continue to work unchanged.", "type": "object", "properties": { - "maxParallelTrials": { - "description": "Optional. The number of training trials to run concurrently.\nYou can reduce the time it takes to perform hyperparameter tuning by adding\ntrials in parallel. However, each trail only benefits from the information\ngained in completed trials. That means that a trial does not get access to\nthe results of trials running at the same time, which could reduce the\nquality of the overall optimization.\n\nEach trial will use the same scale tier and machine types.\n\nDefaults to one.", - "format": "int32", - "type": "integer" + "data": { + "description": "HTTP body binary data.", + "format": "byte", + "type": "string" }, - "goal": { - "enum": [ - "GOAL_TYPE_UNSPECIFIED", - "MAXIMIZE", - "MINIMIZE" - ], - "description": "Required. The type of goal to use for tuning. Available types are\n`MAXIMIZE` and `MINIMIZE`.\n\nDefaults to `MAXIMIZE`.", - "type": "string", - "enumDescriptions": [ - "Goal Type will default to maximize.", - "Maximize the goal metric.", - "Minimize the goal metric." - ] + "contentType": { + "description": "The HTTP Content-Type string representing the content type of the body.", + "type": "string" + } + }, + "id": "GoogleApi__HttpBody" + }, + "GoogleCloudMlV1beta1__ListVersionsResponse": { + "properties": { + "nextPageToken": { + "description": "Optional. Pass this token as the `page_token` field of the request for a\nsubsequent call.", + "type": "string" }, - "params": { - "description": "Required. The set of parameters to tune.", + "versions": { + "description": "The list of versions.", "type": "array", "items": { - "$ref": "GoogleCloudMlV1beta1__ParameterSpec" + "$ref": "GoogleCloudMlV1beta1__Version" } - }, - "maxTrials": { - "description": "Optional. 
How many training trials should be attempted to optimize\nthe specified hyperparameters.\n\nDefaults to one.", - "format": "int32", - "type": "integer" } }, - "id": "GoogleCloudMlV1beta1__HyperparameterSpec" + "id": "GoogleCloudMlV1beta1__ListVersionsResponse", + "description": "Response message for the ListVersions method.", + "type": "object" }, - "GoogleCloudMlV1beta1__OperationMetadata": { - "description": "Represents the metadata of the long-running operation.", + "GoogleCloudMlV1beta1__Version": { + "description": "Represents a version of the model.\n\nEach version is a trained model deployed in the cloud, ready to handle\nprediction requests. A model can have multiple versions. You can get\ninformation about all of the versions of a given model by calling\n[projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).", "type": "object", "properties": { - "startTime": { - "description": "The time operation processing started.", - "format": "google-datetime", + "runtimeVersion": { + "description": "Optional. The Google Cloud ML runtime version to use for this deployment.\nIf not set, Google Cloud ML will choose a version.", "type": "string" }, - "isCancellationRequested": { - "description": "Indicates whether a request to cancel this operation has been made.", - "type": "boolean" + "lastUseTime": { + "description": "Output only. The time the version was last used for prediction.", + "format": "google-datetime", + "type": "string" }, - "createTime": { - "type": "string", - "description": "The time the operation was submitted.", - "format": "google-datetime" + "description": { + "description": "Optional. The description specified for the version when it was created.", + "type": "string" }, - "modelName": { - "description": "Contains the name of the model associated with the operation.", + "deploymentUri": { + "description": "Required. The Google Cloud Storage location of the trained model used to\ncreate the version. See the\n[overview of model deployment](/ml/docs/concepts/deployment-overview) for\nmore informaiton.\n\nWhen passing Version to\n[projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create)\nthe model service uses the specified location as the source of the model.\nOnce deployed, the model version is hosted by the prediction service, so\nthis location is useful only as a historical record.", "type": "string" }, - "version": { - "$ref": "GoogleCloudMlV1beta1__Version", - "description": "Contains the version associated with the operation." + "isDefault": { + "description": "Output only. If true, this version will be used to handle prediction\nrequests that do not specify a version.\n\nYou can change the default version by calling\n[projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).", + "type": "boolean" }, - "endTime": { - "type": "string", - "description": "The time operation processing completed.", - "format": "google-datetime" + "createTime": { + "description": "Output only. The time the version was created.", + "format": "google-datetime", + "type": "string" }, - "operationType": { - "enumDescriptions": [ - "Unspecified operation type.", - "An operation to create a new version.", - "An operation to delete an existing version.", - "An operation to delete an existing model." 
- ], - "enum": [ - "OPERATION_TYPE_UNSPECIFIED", - "CREATE_VERSION", - "DELETE_VERSION", - "DELETE_MODEL" - ], - "description": "The operation type.", + "name": { + "description": "Required.The name specified for the version when it was created.\n\nThe version name must be unique within the model it is created in.", "type": "string" } }, - "id": "GoogleCloudMlV1beta1__OperationMetadata" + "id": "GoogleCloudMlV1beta1__Version" }, - "GoogleCloudMlV1beta1__ListModelsResponse": { - "description": "Response message for the ListModels method.", + "GoogleCloudMlV1beta1__ListJobsResponse": { + "description": "Response message for the ListJobs method.", "type": "object", "properties": { - "models": { - "description": "The list of models.", + "jobs": { + "description": "The list of jobs.", "type": "array", "items": { - "$ref": "GoogleCloudMlV1beta1__Model" + "$ref": "GoogleCloudMlV1beta1__Job" } }, "nextPageToken": { @@ -820,593 +845,669 @@ "type": "string" } }, - "id": "GoogleCloudMlV1beta1__ListModelsResponse" + "id": "GoogleCloudMlV1beta1__ListJobsResponse" }, - "GoogleLongrunning__Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "GoogleCloudMlV1beta1__CancelJobRequest": { + "description": "Request message for the CancelJob method.", + "type": "object", + "properties": {}, + "id": "GoogleCloudMlV1beta1__CancelJobRequest" + }, + "GoogleCloudMlV1beta1__Model": { + "description": "Represents a machine learning solution.\n\nA model can have multiple versions, each of which is a deployed, trained\nmodel ready to receive prediction requests. The model itself is just a\ncontainer.", "type": "object", "properties": { - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" + "defaultVersion": { + "$ref": "GoogleCloudMlV1beta1__Version", + "description": "Output only. The default version of the model. This version will be used to\nhandle prediction requests that do not specify a version.\n\nYou can change the default version by calling\n[projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault)." }, - "response": { - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", - "type": "object", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" + "regions": { + "description": "Optional. The list of regions where the model is going to be deployed.\nCurrently only one region per model is supported.\nDefaults to 'us-central1' if nothing is set.", + "type": "array", + "items": { + "type": "string" } }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "description": "Required. 
The name specified for the model when it was created.\n\nThe model name must be unique within the project it is created in.", "type": "string" }, - "error": { - "$ref": "GoogleRpc__Status", - "description": "The error result of the operation in case of failure or cancellation." + "description": { + "description": "Optional. The description specified for the model when it was created.", + "type": "string" }, - "metadata": { - "type": "object", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any." + "onlinePredictionLogging": { + "description": "Optional. If true, enables StackDriver Logging for online prediction.\nDefault is false.", + "type": "boolean" } }, - "id": "GoogleLongrunning__Operation" + "id": "GoogleCloudMlV1beta1__Model" }, - "GoogleCloudMlV1beta1__HyperparameterOutput": { - "description": "Represents the result of a single hyperparameter tuning trial from a\ntraining job. The TrainingOutput object that is returned on successful\ncompletion of a training job with hyperparameter tuning includes a list\nof HyperparameterOutput objects, one for each successful trial.", + "GoogleCloudMlV1beta1__Job": { + "description": "Represents a training or prediction job.", "type": "object", "properties": { - "allMetrics": { - "description": "All recorded object metrics for this trial.", - "type": "array", - "items": { - "$ref": "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric" - } + "endTime": { + "description": "Output only. When the job processing was completed.", + "format": "google-datetime", + "type": "string" }, - "finalMetric": { - "$ref": "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric", - "description": "The final objective metric seen for this trial." + "startTime": { + "description": "Output only. When the job processing was started.", + "format": "google-datetime", + "type": "string" }, - "hyperparameters": { - "additionalProperties": { - "type": "string" - }, - "description": "The hyperparameters given to this trial.", - "type": "object" + "predictionOutput": { + "$ref": "GoogleCloudMlV1beta1__PredictionOutput", + "description": "The current prediction job result." }, - "trialId": { - "description": "The trial id for these results.", + "trainingOutput": { + "$ref": "GoogleCloudMlV1beta1__TrainingOutput", + "description": "The current training job result." + }, + "trainingInput": { + "$ref": "GoogleCloudMlV1beta1__TrainingInput", + "description": "Input parameters to create a training job." + }, + "createTime": { + "description": "Output only. When the job was created.", + "format": "google-datetime", + "type": "string" + }, + "predictionInput": { + "description": "Input parameters to create a prediction job.", + "$ref": "GoogleCloudMlV1beta1__PredictionInput" + }, + "state": { + "enum": [ + "STATE_UNSPECIFIED", + "QUEUED", + "PREPARING", + "RUNNING", + "SUCCEEDED", + "FAILED", + "CANCELLING", + "CANCELLED" + ], + "description": "Output only. 
The detailed state of a job.", + "type": "string", + "enumDescriptions": [ + "The job state is unspecified.", + "The job has been just created and processing has not yet begun.", + "The service is preparing to run the job.", + "The job is in progress.", + "The job completed successfully.", + "The job failed.\n`error_message` should contain the details of the failure.", + "The job is being cancelled.\n`error_message` should describe the reason for the cancellation.", + "The job has been cancelled.\n`error_message` should describe the reason for the cancellation." + ] + }, + "jobId": { + "description": "Required. The user-specified id of the job.", + "type": "string" + }, + "errorMessage": { + "description": "Output only. The details of a failure or a cancellation.", "type": "string" } }, - "id": "GoogleCloudMlV1beta1__HyperparameterOutput" - }, - "GoogleProtobuf__Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "GoogleProtobuf__Empty" + "id": "GoogleCloudMlV1beta1__Job" }, - "GoogleRpc__Status": { + "GoogleCloudMlV1beta1__TrainingInput": { "properties": { - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "parameterServerCount": { + "description": "Optional. The number of parameter server replicas to use for the training\njob. Each replica in the cluster will be of the type specified in\n`parameter_server_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`.If you\nset this value, you must also set `parameter_server_type`.", + "format": "int64", "type": "string" }, - "details": { + "packageUris": { + "description": "Required. The Google Cloud Storage location of the packages with\nthe training program and any additional dependencies.", "type": "array", "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use." + "type": "string" + } + }, + "workerCount": { + "description": "Optional. The number of worker replicas to use for the training job. Each\nreplica in the cluster will be of the type specified in `worker_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`. If you\nset this value, you must also set `worker_type`.", + "format": "int64", + "type": "string" + }, + "masterType": { + "description": "Optional. 
Specifies the type of virtual machine to use for your training\njob's master worker.\n\nThe following types are supported:\n\n\u003cdl\u003e\n \u003cdt\u003estandard\u003c/dt\u003e\n \u003cdd\u003e\n A basic machine configuration suitable for training simple models with\n small to moderate datasets.\n \u003c/dd\u003e\n \u003cdt\u003elarge_model\u003c/dt\u003e\n \u003cdd\u003e\n A machine with a lot of memory, specially suited for parameter servers\n when your model is large (having many hidden layers or layers with very\n large numbers of nodes).\n \u003c/dd\u003e\n \u003cdt\u003ecomplex_model_s\u003c/dt\u003e\n \u003cdd\u003e\n A machine suitable for the master and workers of the cluster when your\n model requires more computation than the standard machine can handle\n satisfactorily.\n \u003c/dd\u003e\n \u003cdt\u003ecomplex_model_m\u003c/dt\u003e\n \u003cdd\u003e\n A machine with roughly twice the number of cores and roughly double the\n memory of \u003ccode suppresswarning=\"true\"\u003ecomplex_model_s\u003c/code\u003e.\n \u003c/dd\u003e\n \u003cdt\u003ecomplex_model_l\u003c/dt\u003e\n \u003cdd\u003e\n A machine with roughly twice the number of cores and roughly double the\n memory of \u003ccode suppresswarning=\"true\"\u003ecomplex_model_m\u003c/code\u003e.\n \u003c/dd\u003e\n \u003cdt\u003estandard_gpu\u003c/dt\u003e\n \u003cdd\u003e\n A machine equivalent to \u003ccode suppresswarning=\"true\"\u003estandard\u003c/code\u003e that\n also includes a\n \u003ca href=\"ml/docs/how-tos/using-gpus\"\u003e\n GPU that you can use in your trainer\u003c/a\u003e.\n \u003c/dd\u003e\n \u003cdt\u003ecomplex_model_m_gpu\u003c/dt\u003e\n \u003cdd\u003e\n A machine equivalent to\n \u003ccode suppresswarning=\"true\"\u003ecoplex_model_m\u003c/code\u003e that also includes\n four GPUs.\n \u003c/dd\u003e\n\u003c/dl\u003e\n\nYou must set this value when `scaleTier` is set to `CUSTOM`.", + "type": "string" }, - "code": { - "type": "integer", - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32" - } - }, - "id": "GoogleRpc__Status", - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. 
For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", - "type": "object" - }, - "GoogleCloudMlV1beta1__PredictRequest": { - "properties": { - "httpBody": { - "description": "\nRequired. The prediction request body.", - "$ref": "GoogleApi__HttpBody" - } - }, - "id": "GoogleCloudMlV1beta1__PredictRequest", - "description": "Request for predictions to be issued against a trained model.\n\nThe body of the request is a single JSON object with a single top-level\nfield:\n\n\u003cdl\u003e\n \u003cdt\u003einstances\u003c/dt\u003e\n \u003cdd\u003eA JSON array containing values representing the instances to use for\n prediction.\u003c/dd\u003e\n\u003c/dl\u003e\n\nThe structure of each element of the instances list is determined by your\nmodel's input definition. Instances can include named inputs or can contain\nonly unlabeled values.\n\nMost data does not include named inputs. Some instances will be simple\nJSON values (boolean, number, or string). However, instances are often lists\nof simple values, or complex nested lists. Here are some examples of request\nbodies:\n\nCSV data with each row encoded as a string value:\n\u003cpre\u003e\n{\"instances\": [\"1.0,true,\\\\\"x\\\\\"\", \"-2.0,false,\\\\\"y\\\\\"\"]}\n\u003c/pre\u003e\nPlain text:\n\u003cpre\u003e\n{\"instances\": [\"the quick brown fox\", \"la bruja le dio\"]}\n\u003c/pre\u003e\nSentences encoded as lists of words (vectors of strings):\n\u003cpre\u003e\n{\"instances\": [[\"the\",\"quick\",\"brown\"], [\"la\",\"bruja\",\"le\"]]}\n\u003c/pre\u003e\nFloating point scalar values:\n\u003cpre\u003e\n{\"instances\": [0.0, 1.1, 2.2]}\n\u003c/pre\u003e\nVectors of integers:\n\u003cpre\u003e\n{\"instances\": [[0, 1, 2], [3, 4, 5],...]}\n\u003c/pre\u003e\nTensors (in this case, two-dimensional tensors):\n\u003cpre\u003e\n{\"instances\": [[[0, 1, 2], [3, 4, 5]], ...]}\n\u003c/pre\u003e\nImages represented as a three-dimensional list. In this encoding scheme the\nfirst two dimensions represent the rows and columns of the image, and the\nthird contains the R, G, and B values for each pixel.\n\u003cpre\u003e\n{\"instances\": [[[[138, 30, 66], [130, 20, 56], ...]]]]}\n\u003c/pre\u003e\nData must be encoded as UTF-8. If your data uses another character encoding,\nyou must base64 encode the data and mark it as binary. 
To mark a JSON string\nas binary, replace it with an object with a single attribute named `b`:\n\u003cpre\u003e{\"b\": \"...\"} \u003c/pre\u003e\nFor example:\n\nTwo Serialized tf.Examples (fake data, for illustrative purposes only):\n\u003cpre\u003e\n{\"instances\": [{\"b64\": \"X5ad6u\"}, {\"b64\": \"IA9j4nx\"}]}\n\u003c/pre\u003e\nTwo JPEG image byte strings (fake data, for illustrative purposes only):\n\u003cpre\u003e\n{\"instances\": [{\"b64\": \"ASa8asdf\"}, {\"b64\": \"JLK7ljk3\"}]}\n\u003c/pre\u003e\nIf your data includes named references, format each instance as a JSON object\nwith the named references as the keys:\n\nJSON input data to be preprocessed:\n\u003cpre\u003e\n{\"instances\": [{\"a\": 1.0, \"b\": true, \"c\": \"x\"},\n {\"a\": -2.0, \"b\": false, \"c\": \"y\"}]}\n\u003c/pre\u003e\nSome models have an underlying TensorFlow graph that accepts multiple input\ntensors. In this case, you should use the names of JSON name/value pairs to\nidentify the input tensors, as shown in the following exmaples:\n\nFor a graph with input tensor aliases \"tag\" (string) and \"image\"\n(base64-encoded string):\n\u003cpre\u003e\n{\"instances\": [{\"tag\": \"beach\", \"image\": {\"b64\": \"ASa8asdf\"}},\n {\"tag\": \"car\", \"image\": {\"b64\": \"JLK7ljk3\"}}]}\n\u003c/pre\u003e\nFor a graph with input tensor aliases \"tag\" (string) and \"image\"\n(3-dimensional array of 8-bit ints):\n\u003cpre\u003e\n{\"instances\": [{\"tag\": \"beach\", \"image\": [[[263, 1, 10], [262, 2, 11], ...]]},\n {\"tag\": \"car\", \"image\": [[[10, 11, 24], [23, 10, 15], ...]]}]}\n\u003c/pre\u003e\nIf the call is successful, the response body will contain one prediction\nentry per instance in the request body. If prediction fails for any\ninstance, the response body will contain no predictions and will contian\na single error entry instead.", - "type": "object" - }, - "GoogleCloudMlV1beta1__PredictionInput": { - "description": "Represents input parameters for a prediction job.", - "type": "object", - "properties": { "runtimeVersion": { - "description": "Optional. The Google Cloud ML runtime version to use for this batch\nprediction. If not set, Google Cloud ML will choose a version.", + "description": "Optional. The Google Cloud ML runtime version to use for training. If not\nset, Google Cloud ML will choose the latest stable version.", "type": "string" }, - "inputPaths": { - "description": "Required. The Google Cloud Storage location of the input data files.\nMay contain wildcards.", + "pythonModule": { + "description": "Required. The Python module name to run after installing the packages.", + "type": "string" + }, + "args": { + "description": "Optional. Command line arguments to pass to the program.", "type": "array", "items": { "type": "string" } }, "region": { - "description": "Required. The Google Compute Engine region to run the prediction job in.", - "type": "string" - }, - "versionName": { - "description": "Use this field if you want to specify a version of the model to use. The\nstring is formatted the same way as `model_version`, with the addition\nof the version information:\n\n`\"projects/\u003cvar\u003e[YOUR_PROJECT]\u003c/var\u003e/models/\u003cvar\u003eYOUR_MODEL/versions/\u003cvar\u003e[YOUR_VERSION]\u003c/var\u003e\"`", - "type": "string" - }, - "modelName": { - "description": "Use this field if you want to use the default version for the specified\nmodel. 
The string must use the following format:\n\n`\"projects/\u003cvar\u003e[YOUR_PROJECT]\u003c/var\u003e/models/\u003cvar\u003e[YOUR_MODEL]\u003c/var\u003e\"`", + "description": "Required. The Google Compute Engine region to run the training job in.", "type": "string" }, - "outputPath": { - "description": "Required. The output Google Cloud Storage location.", + "workerType": { + "description": "Optional. Specifies the type of virtual machine to use for your training\njob's worker nodes.\n\nThe supported values are the same as those described in the entry for\n`masterType`.\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`workerCount` is greater than zero.", "type": "string" }, - "maxWorkerCount": { - "description": "Optional. The maximum number of workers to be used for parallel processing.\nDefaults to 10 if not specified.", - "format": "int64", + "parameterServerType": { + "description": "Optional. Specifies the type of virtual machine to use for your training\njob's parameter server.\n\nThe supported values are the same as those described in the entry for\n`master_type`.\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`parameter_server_count` is greater than zero.", "type": "string" }, - "dataFormat": { - "enumDescriptions": [ - "Unspecified format.", - "The source file is a text file with instances separated by the\nnew-line character.", - "The source file is a TFRecord file.", - "The source file is a GZIP-compressed TFRecord file." - ], + "scaleTier": { "enum": [ - "DATA_FORMAT_UNSPECIFIED", - "TEXT", - "TF_RECORD", - "TF_RECORD_GZIP" + "BASIC", + "STANDARD_1", + "PREMIUM_1", + "BASIC_GPU", + "CUSTOM" ], - "description": "Required. The format of the input data files.", - "type": "string" - } - }, - "id": "GoogleCloudMlV1beta1__PredictionInput" - }, - "GoogleApi__HttpBody": { - "type": "object", - "properties": { - "data": { - "description": "HTTP body binary data.", - "format": "byte", - "type": "string" + "description": "Required. Specifies the machine types, the number of replicas for workers\nand parameter servers.", + "type": "string", + "enumDescriptions": [ + "A single worker instance. This tier is suitable for learning how to use\nCloud ML, and for experimenting with new models using small datasets.", + "Many workers and a few parameter servers.", + "A large number of workers with many parameter servers.", + "A single worker instance [with a GPU](ml/docs/how-tos/using-gpus).", + "The CUSTOM tier is not a set tier, but rather enables you to use your\nown cluster specification. When you use this tier, set values to\nconfigure your processing cluster according to these guidelines:\n\n* You _must_ set `TrainingInput.masterType` to specify the type\n of machine to use for your master node. This is the only required\n setting.\n\n* You _may_ set `TrainingInput.workerCount` to specify the number of\n workers to use. If you specify one or more workers, you _must_ also\n set `TrainingInput.workerType` to specify the type of machine to use\n for your worker nodes.\n\n* You _may_ set `TrainingInput.parameterServerCount` to specify the\n number of parameter servers to use. If you specify one or more\n parameter servers, you _must_ also set\n `TrainingInput.parameterServerType` to specify the type of machine to\n use for your parameter servers.\n\nNote that all of your workers must use the same machine type, which can\nbe different from your parameter server type and master type. 
Your\nparameter servers must likewise use the same machine type, which can be\ndifferent from your worker type and master type." + ] }, - "contentType": { - "description": "The HTTP Content-Type string representing the content type of the body.", + "jobDir": { + "description": "Optional. A Google Cloud Storage path in which to store training outputs\nand other data needed for training. This path is passed to your TensorFlow\nprogram as the 'job_dir' command-line argument. The benefit of specifying\nthis field is that Cloud ML validates the path for use in training.", "type": "string" + }, + "hyperparameters": { + "$ref": "GoogleCloudMlV1beta1__HyperparameterSpec", + "description": "Optional. The set of Hyperparameters to tune." } }, - "id": "GoogleApi__HttpBody", - "description": "Message that represents an arbitrary HTTP body. It should only be used for\npayload formats that can't be represented as JSON, such as raw binary or\nan HTML page.\n\n\nThis message can be used both in streaming and non-streaming API methods in\nthe request as well as the response.\n\nIt can be used as a top-level request field, which is convenient if one\nwants to extract parameters from either the URL or HTTP template into the\nrequest fields and also want access to the raw HTTP body.\n\nExample:\n\n message GetResourceRequest {\n // A unique request id.\n string request_id = 1;\n\n // The raw HTTP body is bound to this field.\n google.api.HttpBody http_body = 2;\n }\n\n service ResourceService {\n rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);\n rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty);\n }\n\nExample with streaming methods:\n\n service CaldavService {\n rpc GetCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n rpc UpdateCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n }\n\nUse of this type only changes how the request and response bodies are\nhandled, all other features will continue to work unchanged." + "id": "GoogleCloudMlV1beta1__TrainingInput", + "description": "Represents input parameters for a training job.", + "type": "object" }, - "GoogleCloudMlV1beta1__ListJobsResponse": { - "description": "Response message for the ListJobs method.", - "type": "object", + "GoogleLongrunning__ListOperationsResponse": { "properties": { "nextPageToken": { - "description": "Optional. Pass this token as the `page_token` field of the request for a\nsubsequent call.", + "description": "The standard List next-page token.", "type": "string" }, - "jobs": { - "description": "The list of jobs.", + "operations": { + "description": "A list of operations that matches the specified filter in the request.", "type": "array", "items": { - "$ref": "GoogleCloudMlV1beta1__Job" + "$ref": "GoogleLongrunning__Operation" } } }, - "id": "GoogleCloudMlV1beta1__ListJobsResponse" + "id": "GoogleLongrunning__ListOperationsResponse", + "description": "The response message for Operations.ListOperations.", + "type": "object" }, - "GoogleCloudMlV1beta1__Version": { - "id": "GoogleCloudMlV1beta1__Version", - "description": "Represents a version of the model.\n\nEach version is a trained model deployed in the cloud, ready to handle\nprediction requests. A model can have multiple versions. 
You can get\ninformation about all of the versions of a given model by calling\n[projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).", - "type": "object", + "GoogleCloudMlV1beta1__GetConfigResponse": { + "description": "Returns service account information associated with a project.", + "type": "object", "properties": { - "description": { - "description": "Optional. The description specified for the version when it was created.", + "serviceAccountProject": { + "description": "The project number for `service_account`.", + "format": "int64", "type": "string" }, - "deploymentUri": { - "description": "Required. The Google Cloud Storage location of the trained model used to\ncreate the version. See the\n[overview of model deployment](/ml/docs/concepts/deployment-overview) for\nmore informaiton.\n\nWhen passing Version to\n[projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create)\nthe model service uses the specified location as the source of the model.\nOnce deployed, the model version is hosted by the prediction service, so\nthis location is useful only as a historical record.", + "serviceAccount": { + "description": "The service account Cloud ML uses to access resources in the project.", "type": "string" + } + }, + "id": "GoogleCloudMlV1beta1__GetConfigResponse" + }, + "GoogleCloudMlV1beta1__SetDefaultVersionRequest": { + "description": "Request message for the SetDefaultVersion request.", + "type": "object", + "properties": {}, + "id": "GoogleCloudMlV1beta1__SetDefaultVersionRequest" + }, + "GoogleCloudMlV1beta1__ParameterSpec": { + "description": "Represents a single hyperparameter to optimize.", + "type": "object", + "properties": { + "categoricalValues": { + "description": "Required if type is `CATEGORICAL`. The list of possible categories.", + "type": "array", + "items": { + "type": "string" + } }, - "onlinePredictionLogging": { - "description": "Optional. If true, enables StackDriver Logging for online prediction.\nDefault is false.", - "type": "boolean" + "parameterName": { + "description": "Required. The parameter name must be unique amongst all ParameterConfigs in\na HyperparameterSpec message. E.g., \"learning_rate\".", + "type": "string" }, - "isDefault": { - "description": "Output only. If true, this version will be used to handle prediction\nrequests that do not specify a version.\n\nYou can change the default version by calling\n[projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).", - "type": "boolean" + "minValue": { + "description": "Required if type is `DOUBLE` or `INTEGER`. This field\nshould be unset if type is `CATEGORICAL`. This value should be integers if\ntype is INTEGER.", + "format": "double", + "type": "number" }, - "createTime": { - "description": "Output only. The time the version was created.", - "format": "google-datetime", - "type": "string" + "discreteValues": { + "description": "Required if type is `DISCRETE`.\nA list of feasible points.\nThe list should be in strictly increasing order. For instance, this\nparameter might have possible settings of 1.5, 2.5, and 4.0. 
This list\nshould not contain more than 1,000 values.", + "type": "array", + "items": { + "format": "double", + "type": "number" + } }, - "name": { - "description": "Required.The name specified for the version when it was created.\n\nThe version name must be unique within the model it is created in.", - "type": "string" + "scaleType": { + "enum": [ + "NONE", + "UNIT_LINEAR_SCALE", + "UNIT_LOG_SCALE", + "UNIT_REVERSE_LOG_SCALE" + ], + "description": "Optional. How the parameter should be scaled to the hypercube.\nLeave unset for categorical parameters.\nSome kind of scaling is strongly recommended for real or integral\nparameters (e.g., `UNIT_LINEAR_SCALE`).", + "type": "string", + "enumDescriptions": [ + "By default, no scaling is applied.", + "Scales the feasible space to (0, 1) linearly.", + "Scales the feasible space logarithmically to (0, 1). The entire feasible\nspace must be strictly positive.", + "Scales the feasible space \"reverse\" logarithmically to (0, 1). The result\nis that values close to the top of the feasible space are spread out more\nthan points near the bottom. The entire feasible space must be strictly\npositive." + ] }, - "lastUseTime": { - "description": "Output only. The time the version was last used for prediction.", - "format": "google-datetime", - "type": "string" + "maxValue": { + "description": "Required if typeis `DOUBLE` or `INTEGER`. This field\nshould be unset if type is `CATEGORICAL`. This value should be integers if\ntype is `INTEGER`.", + "format": "double", + "type": "number" }, - "runtimeVersion": { - "description": "Optional. The Google Cloud ML runtime version to use for this deployment.\nIf not set, Google Cloud ML will choose a version.", + "type": { + "enum": [ + "PARAMETER_TYPE_UNSPECIFIED", + "DOUBLE", + "INTEGER", + "CATEGORICAL", + "DISCRETE" + ], + "description": "Required. The type of the parameter.", + "type": "string", + "enumDescriptions": [ + "You must specify a valid type. Using this unspecified type will result in\nan error.", + "Type for real-valued parameters.", + "Type for integral parameters.", + "The parameter is categorical, with a value chosen from the categories\nfield.", + "The parameter is real valued, with a fixed set of feasible points. If\n`type==DISCRETE`, feasible_points must be provided, and\n{`min_value`, `max_value`} will be ignored." + ] + } + }, + "id": "GoogleCloudMlV1beta1__ParameterSpec" + }, + "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric": { + "properties": { + "trainingStep": { + "description": "The global training step for this metric.", + "format": "int64", "type": "string" + }, + "objectiveValue": { + "description": "The objective value at this training step.", + "format": "double", + "type": "number" } - } + }, + "id": "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric", + "description": "An observed value of a metric.", + "type": "object" }, - "GoogleCloudMlV1beta1__ListVersionsResponse": { - "id": "GoogleCloudMlV1beta1__ListVersionsResponse", - "description": "Response message for the ListVersions method.", + "GoogleCloudMlV1beta1__PredictionOutput": { + "description": "Represents results of a prediction job.", "type": "object", "properties": { - "nextPageToken": { - "description": "Optional. 
Pass this token as the `page_token` field of the request for a\nsubsequent call.", + "predictionCount": { + "description": "The number of generated predictions.", + "format": "int64", "type": "string" }, - "versions": { - "type": "array", - "items": { - "$ref": "GoogleCloudMlV1beta1__Version" - }, - "description": "The list of versions." + "errorCount": { + "description": "The number of data instances which resulted in errors.", + "format": "int64", + "type": "string" + }, + "outputPath": { + "description": "The output Google Cloud Storage location provided at the job creation time.", + "type": "string" + }, + "nodeHours": { + "description": "Node hours used by the batch prediction job.", + "format": "double", + "type": "number" } - } + }, + "id": "GoogleCloudMlV1beta1__PredictionOutput" }, - "GoogleCloudMlV1beta1__Model": { - "id": "GoogleCloudMlV1beta1__Model", - "description": "Represents a machine learning solution.\n\nA model can have multiple versions, each of which is a deployed, trained\nmodel ready to receive prediction requests. The model itself is just a\ncontainer.", - "type": "object", + "GoogleCloudMlV1beta1__TrainingOutput": { "properties": { - "regions": { - "description": "Optional. The list of regions where the model is going to be deployed.\nCurrently only one region per model is supported.\nDefaults to 'us-central1' if nothing is set.", + "consumedMLUnits": { + "description": "The amount of ML units consumed by the job.", + "format": "double", + "type": "number" + }, + "trials": { + "description": "Results for individual Hyperparameter trials.\nOnly set for hyperparameter tuning jobs.", "type": "array", "items": { - "type": "string" + "$ref": "GoogleCloudMlV1beta1__HyperparameterOutput" } }, - "name": { - "description": "Required. The name specified for the model when it was created.\n\nThe model name must be unique within the project it is created in.", - "type": "string" - }, - "description": { - "description": "Optional. The description specified for the model when it was created.", + "completedTrialCount": { + "description": "The number of hyperparameter tuning trials that completed successfully.\nOnly set for hyperparameter tuning jobs.", + "format": "int64", "type": "string" }, - "defaultVersion": { - "$ref": "GoogleCloudMlV1beta1__Version", - "description": "Output only. The default version of the model. This version will be used to\nhandle prediction requests that do not specify a version.\n\nYou can change the default version by calling\n[projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault)." + "isHyperparameterTuningJob": { + "description": "Whether this job is a hyperparameter tuning job.", + "type": "boolean" } - } - }, - "GoogleCloudMlV1beta1__CancelJobRequest": { - "type": "object", - "properties": {}, - "id": "GoogleCloudMlV1beta1__CancelJobRequest", - "description": "Request message for the CancelJob method." + }, + "id": "GoogleCloudMlV1beta1__TrainingOutput", + "description": "Represents results of a training job. Output only.", + "type": "object" }, - "GoogleCloudMlV1beta1__Job": { - "description": "Represents a training or prediction job.", + "GoogleCloudMlV1__Version": { + "description": "Represents a version of the model.\n\nEach version is a trained model deployed in the cloud, ready to handle\nprediction requests. A model can have multiple versions. 
You can get\ninformation about all of the versions of a given model by calling\n[projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list).", "type": "object", "properties": { - "endTime": { - "description": "Output only. When the job processing was completed.", - "format": "google-datetime", - "type": "string" + "isDefault": { + "description": "Output only. If true, this version will be used to handle prediction\nrequests that do not specify a version.\n\nYou can change the default version by calling\n[projects.methods.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault).", + "type": "boolean" }, - "startTime": { - "description": "Output only. When the job processing was started.", + "createTime": { + "description": "Output only. The time the version was created.", "format": "google-datetime", "type": "string" }, - "predictionOutput": { - "$ref": "GoogleCloudMlV1beta1__PredictionOutput", - "description": "The current prediction job result." + "name": { + "description": "Required.The name specified for the version when it was created.\n\nThe version name must be unique within the model it is created in.", + "type": "string" }, - "trainingOutput": { - "$ref": "GoogleCloudMlV1beta1__TrainingOutput", - "description": "The current training job result." + "runtimeVersion": { + "description": "Optional. The Google Cloud ML runtime version to use for this deployment.\nIf not set, Google Cloud ML will choose a version.", + "type": "string" }, - "createTime": { - "description": "Output only. When the job was created.", + "lastUseTime": { + "description": "Output only. The time the version was last used for prediction.", "format": "google-datetime", "type": "string" }, - "trainingInput": { - "$ref": "GoogleCloudMlV1beta1__TrainingInput", - "description": "Input parameters to create a training job." - }, - "predictionInput": { - "description": "Input parameters to create a prediction job.", - "$ref": "GoogleCloudMlV1beta1__PredictionInput" - }, - "state": { - "enumDescriptions": [ - "The job state is unspecified.", - "The job has been just created and processing has not yet begun.", - "The service is preparing to run the job.", - "The job is in progress.", - "The job completed successfully.", - "The job failed.\n`error_message` should contain the details of the failure.", - "The job is being cancelled.\n`error_message` should describe the reason for the cancellation.", - "The job has been cancelled.\n`error_message` should describe the reason for the cancellation." - ], - "enum": [ - "STATE_UNSPECIFIED", - "QUEUED", - "PREPARING", - "RUNNING", - "SUCCEEDED", - "FAILED", - "CANCELLING", - "CANCELLED" - ], - "description": "Output only. The detailed state of a job.", + "description": { + "description": "Optional. The description specified for the version when it was created.", "type": "string" }, - "jobId": { - "description": "Required. The user-specified id of the job.", + "deploymentUri": { + "description": "Required. The Google Cloud Storage location of the trained model used to\ncreate the version. 
See the\n[overview of model deployment](/ml/docs/concepts/deployment-overview) for\nmore informaiton.\n\nWhen passing Version to\n[projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create)\nthe model service uses the specified location as the source of the model.\nOnce deployed, the model version is hosted by the prediction service, so\nthis location is useful only as a historical record.", "type": "string" - }, - "errorMessage": { - "type": "string", - "description": "Output only. The details of a failure or a cancellation." } }, - "id": "GoogleCloudMlV1beta1__Job" + "id": "GoogleCloudMlV1__Version" }, - "GoogleLongrunning__ListOperationsResponse": { - "description": "The response message for Operations.ListOperations.", - "type": "object", - "properties": { - "operations": { - "type": "array", - "items": { - "$ref": "GoogleLongrunning__Operation" - }, - "description": "A list of operations that matches the specified filter in the request." - }, - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - } - }, - "id": "GoogleLongrunning__ListOperationsResponse" - }, - "GoogleCloudMlV1beta1__TrainingInput": { - "id": "GoogleCloudMlV1beta1__TrainingInput", - "description": "Represents input parameters for a training job.", - "type": "object", + "GoogleCloudMlV1beta1__HyperparameterSpec": { "properties": { - "runtimeVersion": { - "description": "Optional. The Google Cloud ML runtime version to use for training. If not\nset, Google Cloud ML will choose the latest stable version.", - "type": "string" + "maxParallelTrials": { + "description": "Optional. The number of training trials to run concurrently.\nYou can reduce the time it takes to perform hyperparameter tuning by adding\ntrials in parallel. However, each trail only benefits from the information\ngained in completed trials. That means that a trial does not get access to\nthe results of trials running at the same time, which could reduce the\nquality of the overall optimization.\n\nEach trial will use the same scale tier and machine types.\n\nDefaults to one.", + "format": "int32", + "type": "integer" }, - "pythonModule": { - "description": "Required. The Python module name to run after installing the packages.", + "goal": { + "enumDescriptions": [ + "Goal Type will default to maximize.", + "Maximize the goal metric.", + "Minimize the goal metric." + ], + "enum": [ + "GOAL_TYPE_UNSPECIFIED", + "MAXIMIZE", + "MINIMIZE" + ], + "description": "Required. The type of goal to use for tuning. Available types are\n`MAXIMIZE` and `MINIMIZE`.\n\nDefaults to `MAXIMIZE`.", "type": "string" }, - "region": { - "description": "Required. The Google Compute Engine region to run the training job in.", + "hyperparameterMetricTag": { + "description": "Optional. The Tensorflow summary tag name to use for optimizing trials. For\ncurrent versions of Tensorflow, this tag name should exactly match what is\nshown in Tensorboard, including all scopes. For versions of Tensorflow\nprior to 0.12, this should be only the tag passed to tf.Summary.\nBy default, \"training/hptuning/metric\" will be used.", "type": "string" }, - "args": { - "description": "Optional. Command line arguments to pass to the program.", + "params": { + "description": "Required. The set of parameters to tune.", "type": "array", "items": { - "type": "string" + "$ref": "GoogleCloudMlV1beta1__ParameterSpec" } }, - "workerType": { - "description": "Optional. 
Specifies the type of virtual machine to use for your training\njob's worker nodes.\n\nThe supported values are the same as those described in the entry for\n`masterType`.\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`workerCount` is greater than zero.", - "type": "string" + "maxTrials": { + "description": "Optional. How many training trials should be attempted to optimize\nthe specified hyperparameters.\n\nDefaults to one.", + "format": "int32", + "type": "integer" + } + }, + "id": "GoogleCloudMlV1beta1__HyperparameterSpec", + "description": "Represents a set of hyperparameters to optimize.", + "type": "object" + }, + "GoogleCloudMlV1beta1__OperationMetadata": { + "properties": { + "version": { + "description": "Contains the version associated with the operation.", + "$ref": "GoogleCloudMlV1beta1__Version" }, - "parameterServerType": { - "description": "Optional. Specifies the type of virtual machine to use for your training\njob's parameter server.\n\nThe supported values are the same as those described in the entry for\n`master_type`.\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`parameter_server_count` is greater than zero.", + "endTime": { + "description": "The time operation processing completed.", + "format": "google-datetime", "type": "string" }, - "scaleTier": { - "type": "string", - "enumDescriptions": [ - "A single worker instance. This tier is suitable for learning how to use\nCloud ML, and for experimenting with new models using small datasets.", - "Many workers and a few parameter servers.", - "A large number of workers with many parameter servers.", - "The CUSTOM tier is not a set tier, but rather enables you to use your\nown cluster specification. When you use this tier, set values to\nconfigure your processing cluster according to these guidelines:\n\n* You _must_ set `TrainingInput.masterType` to specify the type\n of machine to use for your master node. This is the only required\n setting.\n\n* You _may_ set `TrainingInput.workerCount` to specify the number of\n workers to use. If you specify one or more workers, you _must_ also\n set `TrainingInput.workerType` to specify the type of machine to use\n for your worker nodes.\n\n* You _may_ set `TrainingInput.parameterServerCount` to specify the\n number of parameter servers to use. If you specify one or more\n parameter servers, you _must_ also set\n `TrainingInput.parameterServerType` to specify the type of machine to\n use for your parameter servers.\n\nNote that all of your workers must use the same machine type, which can\nbe different from your parameter server type and master type. Your\nparameter servers must likewise use the same machine type, which can be\ndifferent from your worker type and master type." - ], + "operationType": { "enum": [ - "BASIC", - "STANDARD_1", - "PREMIUM_1", - "CUSTOM" + "OPERATION_TYPE_UNSPECIFIED", + "CREATE_VERSION", + "DELETE_VERSION", + "DELETE_MODEL" ], - "description": "Required. Specifies the machine types, the number of replicas for workers\nand parameter servers." - }, - "hyperparameters": { - "$ref": "GoogleCloudMlV1beta1__HyperparameterSpec", - "description": "Optional. The set of Hyperparameters to tune." + "description": "The operation type.", + "type": "string", + "enumDescriptions": [ + "Unspecified operation type.", + "An operation to create a new version.", + "An operation to delete an existing version.", + "An operation to delete an existing model." + ] }, - "parameterServerCount": { - "description": "Optional. 
The number of parameter server replicas to use for the training\njob. Each replica in the cluster will be of the type specified in\n`parameter_server_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`.If you\nset this value, you must also set `parameter_server_type`.", - "format": "int64", + "startTime": { + "description": "The time operation processing started.", + "format": "google-datetime", "type": "string" }, - "packageUris": { - "description": "Required. The Google Cloud Storage location of the packages with\nthe training program and any additional dependencies.", - "type": "array", - "items": { - "type": "string" - } + "isCancellationRequested": { + "description": "Indicates whether a request to cancel this operation has been made.", + "type": "boolean" }, - "workerCount": { - "description": "Optional. The number of worker replicas to use for the training job. Each\nreplica in the cluster will be of the type specified in `worker_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`. If you\nset this value, you must also set `worker_type`.", - "format": "int64", + "createTime": { + "description": "The time the operation was submitted.", + "format": "google-datetime", "type": "string" }, - "masterType": { - "description": "Optional. Specifies the type of virtual machine to use for your training\njob's master worker.\n\nThe following types are supported:\n\n\u003cdl\u003e\n \u003cdt\u003estandard\u003c/dt\u003e\n \u003cdd\u003e\n A basic machine configuration suitable for training simple models with\n small to moderate datasets.\n \u003c/dd\u003e\n \u003cdt\u003elarge_model\u003c/dt\u003e\n \u003cdd\u003e\n A machine with a lot of memory, specially suited for parameter servers\n when your model is large (having many hidden layers or layers with very\n large numbers of nodes).\n \u003c/dd\u003e\n \u003cdt\u003ecomplex_model_s\u003c/dt\u003e\n \u003cdd\u003e\n A machine suitable for the master and workers of the cluster when your\n model requires more computation than the standard machine can handle\n satisfactorily.\n \u003c/dd\u003e\n \u003cdt\u003ecomplex_model_m\u003c/dt\u003e\n \u003cdd\u003e\n A machine with roughly twice the number of cores and roughly double the\n memory of \u003ccode suppresswarning=\"true\"\u003ecomplex_model_s\u003c/code\u003e.\n \u003c/dd\u003e\n \u003cdt\u003ecomplex_model_l\u003c/dt\u003e\n \u003cdd\u003e\n A machine with roughly twice the number of cores and roughly double the\n memory of \u003ccode suppresswarning=\"true\"\u003ecomplex_model_m\u003c/code\u003e.\n \u003c/dd\u003e\n\u003c/dl\u003e\n\nYou must set this value when `scaleTier` is set to `CUSTOM`.", + "modelName": { + "description": "Contains the name of the model associated with the operation.", "type": "string" } - } + }, + "id": "GoogleCloudMlV1beta1__OperationMetadata", + "description": "Represents the metadata of the long-running operation.", + "type": "object" }, - "GoogleCloudMlV1beta1__GetConfigResponse": { - "id": "GoogleCloudMlV1beta1__GetConfigResponse", - "description": "Returns service account information associated with a project.", + "GoogleCloudMlV1__OperationMetadata": { + "description": "Represents the metadata of the long-running operation.", "type": "object", "properties": { - "serviceAccountProject": { - "description": "The project number for `service_account`.", - "format": "int64", + "isCancellationRequested": { + "description": "Indicates whether a request to cancel this operation has been made.", + "type": "boolean" + }, + 
"createTime": { + "description": "The time the operation was submitted.", + "format": "google-datetime", "type": "string" }, - "serviceAccount": { - "description": "The service account Cloud ML uses to access resources in the project.", + "modelName": { + "description": "Contains the name of the model associated with the operation.", + "type": "string" + }, + "version": { + "$ref": "GoogleCloudMlV1__Version", + "description": "Contains the version associated with the operation." + }, + "endTime": { + "description": "The time operation processing completed.", + "format": "google-datetime", + "type": "string" + }, + "operationType": { + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "CREATE_VERSION", + "DELETE_VERSION", + "DELETE_MODEL" + ], + "description": "The operation type.", + "type": "string", + "enumDescriptions": [ + "Unspecified operation type.", + "An operation to create a new version.", + "An operation to delete an existing version.", + "An operation to delete an existing model." + ] + }, + "startTime": { + "description": "The time operation processing started.", + "format": "google-datetime", "type": "string" } - } - }, - "GoogleCloudMlV1beta1__SetDefaultVersionRequest": { - "description": "Request message for the SetDefaultVersion request.", - "type": "object", - "properties": {}, - "id": "GoogleCloudMlV1beta1__SetDefaultVersionRequest" + }, + "id": "GoogleCloudMlV1__OperationMetadata" }, - "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric": { + "GoogleCloudMlV1beta1__ListModelsResponse": { "properties": { - "trainingStep": { - "description": "The global training step for this metric.", - "format": "int64", + "nextPageToken": { + "description": "Optional. Pass this token as the `page_token` field of the request for a\nsubsequent call.", "type": "string" }, - "objectiveValue": { - "description": "The objective value at this training step.", - "format": "double", - "type": "number" + "models": { + "description": "The list of models.", + "type": "array", + "items": { + "$ref": "GoogleCloudMlV1beta1__Model" + } } }, - "id": "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric", - "description": "An observed value of a metric.", + "id": "GoogleCloudMlV1beta1__ListModelsResponse", + "description": "Response message for the ListModels method.", "type": "object" }, - "GoogleCloudMlV1beta1__ParameterSpec": { - "description": "Represents a single hyperparameter to optimize.", - "type": "object", + "GoogleLongrunning__Operation": { "properties": { - "type": { - "description": "Required. The type of the parameter.", - "type": "string", - "enumDescriptions": [ - "You must specify a valid type. Using this unspecified type will result in\nan error.", - "Type for real-valued parameters.", - "Type for integral parameters.", - "The parameter is categorical, with a value chosen from the categories\nfield.", - "The parameter is real valued, with a fixed set of feasible points. If\n`type==DISCRETE`, feasible_points must be provided, and\n{`min_value`, `max_value`} will be ignored." - ], - "enum": [ - "PARAMETER_TYPE_UNSPECIFIED", - "DOUBLE", - "INTEGER", - "CATEGORICAL", - "DISCRETE" - ] + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. 
If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object" }, - "parameterName": { - "description": "Required. The parameter name must be unique amongst all ParameterConfigs in\na HyperparameterSpec message. E.g., \"learning_rate\".", + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", "type": "string" }, - "categoricalValues": { - "description": "Required if type is `CATEGORICAL`. The list of possible categories.", - "type": "array", - "items": { - "type": "string" - } + "error": { + "description": "The error result of the operation in case of failure or cancellation.", + "$ref": "GoogleRpc__Status" }, - "minValue": { - "description": "Required if type is `DOUBLE` or `INTEGER`. This field\nshould be unset if type is `CATEGORICAL`. This value should be integers if\ntype is INTEGER.", - "format": "double", - "type": "number" + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object" }, - "discreteValues": { - "description": "Required if type is `DISCRETE`.\nA list of feasible points.\nThe list should be in strictly increasing order. For instance, this\nparameter might have possible settings of 1.5, 2.5, and 4.0. This list\nshould not contain more than 1,000 values.", + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" + } + }, + "id": "GoogleLongrunning__Operation", + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "type": "object" + }, + "GoogleCloudMlV1beta1__HyperparameterOutput": { + "description": "Represents the result of a single hyperparameter tuning trial from a\ntraining job. The TrainingOutput object that is returned on successful\ncompletion of a training job with hyperparameter tuning includes a list\nof HyperparameterOutput objects, one for each successful trial.", + "type": "object", + "properties": { + "allMetrics": { + "description": "All recorded object metrics for this trial.", "type": "array", "items": { - "format": "double", - "type": "number" + "$ref": "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric" } }, - "maxValue": { - "description": "Required if typeis `DOUBLE` or `INTEGER`. This field\nshould be unset if type is `CATEGORICAL`. This value should be integers if\ntype is `INTEGER`.", - "format": "double", - "type": "number" + "finalMetric": { + "$ref": "GoogleCloudMlV1beta1_HyperparameterOutput_HyperparameterMetric", + "description": "The final objective metric seen for this trial." 
}, - "scaleType": { - "enum": [ - "NONE", - "UNIT_LINEAR_SCALE", - "UNIT_LOG_SCALE", - "UNIT_REVERSE_LOG_SCALE" - ], - "description": "Optional. How the parameter should be scaled to the hypercube.\nLeave unset for categorical parameters.\nSome kind of scaling is strongly recommended for real or integral\nparameters (e.g., `UNIT_LINEAR_SCALE`).", - "type": "string", - "enumDescriptions": [ - "By default, no scaling is applied.", - "Scales the feasible space to (0, 1) linearly.", - "Scales the feasible space logarithmically to (0, 1). The entire feasible\nspace must be strictly positive.", - "Scales the feasible space \"reverse\" logarithmically to (0, 1). The result\nis that values close to the top of the feasible space are spread out more\nthan points near the bottom. The entire feasible space must be strictly\npositive." - ] + "hyperparameters": { + "additionalProperties": { + "type": "string" + }, + "description": "The hyperparameters given to this trial.", + "type": "object" + }, + "trialId": { + "description": "The trial id for these results.", + "type": "string" } }, - "id": "GoogleCloudMlV1beta1__ParameterSpec" + "id": "GoogleCloudMlV1beta1__HyperparameterOutput" }, - "GoogleCloudMlV1beta1__PredictionOutput": { - "description": "Represents results of a prediction job.", + "GoogleProtobuf__Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", "type": "object", + "properties": {}, + "id": "GoogleProtobuf__Empty" + }, + "GoogleRpc__Status": { "properties": { - "errorCount": { - "description": "The number of data instances which resulted in errors.", - "format": "int64", - "type": "string" - }, - "outputPath": { - "description": "The output Google Cloud Storage location provided at the job creation time.", + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", "type": "string" }, - "nodeHours": { - "description": "Node hours used by the batch prediction job.", - "format": "double", - "type": "number" + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } }, - "predictionCount": { - "description": "The number of generated predictions.", - "format": "int64", - "type": "string" + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" } }, - "id": "GoogleCloudMlV1beta1__PredictionOutput" + "id": "GoogleRpc__Status", + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). 
The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. 
If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object" } }, "icons": { diff --git a/vendor/google.golang.org/api/ml/v1beta1/ml-gen.go b/vendor/google.golang.org/api/ml/v1beta1/ml-gen.go index 04296ccf6..5eb37cc13 100644 --- a/vendor/google.golang.org/api/ml/v1beta1/ml-gen.go +++ b/vendor/google.golang.org/api/ml/v1beta1/ml-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Jobs = NewProjectsJobsService(s) @@ -213,6 +218,145 @@ func (s *GoogleApi__HttpBody) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleCloudMlV1__OperationMetadata: Represents the metadata of the +// long-running operation. +type GoogleCloudMlV1__OperationMetadata struct { + // CreateTime: The time the operation was submitted. + CreateTime string `json:"createTime,omitempty"` + + // EndTime: The time operation processing completed. + EndTime string `json:"endTime,omitempty"` + + // IsCancellationRequested: Indicates whether a request to cancel this + // operation has been made. + IsCancellationRequested bool `json:"isCancellationRequested,omitempty"` + + // ModelName: Contains the name of the model associated with the + // operation. + ModelName string `json:"modelName,omitempty"` + + // OperationType: The operation type. + // + // Possible values: + // "OPERATION_TYPE_UNSPECIFIED" - Unspecified operation type. + // "CREATE_VERSION" - An operation to create a new version. + // "DELETE_VERSION" - An operation to delete an existing version. + // "DELETE_MODEL" - An operation to delete an existing model. + OperationType string `json:"operationType,omitempty"` + + // StartTime: The time operation processing started. + StartTime string `json:"startTime,omitempty"` + + // Version: Contains the version associated with the operation. + Version *GoogleCloudMlV1__Version `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudMlV1__OperationMetadata) MarshalJSON() ([]byte, error) { + type noMethod GoogleCloudMlV1__OperationMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudMlV1__Version: Represents a version of the model. +// +// Each version is a trained model deployed in the cloud, ready to +// handle +// prediction requests. A model can have multiple versions. You can +// get +// information about all of the versions of a given model by +// calling +// [projects.models.versions.list](/ml/reference/rest/v1/projects +// .models.versions/list). +type GoogleCloudMlV1__Version struct { + // CreateTime: Output only. The time the version was created. + CreateTime string `json:"createTime,omitempty"` + + // DeploymentUri: Required. The Google Cloud Storage location of the + // trained model used to + // create the version. See the + // [overview of model deployment](/ml/docs/concepts/deployment-overview) + // for + // more informaiton. + // + // When passing Version + // to + // [projects.models.versions.create](/ml/reference/rest/v1/projects.mo + // dels.versions/create) + // the model service uses the specified location as the source of the + // model. + // Once deployed, the model version is hosted by the prediction service, + // so + // this location is useful only as a historical record. + DeploymentUri string `json:"deploymentUri,omitempty"` + + // Description: Optional. The description specified for the version when + // it was created. + Description string `json:"description,omitempty"` + + // IsDefault: Output only. If true, this version will be used to handle + // prediction + // requests that do not specify a version. + // + // You can change the default version by + // calling + // [projects.methods.versions.setDefault](/ml/reference/rest/v1/p + // rojects.models.versions/setDefault). + IsDefault bool `json:"isDefault,omitempty"` + + // LastUseTime: Output only. The time the version was last used for + // prediction. + LastUseTime string `json:"lastUseTime,omitempty"` + + // Name: Required.The name specified for the version when it was + // created. + // + // The version name must be unique within the model it is created in. + Name string `json:"name,omitempty"` + + // RuntimeVersion: Optional. The Google Cloud ML runtime version to use + // for this deployment. + // If not set, Google Cloud ML will choose a version. + RuntimeVersion string `json:"runtimeVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudMlV1__Version) MarshalJSON() ([]byte, error) { + type noMethod GoogleCloudMlV1__Version + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleCloudMlV1beta1HyperparameterOutputHyperparameterMetric: An // observed value of a metric. type GoogleCloudMlV1beta1HyperparameterOutputHyperparameterMetric struct { @@ -361,6 +505,16 @@ type GoogleCloudMlV1beta1__HyperparameterSpec struct { // "MINIMIZE" - Minimize the goal metric. Goal string `json:"goal,omitempty"` + // HyperparameterMetricTag: Optional. The Tensorflow summary tag name to + // use for optimizing trials. For + // current versions of Tensorflow, this tag name should exactly match + // what is + // shown in Tensorboard, including all scopes. For versions of + // Tensorflow + // prior to 0.12, this should be only the tag passed to tf.Summary. + // By default, "training/hptuning/metric" will be used. + HyperparameterMetricTag string `json:"hyperparameterMetricTag,omitempty"` + // MaxParallelTrials: Optional. The number of training trials to run // concurrently. // You can reduce the time it takes to perform hyperparameter tuning by @@ -628,6 +782,11 @@ type GoogleCloudMlV1beta1__Model struct { // The model name must be unique within the project it is created in. Name string `json:"name,omitempty"` + // OnlinePredictionLogging: Optional. If true, enables StackDriver + // Logging for online prediction. + // Default is false. + OnlinePredictionLogging bool `json:"onlinePredictionLogging,omitempty"` + // Regions: Optional. The list of regions where the model is going to be // deployed. // Currently only one region per model is supported. @@ -850,7 +1009,7 @@ func (s *GoogleCloudMlV1beta1__ParameterSpec) UnmarshalJSON(data []byte) error { // contain // only unlabeled values. // -// Most data does not include named inputs. Some instances will be +// Not all data includes named inputs. Some instances will be // simple // JSON values (boolean, number, or string). However, instances are // often lists @@ -868,11 +1027,15 @@ func (s *GoogleCloudMlV1beta1__ParameterSpec) UnmarshalJSON(data []byte) error { // {"instances": ["the quick brown fox", "la bruja le // dio"]} // -// Sentences encoded as lists of words (vectors of -// strings): +// Sentences encoded as lists of words (vectors of strings): //
      -// {"instances": [["the","quick","brown"],
      -// ["la","bruja","le"]]}
      +// {
      +//   "instances": [
      +//     ["the","quick","brown"],
      +//     ["la","bruja","le"],
      +//     ...
      +//   ]
      +// }
       // 
// Floating point scalar values:
//
      @@ -880,28 +1043,59 @@ func (s *GoogleCloudMlV1beta1__ParameterSpec) UnmarshalJSON(data []byte) error {
       // 
// Vectors of integers:
//
      -// {"instances": [[0, 1, 2], [3, 4, 5],...]}
      +// {
      +//   "instances": [
      +//     [0, 1, 2],
      +//     [3, 4, 5],
      +//     ...
      +//   ]
      +// }
       // 
// Tensors (in this case, two-dimensional tensors):
//
      -// {"instances": [[[0, 1, 2], [3, 4, 5]], ...]}
      +// {
      +//   "instances": [
      +//     [
      +//       [0, 1, 2],
      +//       [3, 4, 5]
      +//     ],
      +//     ...
      +//   ]
      +// }
       // 
-// Images represented as a three-dimensional list. In this encoding
-// scheme the
-// first two dimensions represent the rows and columns of the image, and
-// the
-// third contains the R, G, and B values for each
+// Images can be represented different ways. In this encoding scheme the
+// first
+// two dimensions represent the rows and columns of the image, and the
+// third
+// contains lists (vectors) of the R, G, and B values for each
// pixel.
//
      -// {"instances": [[[[138, 30, 66], [130, 20, 56], ...]]]]}
      +// {
      +//   "instances": [
      +//     [
      +//       [
      +//         [138, 30, 66],
      +//         [130, 20, 56],
      +//         ...
      +//       ],
      +//       [
      +//         [126, 38, 61],
      +//         [122, 24, 57],
      +//         ...
      +//       ],
      +//       ...
      +//     ],
      +//     ...
      +//   ]
      +// }
       // 
-// Data must be encoded as UTF-8. If your data uses another character
-// encoding,
-// you must base64 encode the data and mark it as binary. To mark a JSON
+// JSON strings must be encoded as UTF-8. To send binary data, you
+// must
+// base64-encode the data and mark it as binary. To mark a JSON
// string
-// as binary, replace it with an object with a single attribute named
-// `b`:
-//
      {"b": "..."} 
+// as binary, replace it with a JSON object with a single attribute
+// named `b64`:
+//
      {"b64": "..."} 
// For example:
//
// Two Serialized tf.Examples (fake data, for illustrative purposes
@@ -920,8 +1114,20 @@ func (s *GoogleCloudMlV1beta1__ParameterSpec) UnmarshalJSON(data []byte) error {
//
// JSON input data to be preprocessed:
//
      -// {"instances": [{"a": 1.0,  "b": true,  "c": "x"},
      -//                {"a": -2.0, "b": false, "c": "y"}]}
      +// {
      +//   "instances": [
      +//     {
      +//       "a": 1.0,
      +//       "b": true,
      +//       "c": "x"
      +//     },
      +//     {
      +//       "a": -2.0,
      +//       "b": false,
      +//       "c": "y"
      +//     }
      +//   ]
      +// }
       // 
// Some models have an underlying TensorFlow graph that accepts multiple
// input
@@ -933,18 +1139,60 @@ func (s *GoogleCloudMlV1beta1__ParameterSpec) UnmarshalJSON(data []byte) error {
// "image"
// (base64-encoded string):
//
      -// {"instances": [{"tag": "beach", "image": {"b64": "ASa8asdf"}},
      -//                {"tag": "car", "image": {"b64":
      -// "JLK7ljk3"}}]}
      +// {
      +//   "instances": [
      +//     {
      +//       "tag": "beach",
      +//       "image": {"b64": "ASa8asdf"}
      +//     },
      +//     {
      +//       "tag": "car",
      +//       "image": {"b64": "JLK7ljk3"}
      +//     }
      +//   ]
      +// }
       // 
// For a graph with input tensor aliases "tag" (string) and
// "image"
// (3-dimensional array of 8-bit ints):
//
      -// {"instances": [{"tag": "beach", "image": [[[263, 1, 10], [262, 2,
      -// 11], ...]]},
      -//                {"tag": "car", "image": [[[10, 11, 24], [23, 10, 15],
      -// ...]]}]}
      +// {
      +//   "instances": [
      +//     {
      +//       "tag": "beach",
      +//       "image": [
      +//         [
      +//           [138, 30, 66],
      +//           [130, 20, 56],
      +//           ...
      +//         ],
      +//         [
      +//           [126, 38, 61],
      +//           [122, 24, 57],
      +//           ...
      +//         ],
      +//         ...
      +//       ]
      +//     },
      +//     {
      +//       "tag": "car",
      +//       "image": [
      +//         [
      +//           [255, 0, 102],
      +//           [255, 0, 97],
      +//           ...
      +//         ],
      +//         [
      +//           [254, 1, 101],
      +//           [254, 2, 93],
      +//           ...
      +//         ],
      +//         ...
      +//       ]
      +//     },
      +//     ...
      +//   ]
      +// }
       // 
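The comment block above fully specifies the shape of an online prediction request body. As a quick sanity check of that shape, the following sketch (illustrative only; it is not part of this diff or of the generated client, and the variable names and byte values are made up) uses nothing but Go's standard library to marshal two named-input instances, wrapping the binary "image" input in the {"b64": ...} object described above:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Stand-in bytes for real image data.
	imageBytes := []byte{138, 30, 66, 130, 20, 56}
	b64 := base64.StdEncoding.EncodeToString(imageBytes)

	// Build {"instances": [...]} with a string input ("tag") and a binary
	// input ("image") marked as base64 via the {"b64": ...} wrapper.
	body := map[string]interface{}{
		"instances": []interface{}{
			map[string]interface{}{"tag": "beach", "image": map[string]string{"b64": b64}},
			map[string]interface{}{"tag": "car", "image": map[string]string{"b64": b64}},
		},
	}

	payload, err := json.Marshal(body)
	if err != nil {
		panic(err) // illustrative only; handle errors properly in real code
	}
	fmt.Println(string(payload))
}

The printed JSON is roughly what a caller would send as the body of a projects.predict call; nothing in the sketch depends on the generated types being modified in this file.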
      // If the call is successful, the response body will contain one // prediction @@ -1024,9 +1272,20 @@ type GoogleCloudMlV1beta1__PredictionInput struct { // RuntimeVersion: Optional. The Google Cloud ML runtime version to use // for this batch - // prediction. If not set, Google Cloud ML will choose a version. + // prediction. If not set, Google Cloud ML will pick the runtime version + // used + // during the CreateVersion request for this model version, or choose + // the + // latest stable version when model version information is not + // available + // such as when the model is specified by uri. RuntimeVersion string `json:"runtimeVersion,omitempty"` + // Uri: Use this field if you want to specify a Google Cloud Storage + // path for + // the model to use. + Uri string `json:"uri,omitempty"` + // VersionName: Use this field if you want to specify a version of the // model to use. The // string is formatted the same way as `model_version`, with the @@ -1128,6 +1387,15 @@ type GoogleCloudMlV1beta1__TrainingInput struct { // Hyperparameters: Optional. The set of Hyperparameters to tune. Hyperparameters *GoogleCloudMlV1beta1__HyperparameterSpec `json:"hyperparameters,omitempty"` + // JobDir: Optional. A Google Cloud Storage path in which to store + // training outputs + // and other data needed for training. This path is passed to your + // TensorFlow + // program as the 'job_dir' command-line argument. The benefit of + // specifying + // this field is that Cloud ML validates the path for use in training. + JobDir string `json:"jobDir,omitempty"` + // MasterType: Optional. Specifies the type of virtual machine to use // for your training // job's master worker. @@ -1169,6 +1437,21 @@ type GoogleCloudMlV1beta1__TrainingInput struct { // the // memory of complex_model_m. // + //
      standard_gpu
      + //
+ // A machine equivalent to standard that
+ // also includes a
+ //
+ // GPU that you can use in your trainer.
+ //
      + //
      complex_model_m_gpu
      + //
+ // A machine equivalent to
+ // complex_model_m that also
+ // includes
+ // four GPUs.
+ //
      // // // You must set this value when `scaleTier` is set to `CUSTOM`. @@ -1227,6 +1510,8 @@ type GoogleCloudMlV1beta1__TrainingInput struct { // "STANDARD_1" - Many workers and a few parameter servers. // "PREMIUM_1" - A large number of workers with many parameter // servers. + // "BASIC_GPU" - A single worker instance [with a + // GPU](ml/docs/how-tos/using-gpus). // "CUSTOM" - The CUSTOM tier is not a set tier, but rather enables // you to use your // own cluster specification. When you use this tier, set values @@ -1423,11 +1708,6 @@ type GoogleCloudMlV1beta1__Version struct { // The version name must be unique within the model it is created in. Name string `json:"name,omitempty"` - // OnlinePredictionLogging: Optional. If true, enables StackDriver - // Logging for online prediction. - // Default is false. - OnlinePredictionLogging bool `json:"onlinePredictionLogging,omitempty"` - // RuntimeVersion: Optional. The Google Cloud ML runtime version to use // for this deployment. // If not set, Google Cloud ML will choose a version. @@ -1774,6 +2054,7 @@ func (c *ProjectsGetConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1868,68 +2149,7 @@ type ProjectsPredictCall struct { // Predict: Performs prediction on the data in the request. // -// Responses are very similar to requests. There are two top-level -// fields, -// each of which are JSON lists: -// -//
      -//
      predictions
      -//
      The list of predictions, one per instance in the request.
      -//
      error
      -//
      An error message returned instead of a prediction list if any -// instance produced an error.
      -//
      -// -// If the call is successful, the response body will contain one -// prediction -// entry per instance in the request body. If prediction fails for -// any -// instance, the response body will contain no predictions and will -// contian -// a single error entry instead. -// -// Even though there is one prediction per instance, the format of -// a -// prediction is not directly related to the format of an -// instance. -// Predictions take whatever format is specified in the outputs -// collection -// defined in the model. The collection of predictions is returned in a -// JSON -// list. Each member of the list can be a simple value, a list, or a -// JSON -// object of any complexity. If your model has more than one output -// tensor, -// each prediction will be a JSON object containing a name/value pair -// for each -// output. The names identify the output aliases in the graph. -// -// The following examples show some possible responses: -// -// A simple set of predictions for three input instances, where -// each -// prediction is an integer value: -//
      -// {"predictions": [5, 4, 3]}
      -// 
      -// A more complex set of predictions, each containing two named values -// that -// correspond to output tensors, named **label** and **scores** -// respectively. -// The value of **label** is the predicted category ("car" or "beach") -// and -// **scores** contains a list of probabilities for that instance across -// the -// possible categories. -//
      -// {"predictions": [{"label": "beach", "scores": [0.1, 0.9]},
      -//                  {"label": "car", "scores": [0.75, 0.25]}]}
      -// 
      -// A response when there is an error processing an input -// instance: -//
      -// {"error": "Divide by zero"}
      -// 
      +// **** REMOVE FROM GENERATED DOCUMENTATION func (r *ProjectsService) Predict(name string, googlecloudmlv1beta1__predictrequest *GoogleCloudMlV1beta1__PredictRequest) *ProjectsPredictCall { c := &ProjectsPredictCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1968,6 +2188,7 @@ func (c *ProjectsPredictCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.googlecloudmlv1beta1__predictrequest) if err != nil { @@ -2023,7 +2244,7 @@ func (c *ProjectsPredictCall) Do(opts ...googleapi.CallOption) (*GoogleApi__Http } return ret, nil // { - // "description": "Performs prediction on the data in the request.\n\nResponses are very similar to requests. There are two top-level fields,\neach of which are JSON lists:\n\n\u003cdl\u003e\n \u003cdt\u003epredictions\u003c/dt\u003e\n \u003cdd\u003eThe list of predictions, one per instance in the request.\u003c/dd\u003e\n \u003cdt\u003eerror\u003c/dt\u003e\n \u003cdd\u003eAn error message returned instead of a prediction list if any\n instance produced an error.\u003c/dd\u003e\n\u003c/dl\u003e\n\nIf the call is successful, the response body will contain one prediction\nentry per instance in the request body. If prediction fails for any\ninstance, the response body will contain no predictions and will contian\na single error entry instead.\n\nEven though there is one prediction per instance, the format of a\nprediction is not directly related to the format of an instance.\nPredictions take whatever format is specified in the outputs collection\ndefined in the model. The collection of predictions is returned in a JSON\nlist. Each member of the list can be a simple value, a list, or a JSON\nobject of any complexity. If your model has more than one output tensor,\neach prediction will be a JSON object containing a name/value pair for each\noutput. 
The names identify the output aliases in the graph.\n\nThe following examples show some possible responses:\n\nA simple set of predictions for three input instances, where each\nprediction is an integer value:\n\u003cpre\u003e\n{\"predictions\": [5, 4, 3]}\n\u003c/pre\u003e\nA more complex set of predictions, each containing two named values that\ncorrespond to output tensors, named **label** and **scores** respectively.\nThe value of **label** is the predicted category (\"car\" or \"beach\") and\n**scores** contains a list of probabilities for that instance across the\npossible categories.\n\u003cpre\u003e\n{\"predictions\": [{\"label\": \"beach\", \"scores\": [0.1, 0.9]},\n {\"label\": \"car\", \"scores\": [0.75, 0.25]}]}\n\u003c/pre\u003e\nA response when there is an error processing an input instance:\n\u003cpre\u003e\n{\"error\": \"Divide by zero\"}\n\u003c/pre\u003e", + // "description": "Performs prediction on the data in the request.\n\n**** REMOVE FROM GENERATED DOCUMENTATION", // "flatPath": "v1beta1/projects/{projectsId}:predict", // "httpMethod": "POST", // "id": "ml.projects.predict", @@ -2103,6 +2324,7 @@ func (c *ProjectsJobsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.googlecloudmlv1beta1__canceljobrequest) if err != nil { @@ -2238,6 +2460,7 @@ func (c *ProjectsJobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.googlecloudmlv1beta1__job) if err != nil { @@ -2382,6 +2605,7 @@ func (c *ProjectsJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2551,6 +2775,7 @@ func (c *ProjectsJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2727,6 +2952,7 @@ func (c *ProjectsModelsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.googlecloudmlv1beta1__model) if err != nil { @@ -2867,6 +3093,7 @@ func (c *ProjectsModelsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") @@ -3007,6 +3234,7 @@ func (c *ProjectsModelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3173,6 +3401,7 @@ func (c *ProjectsModelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3351,6 +3580,7 @@ func (c *ProjectsModelsVersionsCreateCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.googlecloudmlv1beta1__version) if err != nil { @@ -3492,6 +3722,7 @@ func (c *ProjectsModelsVersionsDeleteCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") @@ -3636,6 +3867,7 @@ func (c *ProjectsModelsVersionsGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3804,6 +4036,7 @@ func (c *ProjectsModelsVersionsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3979,6 +4212,7 @@ func (c *ProjectsModelsVersionsSetDefaultCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.googlecloudmlv1beta1__setdefaultversionrequest) if err != nil { @@ -4129,6 +4363,7 @@ func (c *ProjectsOperationsCancelCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}:cancel") @@ -4260,6 +4495,7 @@ func (c *ProjectsOperationsDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") @@ -4400,6 +4636,7 @@ func (c *ProjectsOperationsGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4567,6 +4804,7 @@ func (c *ProjectsOperationsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json index c4f7290c0..c0b162fa8 100644 --- a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json @@ -1,1118 +1,7 @@ { - "ownerName": "Google", - "resources": { - 
"projects": { - "resources": { - "monitoredResourceDescriptors": { - "methods": { - "list": { - "description": "Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.", - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListMonitoredResourceDescriptorsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string" - }, - "pageToken": { - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer" - }, - "filter": { - "type": "string", - "location": "query", - "description": "An optional filter describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n" - } - }, - "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", - "id": "monitoring.projects.monitoredResourceDescriptors.list", - "path": "v3/{+name}/monitoredResourceDescriptors" - }, - "get": { - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "MonitoredResourceDescriptor" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/monitoredResourceDescriptors/[^/]+$", - "location": "path", - "description": "The monitored resource descriptor to get. The format is \"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\". The {resource_type} is a predefined type, such as cloudsql_database.", - "required": true, - "type": "string" - } - }, - "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}", - "id": "monitoring.projects.monitoredResourceDescriptors.get", - "path": "v3/{+name}", - "description": "Gets a single monitored resource descriptor. This method does not require a Stackdriver account." - } - } - }, - "groups": { - "methods": { - "update": { - "httpMethod": "PUT", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Group" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "parameters": { - "name": { - "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". 
When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path" - }, - "validateOnly": { - "location": "query", - "description": "If true, validate this request but do not update the existing group.", - "type": "boolean" - } - }, - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "id": "monitoring.projects.groups.update", - "path": "v3/{+name}", - "description": "Updates an existing group. You can change any group attributes except name.", - "request": { - "$ref": "Group" - } - }, - "create": { - "request": { - "$ref": "Group" - }, - "description": "Creates a new group.", - "response": { - "$ref": "Group" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The project in which to create the group. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "validateOnly": { - "description": "If true, validate this request but do not create the group.", - "type": "boolean", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/groups", - "path": "v3/{+name}/groups", - "id": "monitoring.projects.groups.create" - }, - "delete": { - "description": "Deletes an existing group.", - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "location": "path", - "description": "The group to delete. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/groups/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "id": "monitoring.projects.groups.delete", - "path": "v3/{+name}" - }, - "list": { - "description": "Lists the existing groups.", - "httpMethod": "GET", - "response": { - "$ref": "ListGroupsResponse" - }, - "parameterOrder": [ - "name" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer" - }, - "ancestorsOfGroup": { - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. 
If the specified group has no immediate parent, the results are empty.", - "type": "string", - "location": "query" - }, - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project whose groups are to be listed. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string" - }, - "childrenOfGroup": { - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups whose parentName field contains the group name. If no groups have this parent, the results are empty.", - "type": "string", - "location": "query" - }, - "descendantsOfGroup": { - "type": "string", - "location": "query", - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns the descendants of the specified group. This is a superset of the results returned by the childrenOfGroup filter, and includes children-of-children, and so forth." - } - }, - "flatPath": "v3/projects/{projectsId}/groups", - "id": "monitoring.projects.groups.list", - "path": "v3/{+name}/groups" - }, - "get": { - "description": "Gets a single group.", - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Group" - }, - "parameters": { - "name": { - "description": "The group to retrieve. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "id": "monitoring.projects.groups.get", - "path": "v3/{+name}" - } - }, - "resources": { - "members": { - "methods": { - "list": { - "description": "Lists the monitored resources that are members of a group.", - "httpMethod": "GET", - "response": { - "$ref": "ListGroupMembersResponse" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "interval.startTime": { - "location": "query", - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - "format": "google-datetime", - "type": "string" - }, - "pageSize": { - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "name": { - "description": "The group whose members are listed. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path" - }, - "interval.endTime": { - "location": "query", - "description": "Required. The end of the time interval.", - "format": "google-datetime", - "type": "string" - }, - "filter": { - "type": "string", - "location": "query", - "description": "An optional list filter describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\nresource.type = \"gce_instance\"\n" - }, - "pageToken": { - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", - "type": "string", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members", - "id": "monitoring.projects.groups.members.list", - "path": "v3/{+name}/members" - } - } - } - } - }, - "collectdTimeSeries": { - "methods": { - "create": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project in which to create the time series. The format is \"projects/PROJECT_ID_OR_NUMBER\".", - "required": true, - "type": "string" - } - }, - "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", - "path": "v3/{+name}/collectdTimeSeries", - "id": "monitoring.projects.collectdTimeSeries.create", - "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e", - "request": { - "$ref": "CreateCollectdTimeSeriesRequest" - } - } - } - }, - "metricDescriptors": { - "methods": { - "create": { - "request": { - "$ref": "MetricDescriptor" - }, - "description": "Creates a new metric descriptor. User-created metric descriptors define custom metrics.", - "response": { - "$ref": "MetricDescriptor" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors", - "path": "v3/{+name}/metricDescriptors", - "id": "monitoring.projects.metricDescriptors.create" - }, - "delete": { - "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", - "path": "v3/{+name}", - "id": "monitoring.projects.metricDescriptors.delete", - "description": "Deletes a metric descriptor. Only user-created custom metrics can be deleted.", - "response": { - "$ref": "Empty" - }, - "httpMethod": "DELETE", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". 
An example of {metric_id} is: \"custom.googleapis.com/my_test_metric\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/metricDescriptors/.+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ] - }, - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListMetricDescriptorsResponse" - }, - "parameters": { - "filter": { - "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics:\nmetric.type = starts_with(\"custom.googleapis.com/\")\n", - "type": "string", - "location": "query" - }, - "name": { - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors", - "id": "monitoring.projects.metricDescriptors.list", - "path": "v3/{+name}/metricDescriptors", - "description": "Lists metric descriptors that match a filter. This method does not require a Stackdriver account." - }, - "get": { - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "MetricDescriptor" - }, - "httpMethod": "GET", - "parameters": { - "name": { - "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example value of {metric_id} is \"compute.googleapis.com/instance/disk/read_bytes_count\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/metricDescriptors/.+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", - "path": "v3/{+name}", - "id": "monitoring.projects.metricDescriptors.get", - "description": "Gets a single metric descriptor. This method does not require a Stackdriver account." - } - } - }, - "timeSeries": { - "methods": { - "list": { - "response": { - "$ref": "ListTimeSeriesResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "aggregation.groupByFields": { - "repeated": true, - "location": "query", - "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. 
Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.", - "type": "string" - }, - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string" - }, - "interval.endTime": { - "location": "query", - "description": "Required. The end of the time interval.", - "format": "google-datetime", - "type": "string" - }, - "aggregation.alignmentPeriod": { - "location": "query", - "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", - "format": "google-duration", - "type": "string" - }, - "pageSize": { - "description": "A positive number that is the maximum number of results to return. When view field sets to FULL, it limits the number of Points server will return; if view field is HEADERS, it limits the number of TimeSeries server will return.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "orderBy": { - "location": "query", - "description": "Specifies the order in which the points of the time series should be returned. By default, results are not ordered. Currently, this field must be left blank.", - "type": "string" - }, - "aggregation.crossSeriesReducer": { - "location": "query", - "enum": [ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05" - ], - "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - "type": "string" - }, - "filter": { - "location": "query", - "description": "A monitoring filter that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. 
For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.label.instance_name = \"my-instance-name\"\n", - "type": "string" - }, - "aggregation.perSeriesAligner": { - "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - "type": "string", - "location": "query", - "enum": [ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05" - ] - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "interval.startTime": { - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - "format": "google-datetime", - "type": "string", - "location": "query" - }, - "view": { - "enum": [ - "FULL", - "HEADERS" - ], - "description": "Specifies which information is returned about the time series.", - "type": "string", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/projects/{projectsId}/timeSeries", - "path": "v3/{+name}/timeSeries", - "id": "monitoring.projects.timeSeries.list", - "description": "Lists time series that match a filter. This method does not require a Stackdriver account." - }, - "create": { - "path": "v3/{+name}/timeSeries", - "id": "monitoring.projects.timeSeries.create", - "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", - "request": { - "$ref": "CreateTimeSeriesRequest" - }, - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "name": { - "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "flatPath": "v3/projects/{projectsId}/timeSeries" - } - } - }, - "categories": { - "methods": { - "list": { - "id": "monitoring.projects.categories.list", - "path": "v3/{+parent}/categories", - "description": "List all Categories for a host project.", - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListCategoriesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "parent": { - "description": "Resource parent of the project to get. Resource parent form is projects/{project_id_or_number}.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "filter": { - "description": "A filter that specifies what Categories to return.", - "type": "string", - "location": "query" - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "description": "A positive number that is the maximum number of results to return. When 0, use default page size.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "flatPath": "v3/projects/{projectsId}/categories" - }, - "create": { - "request": { - "$ref": "Category" - }, - "description": "Create a new Category.", - "response": { - "$ref": "Category" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "parameters": { - "parent": { - "description": "Resource parent of the project to get. Resource parent form is projects/{project_id_or_number}.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/categories", - "path": "v3/{+parent}/categories", - "id": "monitoring.projects.categories.create" - }, - "delete": { - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "parameters": { - "name": { - "location": "path", - "description": "Resource name of category to delete. Resource name form is projects/{project_id_or_number}/categories/{short_name}.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/categories/[^/]+$" - } - }, - "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}", - "id": "monitoring.projects.categories.delete", - "path": "v3/{+name}", - "description": "Delete a Category." 
- } - }, - "resources": { - "metricAssociations": { - "methods": { - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListMetricAssociationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "A positive number that is the maximum number of results to return. When 0, use default page size.", - "format": "int32", - "type": "integer" - }, - "parent": { - "description": "Resource parent of the category to get. Resource parent form is projects/{project_id_or_number}/categories/{short_name}.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/categories/[^/]+$", - "location": "path" - }, - "filter": { - "location": "query", - "description": "A filter that specifies what MetricAssociations to return.", - "type": "string" - } - }, - "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}/metricAssociations", - "id": "monitoring.projects.categories.metricAssociations.list", - "path": "v3/{+parent}/metricAssociations", - "description": "List the MetricAssociations in a given Category." - }, - "create": { - "response": { - "$ref": "MetricAssociation" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "parameters": { - "parent": { - "description": "Resource parent of the category to get. Resource parent form is projects/{project_id_or_number}/categories/{short_name}.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/categories/[^/]+$", - "location": "path" - } - }, - "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}/metricAssociations", - "path": "v3/{+parent}/metricAssociations", - "id": "monitoring.projects.categories.metricAssociations.create", - "description": "Create a MetricAssociation.", - "request": { - "$ref": "MetricAssociation" - } - }, - "delete": { - "description": "Delete a MetricAssociation.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/categories/[^/]+/metricAssociations/[^/]+$", - "location": "path", - "description": "Resource name of metric association to delete. 
Resource name form is projects/{project_id_or_number}/\n categories/{short_name}/metricAssociations/{metric_name}.", - "required": true, - "type": "string" - } - }, - "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}/metricAssociations/{metricAssociationsId}", - "path": "v3/{+name}", - "id": "monitoring.projects.categories.metricAssociations.delete" - } - } - } - } - } - } - }, - "categories": { - "methods": { - "list": { - "response": { - "$ref": "ListCategoriesResponse" - }, - "parameterOrder": [], - "httpMethod": "GET", - "parameters": { - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "A positive number that is the maximum number of results to return. When 0, use default page size.", - "format": "int32", - "type": "integer" - }, - "parent": { - "location": "query", - "description": "Resource parent of the project to get. Resource parent form is projects/{project_id_or_number}.", - "type": "string" - }, - "filter": { - "type": "string", - "location": "query", - "description": "A filter that specifies what Categories to return." - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/categories", - "path": "v3/categories", - "id": "monitoring.categories.list", - "description": "List all Categories for a host project." - } - }, - "resources": { - "metricAssociations": { - "methods": { - "list": { - "id": "monitoring.categories.metricAssociations.list", - "path": "v3/{+parent}/metricAssociations", - "description": "List the MetricAssociations in a given Category.", - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListMetricAssociationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "filter": { - "location": "query", - "description": "A filter that specifies what MetricAssociations to return.", - "type": "string" - }, - "pageToken": { - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "A positive number that is the maximum number of results to return. When 0, use default page size.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "parent": { - "description": "Resource parent of the category to get. 
Resource parent form is projects/{project_id_or_number}/categories/{short_name}.", - "required": true, - "type": "string", - "pattern": "^categories/[^/]+$", - "location": "path" - } - }, - "flatPath": "v3/categories/{categoriesId}/metricAssociations" - } - } - } - } - } - }, - "parameters": { - "callback": { - "location": "query", - "description": "JSONP", - "type": "string" - }, - "$.xgafv": { - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string" - }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" - }, - "access_token": { - "location": "query", - "description": "OAuth access token.", - "type": "string" - }, - "key": { - "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "location": "query", - "description": "Pretty-print response.", - "type": "boolean", - "default": "true" - }, - "bearer_token": { - "type": "string", - "location": "query", - "description": "OAuth bearer token." - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "location": "query", - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true" - }, - "fields": { - "location": "query", - "description": "Selector specifying which fields to include in a partial response.", - "type": "string" - }, - "uploadType": { - "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string" - } - }, - "version": "v3", - "baseUrl": "https://monitoring.googleapis.com/", - "kind": "discovery#restDescription", - "description": "Manages your Stackdriver Monitoring data and configurations. Most projects must be associated with a Stackdriver account, with a few exceptions as noted on the individual method pages.", - "servicePath": "", - "basePath": "", - "id": "monitoring:v3", - "documentationLink": "https://cloud.google.com/monitoring/api/", - "revision": "20170124", "discoveryVersion": "v1", "version_module": "True", "schemas": { - "CollectdPayload": { - "description": "A collection of data points sent from a collectd-based plugin. See the collectd documentation for more information.", - "type": "object", - "properties": { - "metadata": { - "type": "object", - "additionalProperties": { - "$ref": "TypedValue" - }, - "description": "The measurement metadata. Example: \"process_id\" -\u003e 12345" - }, - "type": { - "description": "The measurement type. Example: \"memory\".", - "type": "string" - }, - "plugin": { - "description": "The name of the plugin. 
Example: \"disk\".", - "type": "string" - }, - "pluginInstance": { - "description": "The instance name of the plugin Example: \"hdcl\".", - "type": "string" - }, - "endTime": { - "description": "The end time of the interval.", - "format": "google-datetime", - "type": "string" - }, - "startTime": { - "description": "The start time of the interval.", - "format": "google-datetime", - "type": "string" - }, - "values": { - "type": "array", - "items": { - "$ref": "CollectdValue" - }, - "description": "The measured values during this time interval. Each value must have a different dataSourceName." - }, - "typeInstance": { - "description": "The measurement type instance. Example: \"used\".", - "type": "string" - } - }, - "id": "CollectdPayload" - }, - "Linear": { - "description": "Specify a sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.Defines num_finite_buckets + 2 (= N) buckets with these boundaries for bucket i:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", - "type": "object", - "properties": { - "width": { - "description": "Must be greater than 0.", - "format": "double", - "type": "number" - }, - "offset": { - "type": "number", - "description": "Lower bound of the first bucket.", - "format": "double" - }, - "numFiniteBuckets": { - "description": "Must be greater than 0.", - "format": "int32", - "type": "integer" - } - }, - "id": "Linear" - }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", "type": "object", @@ -1123,6 +12,10 @@ "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", "type": "object", "properties": { + "name": { + "description": "The option's name. For protobuf built-in options (options defined in descriptor.proto), this is the short name. For example, \"map_entry\". For custom options, it should be the fully-qualified name. For example, \"google.api.http\".", + "type": "string" + }, "value": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -1130,10 +23,6 @@ }, "description": "The option's value packed in an Any message. If the value is a primitive, the corresponding wrapper type defined in google/protobuf/wrappers.proto should be used. If the value is an enum, it should be stored as an int32 value using the google.protobuf.Int32Value type.", "type": "object" - }, - "name": { - "description": "The option's name. For protobuf built-in options (options defined in descriptor.proto), this is the short name. For example, \"map_entry\". For custom options, it should be the fully-qualified name. 
For example, \"google.api.http\".", - "type": "string" } }, "id": "Option" @@ -1180,9 +69,9 @@ "type": "integer" }, "growthFactor": { - "type": "number", "description": "Must be greater than 1.", - "format": "double" + "format": "double", + "type": "number" }, "scale": { "description": "Must be greater than 0.", @@ -1197,12 +86,12 @@ "type": "object", "properties": { "value": { - "description": "The value of the data point.", - "$ref": "TypedValue" + "$ref": "TypedValue", + "description": "The value of the data point." }, "interval": { - "$ref": "TimeInterval", - "description": "The time interval to which the data point applies. For GAUGE metrics, only the end time of the interval is used. For DELTA metrics, the start and end time should specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For CUMULATIVE metrics, the start and end time should specify a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." + "description": "The time interval to which the data point applies. For GAUGE metrics, only the end time of the interval is used. For DELTA metrics, the start and end time should specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For CUMULATIVE metrics, the start and end time should specify a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points.", + "$ref": "TimeInterval" } }, "id": "Point" @@ -1211,16 +100,16 @@ "description": "A specific metric, identified by specifying values for all of the labels of a MetricDescriptor.", "type": "object", "properties": { - "type": { - "description": "An existing metric type, see google.api.MetricDescriptor. For example, custom.googleapis.com/invoice/paid/amount.", - "type": "string" - }, "labels": { + "description": "The set of label values that uniquely identify this metric. All labels listed in the MetricDescriptor must be assigned values.", + "type": "object", "additionalProperties": { "type": "string" - }, - "description": "The set of label values that uniquely identify this metric. All labels listed in the MetricDescriptor must be assigned values.", - "type": "object" + } + }, + "type": { + "description": "An existing metric type, see google.api.MetricDescriptor. For example, custom.googleapis.com/invoice/paid/amount.", + "type": "string" } }, "id": "Metric" @@ -1229,8 +118,11 @@ "description": "A single field of a message type.", "type": "object", "properties": { + "jsonName": { + "description": "The field JSON name.", + "type": "string" + }, "kind": { - "type": "string", "enumDescriptions": [ "Field type unknown.", "Field type double.", @@ -1273,10 +165,7 @@ "TYPE_SINT32", "TYPE_SINT64" ], - "description": "The field type." 
- }, - "jsonName": { - "description": "The field JSON name.", + "description": "The field type.", "type": "string" }, "options": { @@ -1291,10 +180,6 @@ "format": "int32", "type": "integer" }, - "packed": { - "description": "Whether to use alternative packed wire representation.", - "type": "boolean" - }, "cardinality": { "description": "The field cardinality.", "type": "string", @@ -1311,9 +196,13 @@ "CARDINALITY_REPEATED" ] }, + "packed": { + "description": "Whether to use alternative packed wire representation.", + "type": "boolean" + }, "defaultValue": { - "type": "string", - "description": "The string value of the default value of this field. Proto2 syntax only." + "description": "The string value of the default value of this field. Proto2 syntax only.", + "type": "string" }, "name": { "description": "The field name.", @@ -1324,53 +213,42 @@ "type": "string" }, "number": { - "type": "integer", "description": "The field number.", - "format": "int32" + "format": "int32", + "type": "integer" } }, "id": "Field" }, - "Category": { - "description": "A Vital Signs Category.", + "ListTimeSeriesResponse": { + "description": "The ListTimeSeries response.", "type": "object", "properties": { - "isDefault": { - "description": "A flag to indicate whether this category is part of Stackdriver's default taxonomy.", - "type": "boolean" - }, - "shortName": { - "description": "Unique usually one-word name for this category. e.g. latency or custom:goodness", - "type": "string" - }, - "name": { - "description": "Resource name for the category. e.g. projects/91091/categories/latency or projects/91091/categories/custom:goodness", - "type": "string" - }, - "displayName": { - "description": "A human-readable name for the category.", - "type": "string" + "timeSeries": { + "description": "One or more time series that match the filter included in the request.", + "type": "array", + "items": { + "$ref": "TimeSeries" + } }, - "description": { - "description": "A human-readable description for the category. The description can be longer and contain more details.", + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", "type": "string" } }, - "id": "Category" + "id": "ListTimeSeriesResponse" }, "LabelDescriptor": { "description": "A description of a label.", "type": "object", "properties": { - "key": { - "type": "string", - "description": "The label key." - }, "description": { "description": "A human-readable description for the label.", "type": "string" }, "valueType": { + "description": "The type of data that can be assigned to the label.", + "type": "string", "enumDescriptions": [ "A variable-length string. This is the default.", "Boolean; true or false.", @@ -1379,122 +257,106 @@ "enum": [ "STRING", "BOOL", - "INT64" - ], - "description": "The type of data that can be assigned to the label.", - "type": "string" - } - }, - "id": "LabelDescriptor" - }, - "ListTimeSeriesResponse": { - "description": "The ListTimeSeries response.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. 
To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "timeSeries": { - "description": "One or more time series that match the filter included in the request.", - "type": "array", - "items": { - "$ref": "TimeSeries" - } - } - }, - "id": "ListTimeSeriesResponse" - }, - "Group": { - "properties": { - "name": { - "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", - "type": "string" - }, - "parentName": { - "description": "The name of the group's parent, if it has one. The format is \"projects/{project_id_or_number}/groups/{group_id}\". For groups with no parent, parentName is the empty string, \"\".", - "type": "string" - }, - "displayName": { - "description": "A user-assigned name for this group, used only for display purposes.", - "type": "string" - }, - "isCluster": { - "description": "If true, the members of this group are considered to be a cluster. The system can perform additional analysis on groups that are clusters.", - "type": "boolean" + "INT64" + ] }, - "filter": { - "description": "The filter used to determine which monitored resources belong to this group.", + "key": { + "description": "The label key.", "type": "string" } }, - "id": "Group", - "description": "The description of a dynamic collection of monitored resources. Each group has a filter that is matched against monitored resources and their associated metadata. If a group's filter matches an available monitored resource, then that resource is a member of that group. Groups can contain any number of monitored resources, and each monitored resource can be a member of any number of groups.Groups can be nested in parent-child hierarchies. The parentName field identifies an optional parent for each group. If a group has a parent, then the only monitored resources available to be matched by the group's filter are the resources contained in the parent group. In other words, a group contains the monitored resources that match its filter and the filters of all the group's ancestors. A group without a parent can contain any monitored resource.For example, consider an infrastructure running a set of instances with two user-defined tags: \"environment\" and \"role\". A parent group has a filter, environment=\"production\". A child of that parent group has a filter, role=\"transcoder\". The parent group contains all instances in the production environment, regardless of their roles. 
The child group contains instances that have the transcoder role and are in the production environment.The monitored resources contained in a group can change at any moment, depending on what resources exist and what filters are associated with the group and its ancestors.", - "type": "object" + "id": "LabelDescriptor" }, "Type": { "description": "A protocol buffer message type.", "type": "object", "properties": { + "fields": { + "description": "The list of fields.", + "type": "array", + "items": { + "$ref": "Field" + } + }, + "name": { + "description": "The fully qualified message name.", + "type": "string" + }, + "oneofs": { + "description": "The list of types appearing in oneof definitions in this type.", + "type": "array", + "items": { + "type": "string" + } + }, "sourceContext": { "$ref": "SourceContext", "description": "The source context." }, "syntax": { + "enumDescriptions": [ + "Syntax proto2.", + "Syntax proto3." + ], "enum": [ "SYNTAX_PROTO2", "SYNTAX_PROTO3" ], "description": "The source syntax.", - "type": "string", - "enumDescriptions": [ - "Syntax proto2.", - "Syntax proto3." - ] + "type": "string" }, "options": { + "description": "The protocol buffer options.", "type": "array", "items": { "$ref": "Option" - }, - "description": "The protocol buffer options." - }, - "fields": { - "description": "The list of fields.", - "type": "array", - "items": { - "$ref": "Field" } + } + }, + "id": "Type" + }, + "Group": { + "description": "The description of a dynamic collection of monitored resources. Each group has a filter that is matched against monitored resources and their associated metadata. If a group's filter matches an available monitored resource, then that resource is a member of that group. Groups can contain any number of monitored resources, and each monitored resource can be a member of any number of groups.Groups can be nested in parent-child hierarchies. The parentName field identifies an optional parent for each group. If a group has a parent, then the only monitored resources available to be matched by the group's filter are the resources contained in the parent group. In other words, a group contains the monitored resources that match its filter and the filters of all the group's ancestors. A group without a parent can contain any monitored resource.For example, consider an infrastructure running a set of instances with two user-defined tags: \"environment\" and \"role\". A parent group has a filter, environment=\"production\". A child of that parent group has a filter, role=\"transcoder\". The parent group contains all instances in the production environment, regardless of their roles. The child group contains instances that have the transcoder role and are in the production environment.The monitored resources contained in a group can change at any moment, depending on what resources exist and what filters are associated with the group and its ancestors.", + "type": "object", + "properties": { + "filter": { + "description": "The filter used to determine which monitored resources belong to this group.", + "type": "string" }, "name": { - "description": "The fully qualified message name.", + "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". 
When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", "type": "string" }, - "oneofs": { - "description": "The list of types appearing in oneof definitions in this type.", - "type": "array", - "items": { - "type": "string" - } + "parentName": { + "description": "The name of the group's parent, if it has one. The format is \"projects/{project_id_or_number}/groups/{group_id}\". For groups with no parent, parentName is the empty string, \"\".", + "type": "string" + }, + "displayName": { + "description": "A user-assigned name for this group, used only for display purposes.", + "type": "string" + }, + "isCluster": { + "description": "If true, the members of this group are considered to be a cluster. The system can perform additional analysis on groups that are clusters.", + "type": "boolean" } }, - "id": "Type" + "id": "Group" }, "BucketOptions": { "description": "A Distribution may optionally contain a histogram of the values in the population. The histogram is given in bucket_counts as counts of values that fall into one of a sequence of non-overlapping buckets. The sequence of buckets is described by bucket_options.A bucket specifies an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket is strictly greater than the lower bound.The sequence of N buckets for a Distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i \u003e 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.BucketOptions describes bucket boundaries in one of three ways. Two describe the boundaries by giving parameters for a formula to generate boundaries and one gives the bucket boundaries explicitly.If bucket_options is not given, then no bucket_counts may be given.", "type": "object", "properties": { - "exponentialBuckets": { - "description": "The exponential buckets.", - "$ref": "Exponential" - }, "linearBuckets": { - "$ref": "Linear", - "description": "The linear bucket." + "description": "The linear bucket.", + "$ref": "Linear" }, "explicitBuckets": { "description": "The explicit buckets.", "$ref": "Explicit" + }, + "exponentialBuckets": { + "$ref": "Exponential", + "description": "The exponential buckets." } }, "id": "BucketOptions" @@ -1503,10 +365,6 @@ "description": "A single data point from a collectd-based plugin.", "type": "object", "properties": { - "value": { - "$ref": "TypedValue", - "description": "The measurement value." - }, "dataSourceType": { "enumDescriptions": [ "An unspecified data source type. This corresponds to google.api.MetricDescriptor.MetricKind.METRIC_KIND_UNSPECIFIED.", @@ -1525,44 +383,209 @@ "description": "The type of measurement.", "type": "string" }, - "dataSourceName": { - "description": "The data source for the collectd value. For example there are two data sources for network measurements: \"rx\" and \"tx\".", - "type": "string" + "dataSourceName": { + "description": "The data source for the collectd value. 
For example there are two data sources for network measurements: \"rx\" and \"tx\".", + "type": "string" + }, + "value": { + "description": "The measurement value.", + "$ref": "TypedValue" + } + }, + "id": "CollectdValue" + }, + "MetricDescriptor": { + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", + "type": "object", + "properties": { + "metricKind": { + "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metric_kind and value_type might not be supported.", + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "An instantaneous measurement of a value.", + "The change in a value during a time interval.", + "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." + ], + "enum": [ + "METRIC_KIND_UNSPECIFIED", + "GAUGE", + "DELTA", + "CUMULATIVE" + ] + }, + "description": { + "description": "A detailed description of the metric, which can be used in documentation.", + "type": "string" + }, + "displayName": { + "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\".", + "type": "string" + }, + "unit": { + "description": "The unit in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT)\nbit bit\nBy byte\ns second\nmin minute\nh hour\nd dayPrefixes (PREFIX)\nk kilo (10**3)\nM mega (10**6)\nG giga (10**9)\nT tera (10**12)\nP peta (10**15)\nE exa (10**18)\nZ zetta (10**21)\nY yotta (10**24)\nm milli (10**-3)\nu micro (10**-6)\nn nano (10**-9)\np pico (10**-12)\nf femto (10**-15)\na atto (10**-18)\nz zepto (10**-21)\ny yocto (10**-24)\nKi kibi (2**10)\nMi mebi (2**20)\nGi gibi (2**30)\nTi tebi (2**40)GrammarThe grammar includes the dimensionless unit 1, such as 1/s.The grammar also includes these connectors:\n/ division (as an infix operator, e.g. 1/s).\n. multiplication (as an infix operator, e.g. GBy.d)The grammar for a unit is as follows:\nExpression = Component { \".\" Component } { \"/\" Component } ;\n\nComponent = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\nAnnotation = \"{\" NAME \"}\" ;\nNotes:\nAnnotation is just a comment if it follows a UNIT and is equivalent to 1 if it is used alone. For examples, {requests}/s == 1/s, By{transmitted}/s == By/s.\nNAME is a sequence of non-blank printable ASCII characters not containing '{' or '}'.", + "type": "string" + }, + "labels": { + "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } + }, + "name": { + "description": "The resource name of the metric descriptor. 
Depending on the implementation, the name typically includes: (1) the parent resource name that defines the scope of the metric type or of its data; and (2) the metric's URL-encoded type, which also appears in the type field of this descriptor. For example, following is the resource name of a custom metric within the GCP project my-project-id:\n\"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"\n", + "type": "string" + }, + "type": { + "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined custom metric types have the DNS name custom.googleapis.com. Metric types should use a natural hierarchical grouping. For example:\n\"custom.googleapis.com/invoice/paid/amount\"\n\"appengine.googleapis.com/http/server/response_latencies\"\n", + "type": "string" + }, + "valueType": { + "description": "Whether the measurement is an integer, a floating-point number, etc. Some combinations of metric_kind and value_type might not be supported.", + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", + "The value is a signed 64-bit integer.", + "The value is a double precision floating point number.", + "The value is a text string. This value type can be used only if the metric kind is GAUGE.", + "The value is a Distribution.", + "The value is money." + ], + "enum": [ + "VALUE_TYPE_UNSPECIFIED", + "BOOL", + "INT64", + "DOUBLE", + "STRING", + "DISTRIBUTION", + "MONEY" + ] + } + }, + "id": "MetricDescriptor" + }, + "SourceContext": { + "description": "SourceContext represents information about the source of a protobuf element, like the file in which it is defined.", + "type": "object", + "properties": { + "fileName": { + "description": "The path-qualified name of the .proto file that contained the associated protobuf element. For example: \"google/protobuf/source_context.proto\".", + "type": "string" + } + }, + "id": "SourceContext" + }, + "Range": { + "description": "The range of the population values.", + "type": "object", + "properties": { + "min": { + "description": "The minimum of the population values.", + "format": "double", + "type": "number" + }, + "max": { + "description": "The maximum of the population values.", + "format": "double", + "type": "number" + } + }, + "id": "Range" + }, + "ListGroupsResponse": { + "description": "The ListGroups response.", + "type": "object", + "properties": { + "group": { + "description": "The groups that match the specified filters.", + "type": "array", + "items": { + "$ref": "Group" + } + }, + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" + } + }, + "id": "ListGroupsResponse" + }, + "CreateCollectdTimeSeriesRequest": { + "description": "The CreateCollectdTimeSeries request.", + "type": "object", + "properties": { + "resource": { + "$ref": "MonitoredResource", + "description": "The monitored resource associated with the time series." + }, + "collectdPayloads": { + "description": "The collectd payloads representing the time series data. 
You must not include more than a single point for each time series, so no two payloads can have the same values for all of the fields plugin, plugin_instance, type, and type_instance.", + "type": "array", + "items": { + "$ref": "CollectdPayload" + } + }, + "collectdVersion": { + "description": "The version of collectd that collected the data. Example: \"5.3.0-192.el6\".", + "type": "string" + } + }, + "id": "CreateCollectdTimeSeriesRequest" + }, + "ListGroupMembersResponse": { + "description": "The ListGroupMembers response.", + "type": "object", + "properties": { + "members": { + "description": "A set of monitored resources in the group.", + "type": "array", + "items": { + "$ref": "MonitoredResource" + } + }, + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" + }, + "totalSize": { + "description": "The total number of elements matching this request.", + "format": "int32", + "type": "integer" + } + }, + "id": "ListGroupMembersResponse" + }, + "ListMonitoredResourceDescriptorsResponse": { + "description": "The ListMonitoredResourcDescriptors response.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" + }, + "resourceDescriptors": { + "description": "The monitored resource descriptors that are available to this project and that match filter, if present.", + "type": "array", + "items": { + "$ref": "MonitoredResourceDescriptor" + } } }, - "id": "CollectdValue" + "id": "ListMonitoredResourceDescriptorsResponse" }, - "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", + "TimeSeries": { + "description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. This type is used for both listing and creating time series.", "type": "object", "properties": { - "type": { - "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined custom metric types have the DNS name custom.googleapis.com. Metric types should use a natural hierarchical grouping. For example:\n\"custom.googleapis.com/invoice/paid/amount\"\n\"appengine.googleapis.com/http/server/response_latencies\"\n", - "type": "string" - }, - "valueType": { - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string. This value type can be used only if the metric kind is GAUGE.", - "The value is a Distribution.", - "The value is money." - ], - "enum": [ - "VALUE_TYPE_UNSPECIFIED", - "BOOL", - "INT64", - "DOUBLE", - "STRING", - "DISTRIBUTION", - "MONEY" - ], - "description": "Whether the measurement is an integer, a floating-point number, etc. 
Some combinations of metric_kind and value_type might not be supported.", - "type": "string" + "resource": { + "$ref": "MonitoredResource", + "description": "The associated resource. A fully-specified monitored resource used to identify the time series." }, "metricKind": { + "description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either GAUGE (the default) or CUMULATIVE.", + "type": "string", "enumDescriptions": [ "Do not use this default value.", "An instantaneous measurement of a value.", @@ -1574,435 +597,1032 @@ "GAUGE", "DELTA", "CUMULATIVE" - ], - "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metric_kind and value_type might not be supported.", - "type": "string" - }, - "description": { - "description": "A detailed description of the metric, which can be used in documentation.", - "type": "string" - }, - "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\".", - "type": "string" + ] }, - "unit": { - "description": "The unit in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT)\nbit bit\nBy byte\ns second\nmin minute\nh hour\nd dayPrefixes (PREFIX)\nk kilo (10**3)\nM mega (10**6)\nG giga (10**9)\nT tera (10**12)\nP peta (10**15)\nE exa (10**18)\nZ zetta (10**21)\nY yotta (10**24)\nm milli (10**-3)\nu micro (10**-6)\nn nano (10**-9)\np pico (10**-12)\nf femto (10**-15)\na atto (10**-18)\nz zepto (10**-21)\ny yocto (10**-24)\nKi kibi (2**10)\nMi mebi (2**20)\nGi gibi (2**30)\nTi tebi (2**40)GrammarThe grammar includes the dimensionless unit 1, such as 1/s.The grammar also includes these connectors:\n/ division (as an infix operator, e.g. 1/s).\n. multiplication (as an infix operator, e.g. GBy.d)The grammar for a unit is as follows:\nExpression = Component { \".\" Component } { \"/\" Component } ;\n\nComponent = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\nAnnotation = \"{\" NAME \"}\" ;\nNotes:\nAnnotation is just a comment if it follows a UNIT and is equivalent to 1 if it is used alone. For examples, {requests}/s == 1/s, By{transmitted}/s == By/s.\nNAME is a sequence of non-blank printable ASCII characters not containing '{' or '}'.", - "type": "string" + "metric": { + "$ref": "Metric", + "description": "The associated metric. A fully-specified metric used to identify the time series." }, - "labels": { - "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", + "points": { + "description": "The data points of this time series. 
When listing time series, the order of the points is specified by the list method.When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION.", "type": "array", "items": { - "$ref": "LabelDescriptor" + "$ref": "Point" } }, - "name": { - "description": "The resource name of the metric descriptor. Depending on the implementation, the name typically includes: (1) the parent resource name that defines the scope of the metric type or of its data; and (2) the metric's URL-encoded type, which also appears in the type field of this descriptor. For example, following is the resource name of a custom metric within the GCP project my-project-id:\n\"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"\n", - "type": "string" + "valueType": { + "description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the type of the data in the points field.", + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", + "The value is a signed 64-bit integer.", + "The value is a double precision floating point number.", + "The value is a text string. This value type can be used only if the metric kind is GAUGE.", + "The value is a Distribution.", + "The value is money." + ], + "enum": [ + "VALUE_TYPE_UNSPECIFIED", + "BOOL", + "INT64", + "DOUBLE", + "STRING", + "DISTRIBUTION", + "MONEY" + ] } }, - "id": "MetricDescriptor" + "id": "TimeSeries" }, - "SourceContext": { - "description": "SourceContext represents information about the source of a protobuf element, like the file in which it is defined.", + "CreateTimeSeriesRequest": { + "description": "The CreateTimeSeries request.", "type": "object", "properties": { - "fileName": { - "type": "string", - "description": "The path-qualified name of the .proto file that contained the associated protobuf element. For example: \"google/protobuf/source_context.proto\"." + "timeSeries": { + "description": "The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. Each TimeSeries value must fully specify a unique time series by supplying all label values for the metric and the monitored resource.", + "type": "array", + "items": { + "$ref": "TimeSeries" + } } }, - "id": "SourceContext" + "id": "CreateTimeSeriesRequest" }, - "Range": { - "description": "The range of the population values.", + "Distribution": { + "description": "Distribution contains summary statistics for a population of values and, optionally, a histogram representing the distribution of those values across a specified set of histogram buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values.The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. 
The boundaries of the buckets are given either explicitly or by specifying parameters for a method of computing them (buckets of fixed width or buckets of exponentially increasing width).Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless.", "type": "object", "properties": { - "min": { - "description": "The minimum of the population values.", + "bucketCounts": { + "description": "If bucket_options is given, then the sum of the values in bucket_counts must equal the value in count. If bucket_options is not given, no bucket_counts fields may be given.Bucket counts are given in order under the numbering scheme described above (the underflow bucket has number 0; the finite buckets, if any, have numbers 1 through N-2; the overflow bucket has number N-1).The size of bucket_counts must be no greater than N as defined in bucket_options.Any suffix of trailing zero bucket_count fields may be omitted.", + "type": "array", + "items": { + "format": "int64", + "type": "string" + } + }, + "bucketOptions": { + "$ref": "BucketOptions", + "description": "Defines the histogram bucket boundaries." + }, + "sumOfSquaredDeviation": { + "description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is:\nSum[i=1..n]((x_i - mean)^2)\nKnuth, \"The Art of Computer Programming\", Vol. 2, page 323, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.", "format": "double", "type": "number" }, - "max": { - "description": "The maximum of the population values.", + "range": { + "$ref": "Range", + "description": "If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Stackdriver Monitoring API v3." + }, + "count": { + "description": "The number of values in the population. Must be non-negative.", + "format": "int64", + "type": "string" + }, + "mean": { + "description": "The arithmetic mean of the values in the population. If count is zero then this field must be zero.", "format": "double", "type": "number" } }, - "id": "Range" + "id": "Distribution" }, - "ListGroupsResponse": { - "description": "The ListGroups response.", + "MonitoredResource": { + "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", "type": "object", "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" + "labels": { + "description": "Required. 
Values for all of the labels listed in the associated monitored resource descriptor. For example, Cloud SQL databases use the labels \"database_id\" and \"zone\".", + "type": "object", + "additionalProperties": { + "type": "string" + } }, - "group": { - "description": "The groups that match the specified filters.", + "type": { + "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Cloud SQL database is \"cloudsql_database\".", + "type": "string" + } + }, + "id": "MonitoredResource" + }, + "ListMetricDescriptorsResponse": { + "description": "The ListMetricDescriptors response.", + "type": "object", + "properties": { + "metricDescriptors": { + "description": "The metric descriptors that are available to the project and that match the value of filter, if present.", "type": "array", "items": { - "$ref": "Group" + "$ref": "MetricDescriptor" } + }, + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" } }, - "id": "ListGroupsResponse" + "id": "ListMetricDescriptorsResponse" }, - "ListMetricAssociationsResponse": { - "description": "The MetricAssociations response.", + "MonitoredResourceDescriptor": { + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", "type": "object", "properties": { - "metricAssociations": { + "displayName": { + "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", + "type": "string" + }, + "description": { + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", + "type": "string" + }, + "type": { + "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", + "type": "string" + }, + "labels": { + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", "type": "array", "items": { - "$ref": "MetricAssociation" - }, - "description": "The MetricAssociations that match the specified filters." + "$ref": "LabelDescriptor" + } }, - "nextPageToken": { - "type": "string", - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method." + "name": { + "description": "Optional. 
The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", + "type": "string" } }, - "id": "ListMetricAssociationsResponse" + "id": "MonitoredResourceDescriptor" }, - "CreateCollectdTimeSeriesRequest": { - "description": "The CreateCollectdTimeSeries request.", + "TypedValue": { + "description": "A single strongly-typed value.", "type": "object", "properties": { - "resource": { - "description": "The monitored resource associated with the time series.", - "$ref": "MonitoredResource" + "boolValue": { + "description": "A Boolean value: true or false.", + "type": "boolean" }, - "collectdPayloads": { - "description": "The collectd payloads representing the time series data. You must not include more than a single point for each time series, so no two payloads can have the same values for all of the fields plugin, plugin_instance, type, and type_instance.", + "stringValue": { + "description": "A variable-length string value.", + "type": "string" + }, + "doubleValue": { + "description": "A 64-bit double-precision floating-point number. Its magnitude is approximately ±10\u003csup\u003e±300\u003c/sup\u003e and it has 16 significant digits of precision.", + "format": "double", + "type": "number" + }, + "int64Value": { + "description": "A 64-bit integer. Its range is approximately ±9.2x10\u003csup\u003e18\u003c/sup\u003e.", + "format": "int64", + "type": "string" + }, + "distributionValue": { + "$ref": "Distribution", + "description": "A distribution value." + } + }, + "id": "TypedValue" + }, + "CollectdPayload": { + "description": "A collection of data points sent from a collectd-based plugin. See the collectd documentation for more information.", + "type": "object", + "properties": { + "values": { + "description": "The measured values during this time interval. Each value must have a different dataSourceName.", "type": "array", "items": { - "$ref": "CollectdPayload" + "$ref": "CollectdValue" + } + }, + "typeInstance": { + "description": "The measurement type instance. Example: \"used\".", + "type": "string" + }, + "metadata": { + "description": "The measurement metadata. Example: \"process_id\" -\u003e 12345", + "type": "object", + "additionalProperties": { + "$ref": "TypedValue" } }, - "collectdVersion": { - "description": "The version of collectd that collected the data. Example: \"5.3.0-192.el6\".", + "type": { + "description": "The measurement type. Example: \"memory\".", + "type": "string" + }, + "plugin": { + "description": "The name of the plugin. Example: \"disk\".", + "type": "string" + }, + "pluginInstance": { + "description": "The instance name of the plugin Example: \"hdcl\".", + "type": "string" + }, + "endTime": { + "description": "The end time of the interval.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "The start time of the interval.", + "format": "google-datetime", "type": "string" } }, - "id": "CreateCollectdTimeSeriesRequest" + "id": "CollectdPayload" }, - "ListGroupMembersResponse": { - "description": "The ListGroupMembers response.", + "Linear": { + "description": "Specify a sequence of buckets that all have the same width (except overflow and underflow). 
Each bucket represents a constant absolute uncertainty on the specific value in the bucket.Defines num_finite_buckets + 2 (= N) buckets with these boundaries for bucket i:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", "type": "object", "properties": { - "members": { - "description": "A set of monitored resources in the group.", - "type": "array", - "items": { - "$ref": "MonitoredResource" + "numFiniteBuckets": { + "description": "Must be greater than 0.", + "format": "int32", + "type": "integer" + }, + "width": { + "description": "Must be greater than 0.", + "format": "double", + "type": "number" + }, + "offset": { + "description": "Lower bound of the first bucket.", + "format": "double", + "type": "number" + } + }, + "id": "Linear" + } + }, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "protocol": "rest", + "canonicalName": "Monitoring", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/monitoring.write": { + "description": "Publish metric data to your Google Cloud projects" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/monitoring.read": { + "description": "View monitoring data for all of your Google Cloud and third-party projects" + }, + "https://www.googleapis.com/auth/monitoring": { + "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" + } + } + } + }, + "rootUrl": "https://monitoring.googleapis.com/", + "ownerDomain": "google.com", + "name": "monitoring", + "batchPath": "batch", + "title": "Stackdriver Monitoring API", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "groups": { + "methods": { + "delete": { + "description": "Deletes an existing group.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "parameters": { + "name": { + "location": "path", + "description": "The group to delete. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.groups.delete" + }, + "list": { + "flatPath": "v3/projects/{projectsId}/groups", + "path": "v3/{+name}/groups", + "id": "monitoring.projects.groups.list", + "description": "Lists the existing groups.", + "response": { + "$ref": "ListGroupsResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "parameters": { + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", + "type": "string", + "location": "query" + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "ancestorsOfGroup": { + "location": "query", + "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", + "type": "string" + }, + "name": { + "description": "The project whose groups are to be listed. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "childrenOfGroup": { + "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups whose parentName field contains the group name. If no groups have this parent, the results are empty.", + "type": "string", + "location": "query" + }, + "descendantsOfGroup": { + "location": "query", + "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns the descendants of the specified group. This is a superset of the results returned by the childrenOfGroup filter, and includes children-of-children, and so forth.", + "type": "string" + } + } + }, + "get": { + "response": { + "$ref": "Group" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "location": "path", + "description": "The group to retrieve. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.groups.get", + "description": "Gets a single group." + }, + "update": { + "description": "Updates an existing group. You can change any group attributes except name.", + "request": { + "$ref": "Group" + }, + "response": { + "$ref": "Group" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "parameters": { + "name": { + "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". 
When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$", + "location": "path" + }, + "validateOnly": { + "location": "query", + "description": "If true, validate this request but do not update the existing group.", + "type": "boolean" + } + }, + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.groups.update" + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Group" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "parameters": { + "name": { + "description": "The project in which to create the group. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "validateOnly": { + "description": "If true, validate this request but do not create the group.", + "type": "boolean", + "location": "query" + } + }, + "flatPath": "v3/projects/{projectsId}/groups", + "id": "monitoring.projects.groups.create", + "path": "v3/{+name}/groups", + "description": "Creates a new group.", + "request": { + "$ref": "Group" + } + } + }, + "resources": { + "members": { + "methods": { + "list": { + "description": "Lists the monitored resources that are members of a group.", + "response": { + "$ref": "ListGroupMembersResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "filter": { + "location": "query", + "description": "An optional list filter describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\nresource.type = \"gce_instance\"\n", + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + "type": "string" + }, + "interval.startTime": { + "location": "query", + "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + "format": "google-datetime", + "type": "string" + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "name": { + "location": "path", + "description": "The group whose members are listed. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$" + }, + "interval.endTime": { + "description": "Required. 
The end of the time interval.", + "format": "google-datetime", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members", + "path": "v3/{+name}/members", + "id": "monitoring.projects.groups.members.list" + } + } + } } }, - "nextPageToken": { - "type": "string", - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method." - }, - "totalSize": { - "description": "The total number of elements matching this request.", - "format": "int32", - "type": "integer" - } - }, - "id": "ListGroupMembersResponse" - }, - "ListMonitoredResourceDescriptorsResponse": { - "description": "The ListMonitoredResourcDescriptors response.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "resourceDescriptors": { - "description": "The monitored resource descriptors that are available to this project and that match filter, if present.", - "type": "array", - "items": { - "$ref": "MonitoredResourceDescriptor" + "collectdTimeSeries": { + "methods": { + "create": { + "request": { + "$ref": "CreateCollectdTimeSeriesRequest" + }, + "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e", + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "description": "The project in which to create the time series. The format is \"projects/PROJECT_ID_OR_NUMBER\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", + "id": "monitoring.projects.collectdTimeSeries.create", + "path": "v3/{+name}/collectdTimeSeries" + } } - } - }, - "id": "ListMonitoredResourceDescriptorsResponse" - }, - "MetricAssociation": { - "description": "A Vital Signs MetricAssociation, representing the inclusion of its referenced metric type within its parent category.", - "type": "object", - "properties": { - "name": { - "description": "Resource name for the metric association.", - "type": "string" - }, - "isDefault": { - "description": "A flag to indicate whether this association is part of Stackdriver's default taxonomy.", - "type": "boolean" - }, - "metricType": { - "description": "Resource name of the metric. It must be the full resource name. For example, \"compute.googleapis.com/instance/cpu/utilization\".", - "type": "string" - } - }, - "id": "MetricAssociation" - }, - "TimeSeries": { - "description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. 
This type is used for both listing and creating time series.", - "type": "object", - "properties": { - "valueType": { - "type": "string", - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string. This value type can be used only if the metric kind is GAUGE.", - "The value is a Distribution.", - "The value is money." - ], - "enum": [ - "VALUE_TYPE_UNSPECIFIED", - "BOOL", - "INT64", - "DOUBLE", - "STRING", - "DISTRIBUTION", - "MONEY" - ], - "description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the type of the data in the points field." - }, - "resource": { - "description": "The associated resource. A fully-specified monitored resource used to identify the time series.", - "$ref": "MonitoredResource" }, - "metricKind": { - "enumDescriptions": [ - "Do not use this default value.", - "An instantaneous measurement of a value.", - "The change in a value during a time interval.", - "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." - ], - "enum": [ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE" - ], - "description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either GAUGE (the default) or CUMULATIVE.", - "type": "string" + "timeSeries": { + "methods": { + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListTimeSeriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "parameters": { + "interval.endTime": { + "description": "Required. The end of the time interval.", + "format": "google-datetime", + "type": "string", + "location": "query" + }, + "aggregation.alignmentPeriod": { + "location": "query", + "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", + "format": "google-duration", + "type": "string" + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return. 
When view field sets to FULL, it limits the number of Points server will return; if view field is HEADERS, it limits the number of TimeSeries server will return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "orderBy": { + "description": "Specifies the order in which the points of the time series should be returned. By default, results are not ordered. Currently, this field must be left blank.", + "type": "string", + "location": "query" + }, + "aggregation.crossSeriesReducer": { + "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", + "type": "string", + "location": "query", + "enum": [ + "REDUCE_NONE", + "REDUCE_MEAN", + "REDUCE_MIN", + "REDUCE_MAX", + "REDUCE_SUM", + "REDUCE_STDDEV", + "REDUCE_COUNT", + "REDUCE_COUNT_TRUE", + "REDUCE_FRACTION_TRUE", + "REDUCE_PERCENTILE_99", + "REDUCE_PERCENTILE_95", + "REDUCE_PERCENTILE_50", + "REDUCE_PERCENTILE_05" + ] + }, + "filter": { + "description": "A monitoring filter that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.label.instance_name = \"my-instance-name\"\n", + "type": "string", + "location": "query" + }, + "pageToken": { + "location": "query", + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + "type": "string" + }, + "aggregation.perSeriesAligner": { + "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", + "type": "string", + "location": "query", + "enum": [ + "ALIGN_NONE", + "ALIGN_DELTA", + "ALIGN_RATE", + "ALIGN_INTERPOLATE", + "ALIGN_NEXT_OLDER", + "ALIGN_MIN", + "ALIGN_MAX", + "ALIGN_MEAN", + "ALIGN_COUNT", + "ALIGN_SUM", + "ALIGN_STDDEV", + "ALIGN_COUNT_TRUE", + "ALIGN_FRACTION_TRUE", + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + "ALIGN_PERCENTILE_05" + ] + }, + "interval.startTime": { + "location": "query", + "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. 
The start time must not be later than the end time.", + "format": "google-datetime", + "type": "string" + }, + "view": { + "description": "Specifies which information is returned about the time series.", + "type": "string", + "location": "query", + "enum": [ + "FULL", + "HEADERS" + ] + }, + "name": { + "location": "path", + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + }, + "aggregation.groupByFields": { + "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.", + "type": "string", + "repeated": true, + "location": "query" + } + }, + "flatPath": "v3/projects/{projectsId}/timeSeries", + "id": "monitoring.projects.timeSeries.list", + "path": "v3/{+name}/timeSeries", + "description": "Lists time series that match a filter. This method does not require a Stackdriver account." + }, + "create": { + "request": { + "$ref": "CreateTimeSeriesRequest" + }, + "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/timeSeries", + "id": "monitoring.projects.timeSeries.create", + "path": "v3/{+name}/timeSeries" + } + } }, - "metric": { - "$ref": "Metric", - "description": "The associated metric. A fully-specified metric used to identify the time series." + "metricDescriptors": { + "methods": { + "delete": { + "description": "Deletes a metric descriptor. Only user-created custom metrics can be deleted.", + "httpMethod": "DELETE", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "parameters": { + "name": { + "location": "path", + "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". 
An example of {metric_id} is: \"custom.googleapis.com/my_test_metric\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metricDescriptors/.+$" + } + }, + "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", + "id": "monitoring.projects.metricDescriptors.delete", + "path": "v3/{+name}" + }, + "list": { + "flatPath": "v3/projects/{projectsId}/metricDescriptors", + "id": "monitoring.projects.metricDescriptors.list", + "path": "v3/{+name}/metricDescriptors", + "description": "Lists metric descriptors that match a filter. This method does not require a Stackdriver account.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListMetricDescriptorsResponse" + }, + "parameters": { + "pageSize": { + "location": "query", + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer" + }, + "filter": { + "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics:\nmetric.type = starts_with(\"custom.googleapis.com/\")\n", + "type": "string", + "location": "query" + }, + "name": { + "location": "path", + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + }, + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ] + }, + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "MetricDescriptor" + }, + "parameters": { + "name": { + "location": "path", + "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example value of {metric_id} is \"compute.googleapis.com/instance/disk/read_bytes_count\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metricDescriptors/.+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", + "id": "monitoring.projects.metricDescriptors.get", + "path": "v3/{+name}", + "description": "Gets a single metric descriptor. This method does not require a Stackdriver account." + }, + "create": { + "description": "Creates a new metric descriptor. 
User-created metric descriptors define custom metrics.", + "request": { + "$ref": "MetricDescriptor" + }, + "response": { + "$ref": "MetricDescriptor" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.write" + ], + "parameters": { + "name": { + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "flatPath": "v3/projects/{projectsId}/metricDescriptors", + "path": "v3/{+name}/metricDescriptors", + "id": "monitoring.projects.metricDescriptors.create" + } + } }, - "points": { - "description": "The data points of this time series. When listing time series, the order of the points is specified by the list method.When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION.", - "type": "array", - "items": { - "$ref": "Point" + "monitoredResourceDescriptors": { + "methods": { + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListMonitoredResourceDescriptorsResponse" + }, + "parameters": { + "pageSize": { + "location": "query", + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer" + }, + "filter": { + "location": "query", + "description": "An optional filter describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n", + "type": "string" + }, + "name": { + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", + "id": "monitoring.projects.monitoredResourceDescriptors.list", + "path": "v3/{+name}/monitoredResourceDescriptors", + "description": "Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account." 
+ }, + "get": { + "response": { + "$ref": "MonitoredResourceDescriptor" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ], + "parameters": { + "name": { + "description": "The monitored resource descriptor to get. The format is \"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\". The {resource_type} is a predefined type, such as cloudsql_database.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/monitoredResourceDescriptors/[^/]+$", + "location": "path" + } + }, + "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.monitoredResourceDescriptors.get", + "description": "Gets a single monitored resource descriptor. This method does not require a Stackdriver account." + } } } - }, - "id": "TimeSeries" + } + } + }, + "parameters": { + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" }, - "CreateTimeSeriesRequest": { - "description": "The CreateTimeSeries request.", - "type": "object", - "properties": { - "timeSeries": { - "description": "The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. Each TimeSeries value must fully specify a unique time series by supplying all label values for the metric and the monitored resource.", - "type": "array", - "items": { - "$ref": "TimeSeries" - } - } - }, - "id": "CreateTimeSeriesRequest" + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" }, - "ListCategoriesResponse": { - "description": "The ListCategories response.", - "type": "object", - "properties": { - "category": { - "description": "The Categories that match the specified filters.", - "type": "array", - "items": { - "$ref": "Category" - } - }, - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - } - }, - "id": "ListCategoriesResponse" + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" }, - "Distribution": { - "type": "object", - "properties": { - "bucketCounts": { - "description": "If bucket_options is given, then the sum of the values in bucket_counts must equal the value in count. 
If bucket_options is not given, no bucket_counts fields may be given.Bucket counts are given in order under the numbering scheme described above (the underflow bucket has number 0; the finite buckets, if any, have numbers 1 through N-2; the overflow bucket has number N-1).The size of bucket_counts must be no greater than N as defined in bucket_options.Any suffix of trailing zero bucket_count fields may be omitted.", - "type": "array", - "items": { - "type": "string", - "format": "int64" - } - }, - "bucketOptions": { - "description": "Defines the histogram bucket boundaries.", - "$ref": "BucketOptions" - }, - "sumOfSquaredDeviation": { - "description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is:\nSum[i=1..n]((x_i - mean)^2)\nKnuth, \"The Art of Computer Programming\", Vol. 2, page 323, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.", - "format": "double", - "type": "number" - }, - "range": { - "$ref": "Range", - "description": "If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Stackdriver Monitoring API v3." - }, - "count": { - "description": "The number of values in the population. Must be non-negative.", - "format": "int64", - "type": "string" - }, - "mean": { - "description": "The arithmetic mean of the values in the population. If count is zero then this field must be zero.", - "format": "double", - "type": "number" - } - }, - "id": "Distribution", - "description": "Distribution contains summary statistics for a population of values and, optionally, a histogram representing the distribution of those values across a specified set of histogram buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values.The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. The boundaries of the buckets are given either explicitly or by specifying parameters for a method of computing them (buckets of fixed width or buckets of exponentially increasing width).Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless." + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "MonitoredResource": { - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", - "type": "object", - "properties": { - "type": { - "type": "string", - "description": "Required. The monitored resource type. 
This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Cloud SQL database is \"cloudsql_database\"." - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Cloud SQL databases use the labels \"database_id\" and \"zone\"." - } - }, - "id": "MonitoredResource" + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" }, - "ListMetricDescriptorsResponse": { - "description": "The ListMetricDescriptors response.", - "type": "object", - "properties": { - "metricDescriptors": { - "description": "The metric descriptors that are available to the project and that match the value of filter, if present.", - "type": "array", - "items": { - "$ref": "MetricDescriptor" - } - }, - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - } - }, - "id": "ListMetricDescriptorsResponse" + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, - "MonitoredResourceDescriptor": { - "type": "object", - "properties": { - "name": { - "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", - "type": "string" - }, - "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", - "type": "string" - }, - "description": { - "type": "string", - "description": "Optional. A detailed description of the monitored resource type that might be used in documentation." - }, - "type": { - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", - "type": "string" - }, - "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", - "type": "array", - "items": { - "$ref": "LabelDescriptor" - } - } - }, - "id": "MonitoredResourceDescriptor", - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API." 
+ "callback": { + "location": "query", + "description": "JSONP", + "type": "string" }, - "TypedValue": { - "id": "TypedValue", - "description": "A single strongly-typed value.", - "type": "object", - "properties": { - "boolValue": { - "description": "A Boolean value: true or false.", - "type": "boolean" - }, - "stringValue": { - "description": "A variable-length string value.", - "type": "string" - }, - "doubleValue": { - "description": "A 64-bit double-precision floating-point number. Its magnitude is approximately ±10\u003csup\u003e±300\u003c/sup\u003e and it has 16 significant digits of precision.", - "format": "double", - "type": "number" - }, - "int64Value": { - "description": "A 64-bit integer. Its range is approximately ±9.2x10\u003csup\u003e18\u003c/sup\u003e.", - "format": "int64", - "type": "string" - }, - "distributionValue": { - "$ref": "Distribution", - "description": "A distribution value." - } - } - } - }, - "protocol": "rest", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "canonicalName": "Monitoring", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/monitoring.write": { - "description": "Publish metric data to your Google Cloud projects" - }, - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/monitoring.read": { - "description": "View monitoring data for all of your Google Cloud and third-party projects" - }, - "https://www.googleapis.com/auth/monitoring": { - "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" - } - } + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "alt": { + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" } }, - "rootUrl": "https://monitoring.googleapis.com/", - "ownerDomain": "google.com", - "name": "monitoring", - "batchPath": "batch", - "title": "Stackdriver Monitoring API" + "version": "v3", + "baseUrl": "https://monitoring.googleapis.com/", + "description": "Manages your Stackdriver Monitoring data and configurations. 
Most projects must be associated with a Stackdriver account, with a few exceptions as noted on the individual method pages.", + "kind": "discovery#restDescription", + "servicePath": "", + "basePath": "", + "revision": "20170206", + "documentationLink": "https://cloud.google.com/monitoring/api/", + "id": "monitoring:v3" } diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go index 711d9eb4c..a8b8cf249 100644 --- a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go @@ -67,17 +67,15 @@ func New(client *http.Client) (*Service, error) { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} - s.Categories = NewCategoriesService(s) s.Projects = NewProjectsService(s) return s, nil } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Categories *CategoriesService + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -89,30 +87,12 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } -func NewCategoriesService(s *Service) *CategoriesService { - rs := &CategoriesService{s: s} - rs.MetricAssociations = NewCategoriesMetricAssociationsService(s) - return rs -} - -type CategoriesService struct { - s *Service - - MetricAssociations *CategoriesMetricAssociationsService -} - -func NewCategoriesMetricAssociationsService(s *Service) *CategoriesMetricAssociationsService { - rs := &CategoriesMetricAssociationsService{s: s} - return rs -} - -type CategoriesMetricAssociationsService struct { - s *Service +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) } func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} - rs.Categories = NewProjectsCategoriesService(s) rs.CollectdTimeSeries = NewProjectsCollectdTimeSeriesService(s) rs.Groups = NewProjectsGroupsService(s) rs.MetricDescriptors = NewProjectsMetricDescriptorsService(s) @@ -124,8 +104,6 @@ func NewProjectsService(s *Service) *ProjectsService { type ProjectsService struct { s *Service - Categories *ProjectsCategoriesService - CollectdTimeSeries *ProjectsCollectdTimeSeriesService Groups *ProjectsGroupsService @@ -137,27 +115,6 @@ type ProjectsService struct { TimeSeries *ProjectsTimeSeriesService } -func NewProjectsCategoriesService(s *Service) *ProjectsCategoriesService { - rs := &ProjectsCategoriesService{s: s} - rs.MetricAssociations = NewProjectsCategoriesMetricAssociationsService(s) - return rs -} - -type ProjectsCategoriesService struct { - s *Service - - MetricAssociations *ProjectsCategoriesMetricAssociationsService -} - -func NewProjectsCategoriesMetricAssociationsService(s *Service) *ProjectsCategoriesMetricAssociationsService { - rs := &ProjectsCategoriesMetricAssociationsService{s: s} - return rs -} - -type ProjectsCategoriesMetricAssociationsService struct { - s *Service -} - func NewProjectsCollectdTimeSeriesService(s *Service) *ProjectsCollectdTimeSeriesService { rs := &ProjectsCollectdTimeSeriesService{s: s} return rs @@ -268,55 +225,6 @@ func (s *BucketOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } -// Category: A Vital Signs Category. -type Category struct { - // Description: A human-readable description for the category. The - // description can be longer and contain more details. - Description string `json:"description,omitempty"` - - // DisplayName: A human-readable name for the category. - DisplayName string `json:"displayName,omitempty"` - - // IsDefault: A flag to indicate whether this category is part of - // Stackdriver's default taxonomy. - IsDefault bool `json:"isDefault,omitempty"` - - // Name: Resource name for the category. e.g. - // projects/91091/categories/latency or - // projects/91091/categories/custom:goodness - Name string `json:"name,omitempty"` - - // ShortName: Unique usually one-word name for this category. e.g. - // latency or custom:goodness - ShortName string `json:"shortName,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Category) MarshalJSON() ([]byte, error) { - type noMethod Category - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // CollectdPayload: A collection of data points sent from a // collectd-based plugin. See the collectd documentation for more // information. @@ -941,43 +849,6 @@ func (s *Linear) UnmarshalJSON(data []byte) error { return nil } -// ListCategoriesResponse: The ListCategories response. -type ListCategoriesResponse struct { - // Category: The Categories that match the specified filters. - Category []*Category `json:"category,omitempty"` - - // NextPageToken: If there are more results than have been returned, - // then this field is set to a non-empty value. To see the additional - // results, use that value as pageToken in the next call to this method. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Category") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Category") to include in - // API requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListCategoriesResponse) MarshalJSON() ([]byte, error) { - type noMethod ListCategoriesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // ListGroupMembersResponse: The ListGroupMembers response. type ListGroupMembersResponse struct { // Members: A set of monitored resources in the group. @@ -1055,45 +926,6 @@ func (s *ListGroupsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListMetricAssociationsResponse: The MetricAssociations response. -type ListMetricAssociationsResponse struct { - // MetricAssociations: The MetricAssociations that match the specified - // filters. - MetricAssociations []*MetricAssociation `json:"metricAssociations,omitempty"` - - // NextPageToken: If there are more results than have been returned, - // then this field is set to a non-empty value. To see the additional - // results, use that value as pageToken in the next call to this method. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "MetricAssociations") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MetricAssociations") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ListMetricAssociationsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMetricAssociationsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // ListMetricDescriptorsResponse: The ListMetricDescriptors response. type ListMetricDescriptorsResponse struct { // MetricDescriptors: The metric descriptors that are available to the @@ -1244,47 +1076,6 @@ func (s *Metric) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MetricAssociation: A Vital Signs MetricAssociation, representing the -// inclusion of its referenced metric type within its parent category. -type MetricAssociation struct { - // IsDefault: A flag to indicate whether this association is part of - // Stackdriver's default taxonomy. - IsDefault bool `json:"isDefault,omitempty"` - - // MetricType: Resource name of the metric. It must be the full resource - // name. For example, "compute.googleapis.com/instance/cpu/utilization". 
- MetricType string `json:"metricType,omitempty"` - - // Name: Resource name for the metric association. - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "IsDefault") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IsDefault") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MetricAssociation) MarshalJSON() ([]byte, error) { - type noMethod MetricAssociation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // MetricDescriptor: Defines a metric type and its schema. Once a metric // descriptor is created, deleting or altering it stops data collection // and makes the metric type's existing data unusable. @@ -1933,1337 +1724,6 @@ func (s *TypedValue) UnmarshalJSON(data []byte) error { return nil } -// method id "monitoring.categories.list": - -type CategoriesListCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List all Categories for a host project. -func (r *CategoriesService) List() *CategoriesListCall { - c := &CategoriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - return c -} - -// Filter sets the optional parameter "filter": A filter that specifies -// what Categories to return. -func (c *CategoriesListCall) Filter(filter string) *CategoriesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. When 0, use default -// page size. -func (c *CategoriesListCall) PageSize(pageSize int64) *CategoriesListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *CategoriesListCall) PageToken(pageToken string) *CategoriesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Parent sets the optional parameter "parent": Resource parent of the -// project to get. Resource parent form is -// projects/{project_id_or_number}. -func (c *CategoriesListCall) Parent(parent string) *CategoriesListCall { - c.urlParams_.Set("parent", parent) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *CategoriesListCall) Fields(s ...googleapi.Field) *CategoriesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *CategoriesListCall) IfNoneMatch(entityTag string) *CategoriesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *CategoriesListCall) Context(ctx context.Context) *CategoriesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *CategoriesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *CategoriesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/categories") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.categories.list" call. -// Exactly one of *ListCategoriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListCategoriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *CategoriesListCall) Do(opts ...googleapi.CallOption) (*ListCategoriesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListCategoriesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List all Categories for a host project.", - // "flatPath": "v3/categories", - // "httpMethod": "GET", - // "id": "monitoring.categories.list", - // "parameterOrder": [], - // "parameters": { - // "filter": { - // "description": "A filter that specifies what Categories to return.", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return. 
When 0, use default page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Resource parent of the project to get. Resource parent form is projects/{project_id_or_number}.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v3/categories", - // "response": { - // "$ref": "ListCategoriesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *CategoriesListCall) Pages(ctx context.Context, f func(*ListCategoriesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.categories.metricAssociations.list": - -type CategoriesMetricAssociationsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List the MetricAssociations in a given Category. -func (r *CategoriesMetricAssociationsService) List(parent string) *CategoriesMetricAssociationsListCall { - c := &CategoriesMetricAssociationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": A filter that specifies -// what MetricAssociations to return. -func (c *CategoriesMetricAssociationsListCall) Filter(filter string) *CategoriesMetricAssociationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. When 0, use default -// page size. -func (c *CategoriesMetricAssociationsListCall) PageSize(pageSize int64) *CategoriesMetricAssociationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *CategoriesMetricAssociationsListCall) PageToken(pageToken string) *CategoriesMetricAssociationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *CategoriesMetricAssociationsListCall) Fields(s ...googleapi.Field) *CategoriesMetricAssociationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *CategoriesMetricAssociationsListCall) IfNoneMatch(entityTag string) *CategoriesMetricAssociationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *CategoriesMetricAssociationsListCall) Context(ctx context.Context) *CategoriesMetricAssociationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *CategoriesMetricAssociationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *CategoriesMetricAssociationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/metricAssociations") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.categories.metricAssociations.list" call. -// Exactly one of *ListMetricAssociationsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListMetricAssociationsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *CategoriesMetricAssociationsListCall) Do(opts ...googleapi.CallOption) (*ListMetricAssociationsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListMetricAssociationsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List the MetricAssociations in a given Category.", - // "flatPath": "v3/categories/{categoriesId}/metricAssociations", - // "httpMethod": "GET", - // "id": "monitoring.categories.metricAssociations.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "filter": { - // "description": "A filter that specifies what MetricAssociations to return.", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return. When 0, use default page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Resource parent of the category to get. Resource parent form is projects/{project_id_or_number}/categories/{short_name}.", - // "location": "path", - // "pattern": "^categories/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+parent}/metricAssociations", - // "response": { - // "$ref": "ListMetricAssociationsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *CategoriesMetricAssociationsListCall) Pages(ctx context.Context, f func(*ListMetricAssociationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.categories.create": - -type ProjectsCategoriesCreateCall struct { - s *Service - parent string - category *Category - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new Category. -func (r *ProjectsCategoriesService) Create(parent string, category *Category) *ProjectsCategoriesCreateCall { - c := &ProjectsCategoriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.category = category - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsCategoriesCreateCall) Fields(s ...googleapi.Field) *ProjectsCategoriesCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsCategoriesCreateCall) Context(ctx context.Context) *ProjectsCategoriesCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsCategoriesCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsCategoriesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.category) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/categories") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.categories.create" call. -// Exactly one of *Category or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Category.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsCategoriesCreateCall) Do(opts ...googleapi.CallOption) (*Category, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Category{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new Category.", - // "flatPath": "v3/projects/{projectsId}/categories", - // "httpMethod": "POST", - // "id": "monitoring.projects.categories.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "Resource parent of the project to get. 
Resource parent form is projects/{project_id_or_number}.", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+parent}/categories", - // "request": { - // "$ref": "Category" - // }, - // "response": { - // "$ref": "Category" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.categories.delete": - -type ProjectsCategoriesDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Delete a Category. -func (r *ProjectsCategoriesService) Delete(name string) *ProjectsCategoriesDeleteCall { - c := &ProjectsCategoriesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsCategoriesDeleteCall) Fields(s ...googleapi.Field) *ProjectsCategoriesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsCategoriesDeleteCall) Context(ctx context.Context) *ProjectsCategoriesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsCategoriesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsCategoriesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.categories.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsCategoriesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Delete a Category.", - // "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.categories.delete", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Resource name of category to delete. Resource name form is projects/{project_id_or_number}/categories/{short_name}.", - // "location": "path", - // "pattern": "^projects/[^/]+/categories/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.categories.list": - -type ProjectsCategoriesListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List all Categories for a host project. -func (r *ProjectsCategoriesService) List(parent string) *ProjectsCategoriesListCall { - c := &ProjectsCategoriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": A filter that specifies -// what Categories to return. -func (c *ProjectsCategoriesListCall) Filter(filter string) *ProjectsCategoriesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. When 0, use default -// page size. -func (c *ProjectsCategoriesListCall) PageSize(pageSize int64) *ProjectsCategoriesListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsCategoriesListCall) PageToken(pageToken string) *ProjectsCategoriesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsCategoriesListCall) Fields(s ...googleapi.Field) *ProjectsCategoriesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *ProjectsCategoriesListCall) IfNoneMatch(entityTag string) *ProjectsCategoriesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsCategoriesListCall) Context(ctx context.Context) *ProjectsCategoriesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsCategoriesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsCategoriesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/categories") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.categories.list" call. -// Exactly one of *ListCategoriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListCategoriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsCategoriesListCall) Do(opts ...googleapi.CallOption) (*ListCategoriesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListCategoriesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List all Categories for a host project.", - // "flatPath": "v3/projects/{projectsId}/categories", - // "httpMethod": "GET", - // "id": "monitoring.projects.categories.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "filter": { - // "description": "A filter that specifies what Categories to return.", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return. When 0, use default page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Resource parent of the project to get. Resource parent form is projects/{project_id_or_number}.", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+parent}/categories", - // "response": { - // "$ref": "ListCategoriesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsCategoriesListCall) Pages(ctx context.Context, f func(*ListCategoriesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.categories.metricAssociations.create": - -type ProjectsCategoriesMetricAssociationsCreateCall struct { - s *Service - parent string - metricassociation *MetricAssociation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a MetricAssociation. -func (r *ProjectsCategoriesMetricAssociationsService) Create(parent string, metricassociation *MetricAssociation) *ProjectsCategoriesMetricAssociationsCreateCall { - c := &ProjectsCategoriesMetricAssociationsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.metricassociation = metricassociation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsCategoriesMetricAssociationsCreateCall) Fields(s ...googleapi.Field) *ProjectsCategoriesMetricAssociationsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsCategoriesMetricAssociationsCreateCall) Context(ctx context.Context) *ProjectsCategoriesMetricAssociationsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsCategoriesMetricAssociationsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsCategoriesMetricAssociationsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricassociation) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/metricAssociations") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.categories.metricAssociations.create" call. -// Exactly one of *MetricAssociation or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *MetricAssociation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsCategoriesMetricAssociationsCreateCall) Do(opts ...googleapi.CallOption) (*MetricAssociation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MetricAssociation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a MetricAssociation.", - // "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}/metricAssociations", - // "httpMethod": "POST", - // "id": "monitoring.projects.categories.metricAssociations.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "Resource parent of the category to get. Resource parent form is projects/{project_id_or_number}/categories/{short_name}.", - // "location": "path", - // "pattern": "^projects/[^/]+/categories/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+parent}/metricAssociations", - // "request": { - // "$ref": "MetricAssociation" - // }, - // "response": { - // "$ref": "MetricAssociation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.categories.metricAssociations.delete": - -type ProjectsCategoriesMetricAssociationsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Delete a MetricAssociation. -func (r *ProjectsCategoriesMetricAssociationsService) Delete(name string) *ProjectsCategoriesMetricAssociationsDeleteCall { - c := &ProjectsCategoriesMetricAssociationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsCategoriesMetricAssociationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsCategoriesMetricAssociationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsCategoriesMetricAssociationsDeleteCall) Context(ctx context.Context) *ProjectsCategoriesMetricAssociationsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsCategoriesMetricAssociationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsCategoriesMetricAssociationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.categories.metricAssociations.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsCategoriesMetricAssociationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Delete a MetricAssociation.", - // "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}/metricAssociations/{metricAssociationsId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.categories.metricAssociations.delete", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Resource name of metric association to delete. Resource name form is projects/{project_id_or_number}/\n categories/{short_name}/metricAssociations/{metric_name}.", - // "location": "path", - // "pattern": "^projects/[^/]+/categories/[^/]+/metricAssociations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.categories.metricAssociations.list": - -type ProjectsCategoriesMetricAssociationsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List the MetricAssociations in a given Category. 
-func (r *ProjectsCategoriesMetricAssociationsService) List(parent string) *ProjectsCategoriesMetricAssociationsListCall { - c := &ProjectsCategoriesMetricAssociationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": A filter that specifies -// what MetricAssociations to return. -func (c *ProjectsCategoriesMetricAssociationsListCall) Filter(filter string) *ProjectsCategoriesMetricAssociationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. When 0, use default -// page size. -func (c *ProjectsCategoriesMetricAssociationsListCall) PageSize(pageSize int64) *ProjectsCategoriesMetricAssociationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsCategoriesMetricAssociationsListCall) PageToken(pageToken string) *ProjectsCategoriesMetricAssociationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsCategoriesMetricAssociationsListCall) Fields(s ...googleapi.Field) *ProjectsCategoriesMetricAssociationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsCategoriesMetricAssociationsListCall) IfNoneMatch(entityTag string) *ProjectsCategoriesMetricAssociationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsCategoriesMetricAssociationsListCall) Context(ctx context.Context) *ProjectsCategoriesMetricAssociationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsCategoriesMetricAssociationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsCategoriesMetricAssociationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/metricAssociations") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.categories.metricAssociations.list" call. -// Exactly one of *ListMetricAssociationsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListMetricAssociationsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsCategoriesMetricAssociationsListCall) Do(opts ...googleapi.CallOption) (*ListMetricAssociationsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListMetricAssociationsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List the MetricAssociations in a given Category.", - // "flatPath": "v3/projects/{projectsId}/categories/{categoriesId}/metricAssociations", - // "httpMethod": "GET", - // "id": "monitoring.projects.categories.metricAssociations.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "filter": { - // "description": "A filter that specifies what MetricAssociations to return.", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return. When 0, use default page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Resource parent of the category to get. Resource parent form is projects/{project_id_or_number}/categories/{short_name}.", - // "location": "path", - // "pattern": "^projects/[^/]+/categories/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+parent}/metricAssociations", - // "response": { - // "$ref": "ListMetricAssociationsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsCategoriesMetricAssociationsListCall) Pages(ctx context.Context, f func(*ListMetricAssociationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - // method id "monitoring.projects.collectdTimeSeries.create": type ProjectsCollectdTimeSeriesCreateCall struct { @@ -3317,6 +1777,7 @@ func (c *ProjectsCollectdTimeSeriesCreateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createcollectdtimeseriesrequest) if err != nil { @@ -3461,6 +1922,7 @@ func (c *ProjectsGroupsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -3600,6 +2062,7 @@ func (c *ProjectsGroupsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") @@ -3737,6 +2200,7 @@ func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3924,6 +2388,7 @@ func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4111,6 +2576,7 @@ func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -4304,6 +2770,7 @@ func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4486,6 +2953,7 @@ func (c *ProjectsMetricDescriptorsCreateCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) if err != nil { @@ -4622,6 +3090,7 @@ func (c *ProjectsMetricDescriptorsDeleteCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") @@ -4760,6 +3229,7 @@ func (c 
*ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4930,6 +3400,7 @@ func (c *ProjectsMetricDescriptorsListCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5110,6 +3581,7 @@ func (c *ProjectsMonitoredResourceDescriptorsGetCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5280,6 +3752,7 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*h reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5455,6 +3928,7 @@ func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtimeseriesrequest) if err != nil { @@ -5764,6 +4238,7 @@ func (c *ProjectsTimeSeriesListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/oauth2/v1/oauth2-gen.go b/vendor/google.golang.org/api/oauth2/v1/oauth2-gen.go index 9b4a6b450..4307771db 100644 --- a/vendor/google.golang.org/api/oauth2/v1/oauth2-gen.go +++ b/vendor/google.golang.org/api/oauth2/v1/oauth2-gen.go @@ -70,9 +70,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Userinfo *UserinfoService } @@ -84,6 +85,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewUserinfoService(s *Service) *UserinfoService { rs := &UserinfoService{s: s} rs.V2 = NewUserinfoV2Service(s) @@ -439,6 +444,7 @@ func (c *GetCertForOpenIdConnectCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -536,6 +542,7 @@ func (c *GetCertForOpenIdConnectRawCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -655,6 +662,7 @@ func (c 
*GetRobotJwkCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -788,6 +796,7 @@ func (c *GetRobotMetadataRawCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -921,6 +930,7 @@ func (c *GetRobotMetadataX509Call) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1034,6 +1044,7 @@ func (c *TokeninfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v1/tokeninfo") @@ -1161,6 +1172,7 @@ func (c *UserinfoGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1285,6 +1297,7 @@ func (c *UserinfoV2MeGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go index 744046815..c12d1e867 100644 --- a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go +++ b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go @@ -70,9 +70,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Userinfo *UserinfoService } @@ -84,6 +85,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewUserinfoService(s *Service) *UserinfoService { rs := &UserinfoService{s: s} rs.V2 = NewUserinfoV2Service(s) @@ -367,6 +372,7 @@ func (c *GetCertForOpenIdConnectCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -491,6 +497,7 @@ func (c *TokeninfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/tokeninfo") @@ -619,6 +626,7 @@ func 
(c *UserinfoGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -742,6 +750,7 @@ func (c *UserinfoV2MeGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/pagespeedonline/v1/pagespeedonline-gen.go b/vendor/google.golang.org/api/pagespeedonline/v1/pagespeedonline-gen.go index 7fcf5f721..518fdb95f 100644 --- a/vendor/google.golang.org/api/pagespeedonline/v1/pagespeedonline-gen.go +++ b/vendor/google.golang.org/api/pagespeedonline/v1/pagespeedonline-gen.go @@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Pagespeedapi *PagespeedapiService } @@ -69,6 +70,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewPagespeedapiService(s *Service) *PagespeedapiService { rs := &PagespeedapiService{s: s} return rs @@ -744,6 +749,7 @@ func (c *PagespeedapiRunpagespeedCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/pagespeedonline/v2/pagespeedonline-gen.go b/vendor/google.golang.org/api/pagespeedonline/v2/pagespeedonline-gen.go index 94b1dfe17..ab8a76107 100644 --- a/vendor/google.golang.org/api/pagespeedonline/v2/pagespeedonline-gen.go +++ b/vendor/google.golang.org/api/pagespeedonline/v2/pagespeedonline-gen.go @@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Pagespeedapi *PagespeedapiService } @@ -69,6 +70,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewPagespeedapiService(s *Service) *PagespeedapiService { rs := &PagespeedapiService{s: s} return rs @@ -783,6 +788,7 @@ func (c *PagespeedapiRunpagespeedCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/partners/v2/partners-gen.go 
b/vendor/google.golang.org/api/partners/v2/partners-gen.go index e71dfda17..6ade605e1 100644 --- a/vendor/google.golang.org/api/partners/v2/partners-gen.go +++ b/vendor/google.golang.org/api/partners/v2/partners-gen.go @@ -58,9 +58,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only ClientMessages *ClientMessagesService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewClientMessagesService(s *Service) *ClientMessagesService { rs := &ClientMessagesService{s: s} return rs @@ -1507,6 +1512,7 @@ func (c *ClientMessagesLogCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmessagerequest) if err != nil { @@ -1726,6 +1732,7 @@ func (c *CompaniesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2147,6 +2154,7 @@ func (c *CompaniesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2442,6 +2450,7 @@ func (c *CompaniesLeadsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createleadrequest) if err != nil { @@ -2570,6 +2579,7 @@ func (c *UserEventsLogCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.logusereventrequest) if err != nil { @@ -2749,6 +2759,7 @@ func (c *UserStatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/people/v1/people-api.json b/vendor/google.golang.org/api/people/v1/people-api.json index 07fbc1b51..3ea223214 100644 --- a/vendor/google.golang.org/api/people/v1/people-api.json +++ b/vendor/google.golang.org/api/people/v1/people-api.json @@ -1,1256 +1,1342 @@ { - "kind": "discovery#restDescription", - "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/tzwJjoO4XDFgwNQjmiM8lPsFMzA\"", - "discoveryVersion": "v1", - "id": "people:v1", - "name": "people", - "version": "v1", - "revision": "20160210", - "title": "Google People API", - "description": "The Google People API 
service gives access to information about profiles and contacts.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://developers.google.com/people/", - "protocol": "rest", - "baseUrl": "https://people.googleapis.com/", - "basePath": "", - "rootUrl": "https://people.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "version_module": true, - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" - }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query" - }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query" - }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/contacts": { - "description": "Manage your contacts" - }, - "https://www.googleapis.com/auth/contacts.readonly": { - "description": "View your contacts" - }, - "https://www.googleapis.com/auth/plus.login": { - "description": "Know your basic profile info and list of people in your circles." 
- }, - "https://www.googleapis.com/auth/user.addresses.read": { - "description": "View your street addresses" - }, - "https://www.googleapis.com/auth/user.birthday.read": { - "description": "View your complete date of birth" - }, - "https://www.googleapis.com/auth/user.emails.read": { - "description": "View your email addresses" - }, - "https://www.googleapis.com/auth/user.phonenumbers.read": { - "description": "View your phone numbers" - }, - "https://www.googleapis.com/auth/userinfo.email": { - "description": "View your email address" - }, - "https://www.googleapis.com/auth/userinfo.profile": { - "description": "View your basic profile info" - } - } - } - }, - "schemas": { - "Person": { - "id": "Person", - "type": "object", - "description": "Information about a person merged from various data sources such as the authenticated user's contacts and profile data. Fields other than IDs, metadata, and group memberships are user-edited. Most fields can have multiple items. The items in a field have no guaranteed order, but each non-empty field is guaranteed to have exactly one field with `metadata.primary` set to true.", - "properties": { - "resourceName": { - "type": "string", - "description": "The resource name for the person, assigned by the server. An ASCII string with a max length of 27 characters. Always starts with `people/`." - }, - "etag": { - "type": "string", - "description": "The [HTTP entity tag](https://en.wikipedia.org/wiki/HTTP_ETag) of the resource. Used for web cache validation." - }, - "metadata": { - "$ref": "PersonMetadata", - "description": "Metadata about the person." - }, - "locales": { - "type": "array", - "description": "The person's locale preferences.", - "items": { - "$ref": "Locale" - } - }, - "names": { - "type": "array", - "description": "The person's names.", - "items": { - "$ref": "Name" - } - }, - "nicknames": { - "type": "array", - "description": "The person's nicknames.", - "items": { - "$ref": "Nickname" - } - }, - "coverPhotos": { - "type": "array", - "description": "The person's cover photos.", - "items": { - "$ref": "CoverPhoto" - } - }, - "photos": { - "type": "array", - "description": "The person's photos.", - "items": { - "$ref": "Photo" - } - }, - "genders": { - "type": "array", - "description": "The person's genders.", - "items": { - "$ref": "Gender" - } - }, - "ageRange": { - "type": "string", - "description": "The person's age range.", - "enum": [ - "AGE_RANGE_UNSPECIFIED", - "LESS_THAN_EIGHTEEN", - "EIGHTEEN_TO_TWENTY", - "TWENTY_ONE_OR_OLDER" - ] - }, - "birthdays": { - "type": "array", - "description": "The person's birthdays.", - "items": { - "$ref": "Birthday" - } - }, - "events": { - "type": "array", - "description": "The person's events.", - "items": { - "$ref": "Event" - } - }, - "addresses": { - "type": "array", - "description": "The person's street addresses.", - "items": { - "$ref": "Address" - } - }, - "residences": { - "type": "array", - "description": "The person's residences.", - "items": { - "$ref": "Residence" - } - }, - "emailAddresses": { - "type": "array", - "description": "The person's email addresses.", - "items": { - "$ref": "EmailAddress" - } - }, - "phoneNumbers": { - "type": "array", - "description": "The person's phone numbers.", - "items": { - "$ref": "PhoneNumber" - } - }, - "imClients": { - "type": "array", - "description": "The person's instant messaging clients.", - "items": { - "$ref": "ImClient" - } - }, - "taglines": { - "type": "array", - "description": "The person's taglines.", - "items": { - "$ref": "Tagline" 
- } - }, - "biographies": { - "type": "array", - "description": "The person's biographies.", - "items": { - "$ref": "Biography" - } - }, - "urls": { - "type": "array", - "description": "The person's associated URLs.", - "items": { - "$ref": "Url" - } - }, - "organizations": { - "type": "array", - "description": "The person's past or current organizations.", - "items": { - "$ref": "Organization" - } - }, - "occupations": { - "type": "array", - "description": "The person's occupations.", - "items": { - "$ref": "Occupation" - } - }, - "interests": { - "type": "array", - "description": "The person's interests.", - "items": { - "$ref": "Interest" - } - }, - "skills": { - "type": "array", - "description": "The person's skills.", - "items": { - "$ref": "Skill" - } - }, - "braggingRights": { - "type": "array", - "description": "The person's bragging rights.", - "items": { - "$ref": "BraggingRights" - } - }, - "relations": { - "type": "array", - "description": "The person's relations.", - "items": { - "$ref": "Relation" - } - }, - "relationshipInterests": { - "type": "array", - "description": "The kind of relationship the person is looking for.", - "items": { - "$ref": "RelationshipInterest" - } - }, - "relationshipStatuses": { - "type": "array", - "description": "The person's relationship statuses.", - "items": { - "$ref": "RelationshipStatus" - } - }, - "memberships": { - "type": "array", - "description": "The person's group memberships.", - "items": { - "$ref": "Membership" - } + "batchPath": "batch", + "id": "people:v1", + "documentationLink": "https://developers.google.com/people/", + "revision": "20170223", + "title": "Google People API", + "discoveryVersion": "v1", + "ownerName": "Google", + "version_module": "True", + "resources": { + "people": { + "methods": { + "getBatchGet": { + "httpMethod": "GET", + "response": { + "$ref": "GetPeopleResponse" + }, + "parameterOrder": [], + "parameters": { + "requestMask.includeField": { + "description": "Comma-separated list of fields to be included in the response. Omitting\nthis field will include all fields except for connections.list requests,\nwhich have a default mask that includes common fields like metadata, name,\nphoto, and profile url.\nEach path should start with `person.`: for example, `person.names` or\n`person.photos`.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + }, + "resourceNames": { + "description": "The resource name, such as one returned by\n[`people.connections.list`](/people/api/rest/v1/people.connections/list),\nof one of the people to provide information about. You can include this\nparameter up to 50 times in one request.", + "type": "string", + "repeated": true, + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/contacts", + "https://www.googleapis.com/auth/contacts.readonly", + "https://www.googleapis.com/auth/plus.login", + "https://www.googleapis.com/auth/user.addresses.read", + "https://www.googleapis.com/auth/user.birthday.read", + "https://www.googleapis.com/auth/user.emails.read", + "https://www.googleapis.com/auth/user.phonenumbers.read", + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile" + ], + "flatPath": "v1/people:batchGet", + "id": "people.people.getBatchGet", + "path": "v1/people:batchGet", + "description": "Provides information about a list of specific people by specifying a list\nof requested resource names. Use `people/me` to indicate the authenticated\nuser." 
+ }, + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "resourceName" + ], + "response": { + "$ref": "Person" + }, + "parameters": { + "resourceName": { + "location": "path", + "description": "The resource name of the person to provide information about.\n\n- To get information about the authenticated user, specify `people/me`.\n- To get information about any user, specify the resource name that\n identifies the user, such as the resource names returned by\n [`people.connections.list`](/people/api/rest/v1/people.connections/list).", + "required": true, + "type": "string", + "pattern": "^people/[^/]+$" + }, + "requestMask.includeField": { + "location": "query", + "description": "Comma-separated list of fields to be included in the response. Omitting\nthis field will include all fields except for connections.list requests,\nwhich have a default mask that includes common fields like metadata, name,\nphoto, and profile url.\nEach path should start with `person.`: for example, `person.names` or\n`person.photos`.", + "format": "google-fieldmask", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/contacts", + "https://www.googleapis.com/auth/contacts.readonly", + "https://www.googleapis.com/auth/plus.login", + "https://www.googleapis.com/auth/user.addresses.read", + "https://www.googleapis.com/auth/user.birthday.read", + "https://www.googleapis.com/auth/user.emails.read", + "https://www.googleapis.com/auth/user.phonenumbers.read", + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile" + ], + "flatPath": "v1/people/{peopleId}", + "id": "people.people.get", + "path": "v1/{+resourceName}", + "description": "Provides information about a person resource for a resource name. Use\n`people/me` to indicate the authenticated user." + } + }, + "resources": { + "connections": { + "methods": { + "list": { + "httpMethod": "GET", + "response": { + "$ref": "ListConnectionsResponse" + }, + "parameterOrder": [ + "resourceName" + ], + "parameters": { + "sortOrder": { + "location": "query", + "enum": [ + "LAST_MODIFIED_ASCENDING", + "FIRST_NAME_ASCENDING", + "LAST_NAME_ASCENDING" + ], + "description": "The order in which the connections should be sorted. Defaults to\n`LAST_MODIFIED_ASCENDING`.", + "type": "string" + }, + "requestSyncToken": { + "type": "boolean", + "location": "query", + "description": "Whether the response should include a sync token, which can be used to get\nall changes since the last request." + }, + "resourceName": { + "description": "The resource name to return connections for. Only `people/me` is valid.", + "required": true, + "type": "string", + "pattern": "^people/[^/]+$", + "location": "path" + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "The token of the page to be returned." + }, + "pageSize": { + "location": "query", + "description": "The number of connections to include in the response. Valid values are\nbetween 1 and 500, inclusive. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "requestMask.includeField": { + "location": "query", + "description": "Comma-separated list of fields to be included in the response. 
Omitting\nthis field will include all fields except for connections.list requests,\nwhich have a default mask that includes common fields like metadata, name,\nphoto, and profile url.\nEach path should start with `person.`: for example, `person.names` or\n`person.photos`.", + "format": "google-fieldmask", + "type": "string" + }, + "syncToken": { + "location": "query", + "description": "A sync token, returned by a previous call to `people.connections.list`.\nOnly resources changed since the sync token was created will be returned.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/contacts", + "https://www.googleapis.com/auth/contacts.readonly" + ], + "flatPath": "v1/people/{peopleId}/connections", + "id": "people.people.connections.list", + "path": "v1/{+resourceName}/connections", + "description": "Provides a list of the authenticated user's contacts merged with any\nlinked profiles." + } + } + } + } } - } }, - "PersonMetadata": { - "id": "PersonMetadata", - "type": "object", - "description": "Metadata about a person.", - "properties": { - "sources": { - "type": "array", - "description": "The sources of data for the person.", - "items": { - "$ref": "Source" - } - }, - "previousResourceNames": { - "type": "array", - "description": "Any former resource names this person has had. Populated only for [`connections.list`](/people/api/rest/v1/people.connections/list) requests that include a sync token. The resource name may change when adding or removing fields that link a contact and profile such as a verified email, verified phone number, or profile URL.", - "items": { + "parameters": { + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "type": "string" - } - }, - "deleted": { - "type": "boolean", - "description": "True if the person resource has been deleted. Populated only for [`connections.list`](/people/api/rest/v1/people.connections/list) requests that include a sync token." - }, - "objectType": { - "type": "string", - "description": "The type of the person object.", - "enum": [ - "OBJECT_TYPE_UNSPECIFIED", - "PERSON", - "PAGE" - ] - } - } - }, - "Source": { - "id": "Source", - "type": "object", - "description": "The source of a field.", - "properties": { - "type": { - "type": "string", - "description": "The source type.", - "enum": [ - "OTHER", - "ACCOUNT", - "PROFILE", - "DOMAIN_PROFILE", - "CONTACT" - ] - }, - "id": { - "type": "string", - "description": "A unique identifier within the source type generated by the server." - } - } - }, - "Locale": { - "id": "Locale", - "type": "object", - "description": "A person's locale preference.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the locale." - }, - "value": { - "type": "string", - "description": "The well-formed [IETF BCP 47](https://tools.ietf.org/html/bcp47) language tag representing the locale." - } - } - }, - "FieldMetadata": { - "id": "FieldMetadata", - "type": "object", - "description": "Metadata about a field.", - "properties": { - "primary": { - "type": "boolean", - "description": "True if the field is the primary field; false if the field is a secondary field." - }, - "verified": { - "type": "boolean", - "description": "True if the field is verified; false if the field is unverified. A verified field is typically a name, email address, phone number, or website that has been confirmed to be owned by the person." 
- }, - "source": { - "$ref": "Source", - "description": "The source of the field." - } - } - }, - "Name": { - "id": "Name", - "type": "object", - "description": "A person's name. If the name is a mononym, the family name is empty.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the name." - }, - "displayName": { - "type": "string", - "description": "The display name formatted according to the locale specified by the viewer's account or the Accept-Language HTTP header." - }, - "familyName": { - "type": "string", - "description": "The family name." - }, - "givenName": { - "type": "string", - "description": "The given name." - }, - "middleName": { - "type": "string", - "description": "The middle name(s)." - }, - "honorificPrefix": { - "type": "string", - "description": "The honorific prefixes, such as `Mrs.` or `Dr.`" - }, - "honorificSuffix": { - "type": "string", - "description": "The honorific suffixes, such as `Jr.`" - }, - "phoneticFamilyName": { - "type": "string", - "description": "The family name spelled as it sounds." - }, - "phoneticGivenName": { - "type": "string", - "description": "The given name spelled as it sounds." - }, - "phoneticMiddleName": { - "type": "string", - "description": "The middle name(s) spelled as they sound." }, - "phoneticHonorificPrefix": { - "type": "string", - "description": "The honorific prefixes spelled as they sound." - }, - "phoneticHonorificSuffix": { - "type": "string", - "description": "The honorific suffixes spelled as they sound." - } - } - }, - "Nickname": { - "id": "Nickname", - "type": "object", - "description": "A person's nickname.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the nickname." - }, - "value": { - "type": "string", - "description": "The nickname." - }, - "type": { - "type": "string", - "description": "The type of the nickname.", - "enum": [ - "DEFAULT", - "MAIDEN_NAME", - "INITIALS", - "GPLUS", - "OTHER_NAME" - ] - } - } - }, - "CoverPhoto": { - "id": "CoverPhoto", - "type": "object", - "description": "A person's cover photo. A large image shown on the person's profile page that represents who they are or what they care about.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the cover photo." - }, - "url": { - "type": "string", - "description": "The URL of the cover photo." - }, - "default": { - "type": "boolean", - "description": "True if the cover photo is the default cover photo; false if the cover photo is a user-provided cover photo." - } - } - }, - "Photo": { - "id": "Photo", - "type": "object", - "description": "A person's photo. A picture shown next to the person's name to help others recognize the person.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the photo." - }, - "url": { - "type": "string", - "description": "The URL of the photo." - } - } - }, - "Gender": { - "id": "Gender", - "type": "object", - "description": "A person's gender.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the gender." - }, - "value": { - "type": "string", - "description": "The gender for the person. The gender can be custom or predefined. 
Possible values include, but are not limited to, the following: * `male` * `female` * `other` * `unknown`" - }, - "formattedValue": { - "type": "string", - "description": "The read-only value of the gender translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - } - } - }, - "Birthday": { - "id": "Birthday", - "type": "object", - "description": "A person's birthday. At least one of the `date` and `text` fields are specified. The `date` and `text` fields typically represent the same date, but are not guaranteed to.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the birthday." - }, - "date": { - "$ref": "Date", - "description": "The date of the birthday." - }, - "text": { - "type": "string", - "description": "A free-form string representing the user's birthday." - } - } - }, - "Date": { - "id": "Date", - "type": "object", - "description": "Represents a whole calendar date, for example a date of birth. The time of day and time zone are either specified elsewhere or are not significant. The date is relative to the [Proleptic Gregorian Calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar). The day may be 0 to represent a year and month where the day is not significant. The year may be 0 to represent a month and day independent of year; for example, anniversary date.", - "properties": { - "year": { - "type": "integer", - "description": "Year of date. Must be from 1 to 9999, or 0 if specifying a date without a year.", - "format": "int32" - }, - "month": { - "type": "integer", - "description": "Month of year. Must be from 1 to 12.", - "format": "int32" - }, - "day": { - "type": "integer", - "description": "Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a year/month where the day is not significant.", - "format": "int32" - } - } - }, - "Event": { - "id": "Event", - "type": "object", - "description": "An event related to the person.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the event." + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "date": { - "$ref": "Date", - "description": "The date of the event." - }, - "type": { - "type": "string", - "description": "The type of the event. The type can be custom or predefined. Possible values include, but are not limited to, the following: * `anniversary` * `other`" - }, - "formattedType": { - "type": "string", - "description": "The read-only type of the event translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - } - } - }, - "Address": { - "id": "Address", - "type": "object", - "description": "A person's physical address. May be a P.O. box or street address. All fields are optional.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the address." - }, - "formattedValue": { - "type": "string", - "description": "The read-only value of the address formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - }, - "type": { - "type": "string", - "description": "The type of the address. The type can be custom or predefined. 
Possible values include, but are not limited to, the following: * `home` * `work` * `other`" - }, - "formattedType": { - "type": "string", - "description": "The read-only type of the address translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - }, - "poBox": { - "type": "string", - "description": "The P.O. box of the address." - }, - "streetAddress": { - "type": "string", - "description": "The street address." - }, - "extendedAddress": { - "type": "string", - "description": "The extended address of the address; for example, the apartment number." - }, - "city": { - "type": "string", - "description": "The city of the address." - }, - "region": { - "type": "string", - "description": "The region of the address; for example, the state or province." - }, - "postalCode": { - "type": "string", - "description": "The postal code of the address." - }, - "country": { - "type": "string", - "description": "The country of the address." - }, - "countryCode": { - "type": "string", - "description": "The [ISO 3166-1 alpha-2](http://www.iso.org/iso/country_codes.htm) country code of the address." - } - } - }, - "Residence": { - "id": "Residence", - "type": "object", - "description": "A person's past or current residence.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the residence." - }, - "value": { - "type": "string", - "description": "The address of the residence." - }, - "current": { - "type": "boolean", - "description": "True if the residence is the person's current residence; false if the residence is a past residence." - } - } - }, - "EmailAddress": { - "id": "EmailAddress", - "type": "object", - "description": "A person's email address.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the email address." - }, - "value": { - "type": "string", - "description": "The email address." - }, - "type": { - "type": "string", - "description": "The type of the email address. The type can be custom or predefined. Possible values include, but are not limited to, the following: * `home` * `work` * `other`" - }, - "formattedType": { - "type": "string", - "description": "The read-only type of the email address translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - } - } - }, - "PhoneNumber": { - "id": "PhoneNumber", - "type": "object", - "description": "A person's phone number.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the phone number." - }, - "value": { - "type": "string", - "description": "The phone number." - }, - "canonicalForm": { - "type": "string", - "description": "The read-only canonicalized [ITU-T E.164](https://law.resource.org/pub/us/cfr/ibr/004/itu-t.E.164.1.2008.pdf) form of the phone number." - }, - "type": { - "type": "string", - "description": "The type of the phone number. The type can be custom or predefined. Possible values include, but are not limited to, the following: * `home` * `work` * `mobile` * `homeFax` * `workFax` * `otherFax` * `pager` * `workMobile` * `workPager` * `main` * `googleVoice` * `other`" - }, - "formattedType": { - "type": "string", - "description": "The read-only type of the phone number translated and formatted in the viewer's account locale or the the `Accept-Language` HTTP header locale." 
- } - } - }, - "ImClient": { - "id": "ImClient", - "type": "object", - "description": "A person's instant messaging client.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the IM client." - }, - "username": { - "type": "string", - "description": "The user name used in the IM client." - }, - "type": { - "type": "string", - "description": "The type of the IM client. The type can be custom or predefined. Possible values include, but are not limited to, the following: * `home` * `work` * `other`" - }, - "formattedType": { - "type": "string", - "description": "The read-only type of the IM client translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - }, - "protocol": { - "type": "string", - "description": "The protocol of the IM client. The protocol can be custom or predefined. Possible values include, but are not limited to, the following: * `aim` * `msn` * `yahoo` * `skype` * `qq` * `googleTalk` * `icq` * `jabber` * `netMeeting`" - }, - "formattedProtocol": { - "type": "string", - "description": "The read-only protocol of the IM client formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - } - } - }, - "Tagline": { - "id": "Tagline", - "type": "object", - "description": "A brief one-line description of the person.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the tagline." - }, - "value": { - "type": "string", - "description": "The tagline." - } - } - }, - "Biography": { - "id": "Biography", - "type": "object", - "description": "A person's short biography.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the biography." - }, - "value": { - "type": "string", - "description": "The short biography." - } - } - }, - "Url": { - "id": "Url", - "type": "object", - "description": "A person's associated URLs.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the URL." - }, - "value": { - "type": "string", - "description": "The URL." - }, - "type": { - "type": "string", - "description": "The type of the URL. The type can be custom or predefined. Possible values include, but are not limited to, the following: * `home` * `work` * `blog` * `profile` * `homePage` * `ftp` * `reservations` * `appInstallPage`: website for a Google+ application. * `other`" - }, - "formattedType": { - "type": "string", - "description": "The read-only type of the URL translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - } - } - }, - "Organization": { - "id": "Organization", - "type": "object", - "description": "A person's past or current organization. Overlapping date ranges are permitted.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the organization." - }, - "type": { - "type": "string", - "description": "The type of the organization. The type can be custom or predefined. Possible values include, but are not limited to, the following: * `work` * `school`" - }, - "formattedType": { - "type": "string", - "description": "The read-only type of the organization translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - }, - "startDate": { - "$ref": "Date", - "description": "The start date when the person joined the organization." 
- }, - "endDate": { - "$ref": "Date", - "description": "The end date when the person left the organization." - }, - "current": { - "type": "boolean", - "description": "True if the organization is the person's current organization; false if the organization is a past organization." - }, - "name": { - "type": "string", - "description": "The name of the organization." - }, - "phoneticName": { - "type": "string", - "description": "The phonetic name of the organization." - }, - "department": { - "type": "string", - "description": "The person's department at the organization." - }, - "title": { - "type": "string", - "description": "The person's job title at the organization." - }, - "jobDescription": { - "type": "string", - "description": "The person's job description at the organization." - }, - "symbol": { - "type": "string", - "description": "The symbol associated with the organization; for example, a stock ticker symbol, abbreviation, or acronym." - }, - "domain": { - "type": "string", - "description": "The domain name associated with the organization; for example, `google.com`." - }, - "location": { - "type": "string", - "description": "The location of the organization office the person works at." - } - } - }, - "Occupation": { - "id": "Occupation", - "type": "object", - "description": "A person's occupation.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the occupation." - }, - "value": { - "type": "string", - "description": "The occupation; for example, `carpenter`." - } - } - }, - "Interest": { - "id": "Interest", - "type": "object", - "description": "One of the person's interests.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the interest." - }, - "value": { - "type": "string", - "description": "The interest; for example, `stargazing`." - } - } - }, - "Skill": { - "id": "Skill", - "type": "object", - "description": "A skill that the person has.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the skill." - }, - "value": { - "type": "string", - "description": "The skill; for example, `underwater basket weaving`." - } - } - }, - "BraggingRights": { - "id": "BraggingRights", - "type": "object", - "description": "A person's bragging rights.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the bragging rights." - }, - "value": { - "type": "string", - "description": "The bragging rights; for example, `climbed mount everest`." - } - } - }, - "Relation": { - "id": "Relation", - "type": "object", - "description": "A person's relation to another person.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the relation." - }, - "person": { - "type": "string", - "description": "The name of the other person this relation refers to." - }, - "type": { - "type": "string", - "description": "The person's relation to the other person. The type can be custom or predefined. Possible values include, but are not limited to, the following values: * `spouse` * `child` * `mother` * `father` * `parent` * `brother` * `sister` * `friend` * `relative` * `domesticPartner` * `manager` * `assistant` * `referredBy` * `partner`" + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" }, - "formattedType": { - "type": "string", - "description": "The type of the relation translated and formatted in the viewer's account locale or the locale specified in the Accept-Language HTTP header." - } - } - }, - "RelationshipInterest": { - "id": "RelationshipInterest", - "type": "object", - "description": "The kind of relationship the person is looking for.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the relationship interest." + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, - "value": { - "type": "string", - "description": "The kind of relationship the person is looking for. The value can be custom or predefined. Possible values include, but are not limited to, the following values: * `friend` * `date` * `relationship` * `networking`" + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" }, - "formattedValue": { - "type": "string", - "description": "The value of the relationship interest translated and formatted in the viewer's account locale or the locale specified in the Accept-Language HTTP header." - } - } - }, - "RelationshipStatus": { - "id": "RelationshipStatus", - "type": "object", - "description": "A person's relationship status.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the relationship status." + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" }, - "value": { - "type": "string", - "description": "The relationship status. The value can be custom or predefined. Possible values include, but are not limited to, the following: * `single` * `inARelationship` * `engaged` * `married` * `itsComplicated` * `openRelationship` * `widowed` * `inDomesticPartnership` * `inCivilUnion`" + "quotaUser": { + "type": "string", + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." }, - "formattedValue": { - "type": "string", - "description": "The read-only value of the relationship status translated and formatted in the viewer's account locale or the `Accept-Language` HTTP header locale." - } - } - }, - "Membership": { - "id": "Membership", - "type": "object", - "description": "A person's membership in a group.", - "properties": { - "metadata": { - "$ref": "FieldMetadata", - "description": "Metadata about the membership." 
+ "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" }, - "contactGroupMembership": { - "$ref": "ContactGroupMembership", - "description": "The contact group membership." + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" }, - "domainMembership": { - "$ref": "DomainMembership", - "description": "The domain membership." - } - } - }, - "ContactGroupMembership": { - "id": "ContactGroupMembership", - "type": "object", - "description": "A Google contact group membership.", - "properties": { - "contactGroupId": { - "type": "string", - "description": "The contact group ID for the contact group membership. The contact group ID can be custom or predefined. Possible values include, but are not limited to, the following: * `myContacts` * `starred` * A numerical ID for user-created groups." - } - } - }, - "DomainMembership": { - "id": "DomainMembership", - "type": "object", - "description": "A Google Apps Domain membership.", - "properties": { - "inViewerDomain": { - "type": "boolean", - "description": "True if the person is in the viewer's Google Apps domain." - } - } - }, - "GetPeopleResponse": { - "id": "GetPeopleResponse", - "type": "object", - "properties": { - "responses": { - "type": "array", - "description": "The response for each requested resource name.", - "items": { - "$ref": "PersonResponse" - } + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" } - } }, - "PersonResponse": { - "id": "PersonResponse", - "type": "object", - "description": "The response for a single person", - "properties": { - "httpStatusCode": { - "type": "integer", - "description": "[HTTP 1.1 status code](http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html).", - "format": "int32" - }, - "person": { - "$ref": "Person", - "description": "The person." + "schemas": { + "Occupation": { + "description": "A person's occupation.", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the occupation.", + "$ref": "FieldMetadata" + }, + "value": { + "description": "The occupation; for example, `carpenter`.", + "type": "string" + } + }, + "id": "Occupation" + }, + "Person": { + "properties": { + "residences": { + "description": "The person's residences.", + "type": "array", + "items": { + "$ref": "Residence" + } + }, + "genders": { + "description": "The person's genders.", + "type": "array", + "items": { + "$ref": "Gender" + } + }, + "resourceName": { + "description": "The resource name for the person, assigned by the server. An ASCII string\nwith a max length of 27 characters. Always starts with `people/`.", + "type": "string" + }, + "interests": { + "description": "The person's interests.", + "type": "array", + "items": { + "$ref": "Interest" + } + }, + "biographies": { + "description": "The person's biographies.", + "type": "array", + "items": { + "$ref": "Biography" + } + }, + "skills": { + "description": "The person's skills.", + "type": "array", + "items": { + "$ref": "Skill" + } + }, + "relationshipStatuses": { + "description": "The person's relationship statuses.", + "type": "array", + "items": { + "$ref": "RelationshipStatus" + } + }, + "photos": { + "type": "array", + "items": { + "$ref": "Photo" + }, + "description": "The person's photos." + }, + "ageRange": { + "enumDescriptions": [ + "Unspecified.", + "Younger than eighteen.", + "Between eighteen and twenty.", + "Twenty-one and older." 
+ ], + "enum": [ + "AGE_RANGE_UNSPECIFIED", + "LESS_THAN_EIGHTEEN", + "EIGHTEEN_TO_TWENTY", + "TWENTY_ONE_OR_OLDER" + ], + "description": "The person's age range.", + "type": "string" + }, + "taglines": { + "description": "The person's taglines.", + "type": "array", + "items": { + "$ref": "Tagline" + } + }, + "addresses": { + "description": "The person's street addresses.", + "type": "array", + "items": { + "$ref": "Address" + } + }, + "events": { + "description": "The person's events.", + "type": "array", + "items": { + "$ref": "Event" + } + }, + "memberships": { + "description": "The person's group memberships.", + "type": "array", + "items": { + "$ref": "Membership" + } + }, + "phoneNumbers": { + "description": "The person's phone numbers.", + "type": "array", + "items": { + "$ref": "PhoneNumber" + } + }, + "coverPhotos": { + "description": "The person's cover photos.", + "type": "array", + "items": { + "$ref": "CoverPhoto" + } + }, + "imClients": { + "description": "The person's instant messaging clients.", + "type": "array", + "items": { + "$ref": "ImClient" + } + }, + "birthdays": { + "description": "The person's birthdays.", + "type": "array", + "items": { + "$ref": "Birthday" + } + }, + "locales": { + "description": "The person's locale preferences.", + "type": "array", + "items": { + "$ref": "Locale" + } + }, + "relationshipInterests": { + "description": "The kind of relationship the person is looking for.", + "type": "array", + "items": { + "$ref": "RelationshipInterest" + } + }, + "urls": { + "description": "The person's associated URLs.", + "type": "array", + "items": { + "$ref": "Url" + } + }, + "nicknames": { + "description": "The person's nicknames.", + "type": "array", + "items": { + "$ref": "Nickname" + } + }, + "relations": { + "description": "The person's relations.", + "type": "array", + "items": { + "$ref": "Relation" + } + }, + "names": { + "description": "The person's names.", + "type": "array", + "items": { + "$ref": "Name" + } + }, + "occupations": { + "description": "The person's occupations.", + "type": "array", + "items": { + "$ref": "Occupation" + } + }, + "emailAddresses": { + "description": "The person's email addresses.", + "type": "array", + "items": { + "$ref": "EmailAddress" + } + }, + "organizations": { + "description": "The person's past or current organizations.", + "type": "array", + "items": { + "$ref": "Organization" + } + }, + "etag": { + "description": "The [HTTP entity tag](https://en.wikipedia.org/wiki/HTTP_ETag) of the\nresource. Used for web cache validation.", + "type": "string" + }, + "braggingRights": { + "description": "The person's bragging rights.", + "type": "array", + "items": { + "$ref": "BraggingRights" + } + }, + "metadata": { + "$ref": "PersonMetadata", + "description": "Metadata about the person." + } + }, + "id": "Person", + "description": "Information about a person merged from various data sources such as the\nauthenticated user's contacts and profile data. Fields other than IDs,\nmetadata, and group memberships are user-edited.\n\nMost fields can have multiple items. 
The items in a field have no guaranteed\norder, but each non-empty field is guaranteed to have exactly one field with\n`metadata.primary` set to true.", + "type": "object" + }, + "GetPeopleResponse": { + "id": "GetPeopleResponse", + "type": "object", + "properties": { + "responses": { + "description": "The response for each requested resource name.", + "type": "array", + "items": { + "$ref": "PersonResponse" + } + } + } }, - "requestedResourceName": { - "type": "string", - "description": "The original requested resource name. May be different than the resource name on the returned person. The resource name can change when adding or removing fields that link a contact and profile such as a verified email, verified phone number, or a profile URL." - } - } - }, - "ListConnectionsResponse": { - "id": "ListConnectionsResponse", - "type": "object", - "properties": { - "connections": { - "type": "array", - "description": "The list of people that the requestor is connected to.", - "items": { - "$ref": "Person" - } + "PhoneNumber": { + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the phone number.", + "$ref": "FieldMetadata" + }, + "type": { + "description": "The type of the phone number. The type can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `home`\n* `work`\n* `mobile`\n* `homeFax`\n* `workFax`\n* `otherFax`\n* `pager`\n* `workMobile`\n* `workPager`\n* `main`\n* `googleVoice`\n* `other`", + "type": "string" + }, + "value": { + "description": "The phone number.", + "type": "string" + }, + "formattedType": { + "type": "string", + "description": "The read-only type of the phone number translated and formatted in the\nviewer's account locale or the the `Accept-Language` HTTP header locale." + }, + "canonicalForm": { + "type": "string", + "description": "The read-only canonicalized [ITU-T E.164](https://law.resource.org/pub/us/cfr/ibr/004/itu-t.E.164.1.2008.pdf)\nform of the phone number." + } + }, + "id": "PhoneNumber", + "description": "A person's phone number." + }, + "Photo": { + "description": "A person's read-only photo. A picture shown next to the person's name to\nhelp others recognize the person.", + "type": "object", + "properties": { + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the photo." + }, + "url": { + "description": "The URL of the photo.", + "type": "string" + } + }, + "id": "Photo" }, - "nextPageToken": { - "type": "string", - "description": "The token that can be used to retrieve the next page of results." + "ListConnectionsResponse": { + "properties": { + "nextPageToken": { + "description": "The token that can be used to retrieve the next page of results.", + "type": "string" + }, + "connections": { + "description": "The list of people that the requestor is connected to.", + "type": "array", + "items": { + "$ref": "Person" + } + }, + "nextSyncToken": { + "description": "The token that can be used to retrieve changes since the last request.", + "type": "string" + } + }, + "id": "ListConnectionsResponse", + "type": "object" + }, + "Birthday": { + "description": "A person's birthday. At least one of the `date` and `text` fields are\nspecified. 
The `date` and `text` fields typically represent the same\ndate, but are not guaranteed to.", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the birthday.", + "$ref": "FieldMetadata" + }, + "text": { + "description": "A free-form string representing the user's birthday.", + "type": "string" + }, + "date": { + "$ref": "Date", + "description": "The date of the birthday." + } + }, + "id": "Birthday" + }, + "Address": { + "id": "Address", + "description": "A person's physical address. May be a P.O. box or street address. All fields\nare optional.", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the address.", + "$ref": "FieldMetadata" + }, + "countryCode": { + "description": "The [ISO 3166-1 alpha-2](http://www.iso.org/iso/country_codes.htm) country\ncode of the address.", + "type": "string" + }, + "formattedType": { + "description": "The read-only type of the address translated and formatted in the viewer's\naccount locale or the `Accept-Language` HTTP header locale.", + "type": "string" + }, + "city": { + "description": "The city of the address.", + "type": "string" + }, + "formattedValue": { + "description": "The unstructured value of the address. If this is not set by the user it\nwill be automatically constructed from structured values.", + "type": "string" + }, + "country": { + "description": "The country of the address.", + "type": "string" + }, + "type": { + "description": "The type of the address. The type can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `home`\n* `work`\n* `other`", + "type": "string" + }, + "extendedAddress": { + "description": "The extended address of the address; for example, the apartment number.", + "type": "string" + }, + "poBox": { + "description": "The P.O. box of the address.", + "type": "string" + }, + "postalCode": { + "description": "The postal code of the address.", + "type": "string" + }, + "region": { + "description": "The region of the address; for example, the state or province.", + "type": "string" + }, + "streetAddress": { + "description": "The street address.", + "type": "string" + } + } }, - "nextSyncToken": { - "type": "string", - "description": "The token that can be used to retrieve changes since the last request." - } - } - } - }, - "resources": { - "people": { - "methods": { - "get": { - "id": "people.people.get", - "path": "v1/{+resourceName}", - "httpMethod": "GET", - "description": "Provides information about a person resource for a resource name. Use `people/me` to indicate the authenticated user.", - "parameters": { - "resourceName": { - "type": "string", - "description": "The resource name of the person to provide information about. - To get information about the authenticated user, specify `people/me`. - To get information about any user, specify the resource name that identifies the user, such as the resource names returned by [`people.connections.list`](/people/api/rest/v1/people.connections/list).", - "required": true, - "pattern": "^people/[^/]*$", - "location": "path" + "Residence": { + "description": "A person's past or current residence.", + "type": "object", + "properties": { + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the residence." 
+ }, + "current": { + "description": "True if the residence is the person's current residence;\nfalse if the residence is a past residence.", + "type": "boolean" + }, + "value": { + "description": "The address of the residence.", + "type": "string" + } }, - "requestMask.includeField": { - "type": "string", - "description": "Comma-separated list of fields to be included in the response. Omitting this field will include all fields. Each path should start with `person.`: for example, `person.names` or `person.photos`.", - "location": "query" + "id": "Residence" + }, + "ContactGroupMembership": { + "id": "ContactGroupMembership", + "description": "A Google contact group membership.", + "type": "object", + "properties": { + "contactGroupId": { + "description": "The contact group ID for the contact group membership. The contact group\nID can be custom or predefined. Possible values include, but are not\nlimited to, the following:\n\n* `myContacts`\n* `starred`\n* A numerical ID for user-created groups.", + "type": "string" + } } - }, - "parameterOrder": [ - "resourceName" - ], - "response": { - "$ref": "Person" - }, - "scopes": [ - "https://www.googleapis.com/auth/contacts", - "https://www.googleapis.com/auth/contacts.readonly", - "https://www.googleapis.com/auth/plus.login", - "https://www.googleapis.com/auth/user.addresses.read", - "https://www.googleapis.com/auth/user.birthday.read", - "https://www.googleapis.com/auth/user.emails.read", - "https://www.googleapis.com/auth/user.phonenumbers.read", - "https://www.googleapis.com/auth/userinfo.email", - "https://www.googleapis.com/auth/userinfo.profile" - ] }, - "getBatchGet": { - "id": "people.people.getBatchGet", - "path": "v1/people:batchGet", - "httpMethod": "GET", - "description": "Provides information about a list of specific people by specifying a list of requested resource names. Use `people/me` to indicate the authenticated user.", - "parameters": { - "resourceNames": { - "type": "string", - "description": "The resource name, such as one returned by [`people.connections.list`](/people/api/rest/v1/people.connections/list), of one of the people to provide information about. You can include this parameter up to 50 times in one request.", - "repeated": true, - "location": "query" + "PersonMetadata": { + "description": "The read-only metadata about a person.", + "type": "object", + "properties": { + "linkedPeopleResourceNames": { + "description": "Resource names of people linked to this resource.", + "type": "array", + "items": { + "type": "string" + } + }, + "sources": { + "type": "array", + "items": { + "$ref": "Source" + }, + "description": "The sources of data for the person." + }, + "previousResourceNames": { + "description": "Any former resource names this person has had. Populated only for\n[`connections.list`](/people/api/rest/v1/people.connections/list) requests\nthat include a sync token.\n\nThe resource name may change when adding or removing fields that link a\ncontact and profile such as a verified email, verified phone number, or\nprofile URL.", + "type": "array", + "items": { + "type": "string" + } + }, + "deleted": { + "description": "True if the person resource has been deleted. 
Populated only for\n[`connections.list`](/people/api/rest/v1/people.connections/list) requests\nthat include a sync token.", + "type": "boolean" + }, + "objectType": { + "enumDescriptions": [ + "Unspecified.", + "Person.", + "[Google+ Page.](http://www.google.com/+/brands/)" + ], + "enum": [ + "OBJECT_TYPE_UNSPECIFIED", + "PERSON", + "PAGE" + ], + "description": "The type of the person object.", + "type": "string" + } + }, + "id": "PersonMetadata" + }, + "Event": { + "description": "An event related to the person.", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the event.", + "$ref": "FieldMetadata" + }, + "type": { + "type": "string", + "description": "The type of the event. The type can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `anniversary`\n* `other`" + }, + "date": { + "$ref": "Date", + "description": "The date of the event." + }, + "formattedType": { + "description": "The read-only type of the event translated and formatted in the\nviewer's account locale or the `Accept-Language` HTTP header locale.", + "type": "string" + } + }, + "id": "Event" + }, + "Url": { + "type": "object", + "properties": { + "value": { + "description": "The URL.", + "type": "string" + }, + "formattedType": { + "type": "string", + "description": "The read-only type of the URL translated and formatted in the viewer's\naccount locale or the `Accept-Language` HTTP header locale." + }, + "metadata": { + "description": "Metadata about the URL.", + "$ref": "FieldMetadata" + }, + "type": { + "description": "The type of the URL. The type can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `home`\n* `work`\n* `blog`\n* `profile`\n* `homePage`\n* `ftp`\n* `reservations`\n* `appInstallPage`: website for a Google+ application.\n* `other`", + "type": "string" + } + }, + "id": "Url", + "description": "A person's associated URLs." + }, + "Gender": { + "description": "A person's gender.", + "type": "object", + "properties": { + "formattedValue": { + "description": "The read-only value of the gender translated and formatted in the viewer's\naccount locale or the `Accept-Language` HTTP header locale.", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the gender." + }, + "value": { + "description": "The gender for the person. The gender can be custom or predefined.\nPossible values include, but are not limited to, the\nfollowing:\n\n* `male`\n* `female`\n* `other`\n* `unknown`", + "type": "string" + } + }, + "id": "Gender" + }, + "CoverPhoto": { + "description": "A person's read-only cover photo. A large image shown on the person's\nprofile page that represents who they are or what they care about.", + "type": "object", + "properties": { + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the cover photo." + }, + "default": { + "description": "True if the cover photo is the default cover photo;\nfalse if the cover photo is a user-provided cover photo.", + "type": "boolean" + }, + "url": { + "description": "The URL of the cover photo.", + "type": "string" + } + }, + "id": "CoverPhoto" + }, + "ImClient": { + "description": "A person's instant messaging client.", + "type": "object", + "properties": { + "protocol": { + "description": "The protocol of the IM client. 
The protocol can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `aim`\n* `msn`\n* `yahoo`\n* `skype`\n* `qq`\n* `googleTalk`\n* `icq`\n* `jabber`\n* `netMeeting`", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the IM client." + }, + "type": { + "type": "string", + "description": "The type of the IM client. The type can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `home`\n* `work`\n* `other`" + }, + "username": { + "description": "The user name used in the IM client.", + "type": "string" + }, + "formattedProtocol": { + "description": "The read-only protocol of the IM client formatted in the viewer's account\nlocale or the `Accept-Language` HTTP header locale.", + "type": "string" + }, + "formattedType": { + "type": "string", + "description": "The read-only type of the IM client translated and formatted in the\nviewer's account locale or the `Accept-Language` HTTP header locale." + } + }, + "id": "ImClient" + }, + "Interest": { + "type": "object", + "properties": { + "value": { + "description": "The interest; for example, `stargazing`.", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the interest." + } + }, + "id": "Interest", + "description": "One of the person's interests." + }, + "Nickname": { + "description": "A person's nickname.", + "type": "object", + "properties": { + "value": { + "description": "The nickname.", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the nickname." + }, + "type": { + "enumDescriptions": [ + "Generic nickname.", + "Maiden name or birth family name. Used when the person's family name has\nchanged as a result of marriage.", + "Initials.", + "Google+ profile nickname.", + "A professional affiliation or other name; for example, `Dr. Smith.`" + ], + "enum": [ + "DEFAULT", + "MAIDEN_NAME", + "INITIALS", + "GPLUS", + "OTHER_NAME" + ], + "description": "The type of the nickname.", + "type": "string" + } + }, + "id": "Nickname" + }, + "EmailAddress": { + "description": "A person's email address.", + "type": "object", + "properties": { + "displayName": { + "description": "The display name of the email.", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the email address." + }, + "type": { + "type": "string", + "description": "The type of the email address. The type can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `home`\n* `work`\n* `other`" + }, + "value": { + "description": "The email address.", + "type": "string" + }, + "formattedType": { + "description": "The read-only type of the email address translated and formatted in the\nviewer's account locale or the `Accept-Language` HTTP header locale.", + "type": "string" + } + }, + "id": "EmailAddress" + }, + "Skill": { + "description": "A skill that the person has.", + "type": "object", + "properties": { + "value": { + "description": "The skill; for example, `underwater basket weaving`.", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the skill." 
+ } + }, + "id": "Skill" + }, + "DomainMembership": { + "description": "A Google Apps Domain membership.", + "type": "object", + "properties": { + "inViewerDomain": { + "description": "True if the person is in the viewer's Google Apps domain.", + "type": "boolean" + } + }, + "id": "DomainMembership" + }, + "Membership": { + "description": "A person's read-only membership in a group.", + "type": "object", + "properties": { + "contactGroupMembership": { + "$ref": "ContactGroupMembership", + "description": "The contact group membership." + }, + "domainMembership": { + "$ref": "DomainMembership", + "description": "The domain membership." + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the membership." + } + }, + "id": "Membership" + }, + "RelationshipStatus": { + "description": "A person's read-only relationship status.", + "type": "object", + "properties": { + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the relationship status." + }, + "value": { + "description": "The relationship status. The value can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `single`\n* `inARelationship`\n* `engaged`\n* `married`\n* `itsComplicated`\n* `openRelationship`\n* `widowed`\n* `inDomesticPartnership`\n* `inCivilUnion`", + "type": "string" + }, + "formattedValue": { + "type": "string", + "description": "The read-only value of the relationship status translated and formatted in\nthe viewer's account locale or the `Accept-Language` HTTP header locale." + } + }, + "id": "RelationshipStatus" + }, + "Date": { + "description": "Represents a whole calendar date, for example a date of birth. The time\nof day and time zone are either specified elsewhere or are not\nsignificant. The date is relative to the\n[Proleptic Gregorian Calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar).\nThe day may be 0 to represent a year and month where the day is not\nsignificant. The year may be 0 to represent a month and day independent\nof year; for example, anniversary date.", + "type": "object", + "properties": { + "year": { + "description": "Year of date. Must be from 1 to 9999, or 0 if specifying a date without\na year.", + "format": "int32", + "type": "integer" + }, + "day": { + "description": "Day of month. Must be from 1 to 31 and valid for the year and month, or 0\nif specifying a year/month where the day is not significant.", + "format": "int32", + "type": "integer" + }, + "month": { + "type": "integer", + "description": "Month of year. Must be from 1 to 12.", + "format": "int32" + } + }, + "id": "Date" + }, + "Tagline": { + "description": "A read-only brief one-line description of the person.", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the tagline.", + "$ref": "FieldMetadata" + }, + "value": { + "type": "string", + "description": "The tagline." + } + }, + "id": "Tagline" + }, + "Name": { + "description": "A person's name. If the name is a mononym, the family name is empty.", + "type": "object", + "properties": { + "phoneticMiddleName": { + "description": "The middle name(s) spelled as they sound.", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the name." 
+ }, + "phoneticFullName": { + "description": "The full name spelled as it sounds.", + "type": "string" + }, + "displayNameLastFirst": { + "description": "The read-only display name with the last name first formatted according to\nthe locale specified by the viewer's account or the\n\u003ccode\u003eAccept-Language\u003c/code\u003e HTTP header.", + "type": "string" + }, + "displayName": { + "description": "The read-only display name formatted according to the locale specified by\nthe viewer's account or the \u003ccode\u003eAccept-Language\u003c/code\u003e HTTP header.", + "type": "string" + }, + "honorificSuffix": { + "description": "The honorific suffixes, such as `Jr.`", + "type": "string" + }, + "honorificPrefix": { + "type": "string", + "description": "The honorific prefixes, such as `Mrs.` or `Dr.`" + }, + "phoneticHonorificSuffix": { + "description": "The honorific suffixes spelled as they sound.", + "type": "string" + }, + "givenName": { + "type": "string", + "description": "The given name." + }, + "middleName": { + "description": "The middle name(s).", + "type": "string" + }, + "phoneticHonorificPrefix": { + "description": "The honorific prefixes spelled as they sound.", + "type": "string" + }, + "phoneticGivenName": { + "description": "The given name spelled as it sounds.", + "type": "string" + }, + "phoneticFamilyName": { + "description": "The family name spelled as it sounds.", + "type": "string" + }, + "familyName": { + "description": "The family name.", + "type": "string" + } + }, + "id": "Name" + }, + "BraggingRights": { + "description": "A person's bragging rights.", + "type": "object", + "properties": { + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the bragging rights." + }, + "value": { + "description": "The bragging rights; for example, `climbed mount everest`.", + "type": "string" + } }, - "requestMask.includeField": { - "type": "string", - "description": "Comma-separated list of fields to be included in the response. Omitting this field will include all fields. Each path should start with `person.`: for example, `person.names` or `person.photos`.", - "location": "query" + "id": "BraggingRights" + }, + "Locale": { + "id": "Locale", + "description": "A person's locale preference.", + "type": "object", + "properties": { + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the locale." + }, + "value": { + "type": "string", + "description": "The well-formed [IETF BCP 47](https://tools.ietf.org/html/bcp47)\nlanguage tag representing the locale." 
+ } } - }, - "response": { - "$ref": "GetPeopleResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/contacts", - "https://www.googleapis.com/auth/contacts.readonly", - "https://www.googleapis.com/auth/plus.login", - "https://www.googleapis.com/auth/user.addresses.read", - "https://www.googleapis.com/auth/user.birthday.read", - "https://www.googleapis.com/auth/user.emails.read", - "https://www.googleapis.com/auth/user.phonenumbers.read", - "https://www.googleapis.com/auth/userinfo.email", - "https://www.googleapis.com/auth/userinfo.profile" - ] - } - }, - "resources": { - "connections": { - "methods": { - "list": { - "id": "people.people.connections.list", - "path": "v1/{+resourceName}/connections", - "httpMethod": "GET", - "description": "Provides a list of the authenticated user's contacts merged with any linked profiles.", - "parameters": { + }, + "Organization": { + "type": "object", + "properties": { + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the organization." + }, + "title": { + "description": "The person's job title at the organization.", + "type": "string" + }, + "location": { + "description": "The location of the organization office the person works at.", + "type": "string" + }, + "current": { + "description": "True if the organization is the person's current organization;\nfalse if the organization is a past organization.", + "type": "boolean" + }, + "startDate": { + "$ref": "Date", + "description": "The start date when the person joined the organization." + }, + "formattedType": { + "description": "The read-only type of the organization translated and formatted in the\nviewer's account locale or the `Accept-Language` HTTP header locale.", + "type": "string" + }, + "domain": { + "description": "The domain name associated with the organization; for example, `google.com`.", + "type": "string" + }, + "department": { + "type": "string", + "description": "The person's department at the organization." + }, + "type": { + "description": "The type of the organization. The type can be custom or predefined.\nPossible values include, but are not limited to, the following:\n\n* `work`\n* `school`", + "type": "string" + }, + "phoneticName": { + "description": "The phonetic name of the organization.", + "type": "string" + }, + "jobDescription": { + "description": "The person's job description at the organization.", + "type": "string" + }, + "endDate": { + "description": "The end date when the person left the organization.", + "$ref": "Date" + }, + "symbol": { + "description": "The symbol associated with the organization; for example, a stock ticker\nsymbol, abbreviation, or acronym.", + "type": "string" + }, + "name": { + "description": "The name of the organization.", + "type": "string" + } + }, + "id": "Organization", + "description": "A person's past or current organization. Overlapping date ranges are\npermitted." + }, + "Biography": { + "description": "A person's short biography.", + "type": "object", + "properties": { + "contentType": { + "enumDescriptions": [ + "Unspecified.", + "Plain text.", + "HTML text." + ], + "enum": [ + "CONTENT_TYPE_UNSPECIFIED", + "TEXT_PLAIN", + "TEXT_HTML" + ], + "description": "The content type of the biography.", + "type": "string" + }, + "metadata": { + "$ref": "FieldMetadata", + "description": "Metadata about the biography." 
+ }, + "value": { + "description": "The short biography.", + "type": "string" + } + }, + "id": "Biography" + }, + "FieldMetadata": { + "description": "Metadata about a field.", + "type": "object", + "properties": { + "verified": { + "description": "True if the field is verified; false if the field is unverified. A\nverified field is typically a name, email address, phone number, or\nwebsite that has been confirmed to be owned by the person.", + "type": "boolean" + }, + "primary": { + "description": "True if the field is the primary field; false if the field is a secondary\nfield.", + "type": "boolean" + }, + "source": { + "description": "The source of the field.", + "$ref": "Source" + } + }, + "id": "FieldMetadata" + }, + "Source": { + "properties": { + "type": { + "enumDescriptions": [ + "Unspecified.", + "[Google Account](https://accounts.google.com).", + "[Google profile](https://profiles.google.com). You can view the\nprofile at https://profiles.google.com/\u003cid\u003e where \u003cid\u003e is the source\nid.", + "[Google Apps domain profile](https://admin.google.com).", + "[Google contact](https://contacts.google.com). You can view the\ncontact at https://contact.google.com/\u003cid\u003e where \u003cid\u003e is the source\nid." + ], + "enum": [ + "SOURCE_TYPE_UNSPECIFIED", + "ACCOUNT", + "PROFILE", + "DOMAIN_PROFILE", + "CONTACT" + ], + "description": "The source type.", + "type": "string" + }, + "etag": { + "description": "The [HTTP entity tag](https://en.wikipedia.org/wiki/HTTP_ETag) of the\nsource. Used for web cache validation. Only populated in\nperson.metadata.sources.", + "type": "string" + }, + "id": { + "description": "The unique identifier within the source type generated by the server.", + "type": "string" + }, "resourceName": { - "type": "string", - "description": "The resource name to return connections for. Only `people/me` is valid.", - "required": true, - "pattern": "^people/[^/]*$", - "location": "path" - }, - "pageToken": { - "type": "string", - "description": "The token of the page to be returned.", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The number of connections to include in the response. Valid values are between 1 and 500, inclusive. Defaults to 100.", - "format": "int32", - "location": "query" - }, - "sortOrder": { - "type": "string", - "description": "The order in which the connections should be sorted. Defaults to `LAST_MODIFIED_ASCENDING`.", - "enum": [ - "LAST_MODIFIED_ASCENDING", - "FIRST_NAME_ASCENDING", - "LAST_NAME_ASCENDING" - ], - "location": "query" - }, - "syncToken": { - "type": "string", - "description": "A sync token, returned by a previous call to `people.connections.list`. Only resources changed since the sync token was created are returned.", - "location": "query" - }, - "requestMask.includeField": { - "type": "string", - "description": "Comma-separated list of fields to be included in the response. Omitting this field will include all fields. Each path should start with `person.`: for example, `person.names` or `person.photos`.", - "location": "query" + "description": "The resource name of the source. 
Only set if there is a separate\nresource endpoint.", + "type": "string" + } + }, + "id": "Source", + "description": "The source of a field.", + "type": "object" + }, + "RelationshipInterest": { + "description": "A person's read-only relationship interest .", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the relationship interest.", + "$ref": "FieldMetadata" + }, + "value": { + "description": "The kind of relationship the person is looking for. The value can be custom\nor predefined. Possible values include, but are not limited to, the\nfollowing values:\n\n* `friend`\n* `date`\n* `relationship`\n* `networking`", + "type": "string" + }, + "formattedValue": { + "description": "The value of the relationship interest translated and formatted in the\nviewer's account locale or the locale specified in the Accept-Language\nHTTP header.", + "type": "string" + } + }, + "id": "RelationshipInterest" + }, + "PersonResponse": { + "description": "The response for a single person", + "type": "object", + "properties": { + "person": { + "description": "The person.", + "$ref": "Person" + }, + "httpStatusCode": { + "description": "[HTTP 1.1 status code](http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html).", + "format": "int32", + "type": "integer" + }, + "requestedResourceName": { + "type": "string", + "description": "The original requested resource name. May be different than the resource\nname on the returned person.\n\nThe resource name can change when adding or removing fields that link a\ncontact and profile such as a verified email, verified phone number, or a\nprofile URL." + } + }, + "id": "PersonResponse" + }, + "Relation": { + "description": "A person's relation to another person.", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the relation.", + "$ref": "FieldMetadata" + }, + "type": { + "description": "The person's relation to the other person. 
The type can be custom or predefined.\nPossible values include, but are not limited to, the following values:\n\n* `spouse`\n* `child`\n* `mother`\n* `father`\n* `parent`\n* `brother`\n* `sister`\n* `friend`\n* `relative`\n* `domesticPartner`\n* `manager`\n* `assistant`\n* `referredBy`\n* `partner`", + "type": "string" + }, + "person": { + "description": "The name of the other person this relation refers to.", + "type": "string" + }, + "formattedType": { + "description": "The type of the relation translated and formatted in the viewer's account\nlocale or the locale specified in the Accept-Language HTTP header.", + "type": "string" + } + }, + "id": "Relation" + } + }, + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "protocol": "rest", + "version": "v1", + "baseUrl": "https://people.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/userinfo.profile": { + "description": "View your basic profile info" + }, + "https://www.googleapis.com/auth/user.emails.read": { + "description": "View your email addresses" + }, + "https://www.googleapis.com/auth/contacts": { + "description": "Manage your contacts" + }, + "https://www.googleapis.com/auth/user.addresses.read": { + "description": "View your street addresses" + }, + "https://www.googleapis.com/auth/userinfo.email": { + "description": "View your email address" + }, + "https://www.googleapis.com/auth/user.phonenumbers.read": { + "description": "View your phone numbers" + }, + "https://www.googleapis.com/auth/user.birthday.read": { + "description": "View your complete date of birth" + }, + "https://www.googleapis.com/auth/contacts.readonly": { + "description": "View your contacts" + }, + "https://www.googleapis.com/auth/plus.login": { + "description": "Know the list of people in your circles, your age range, and language" } - }, - "parameterOrder": [ - "resourceName" - ], - "response": { - "$ref": "ListConnectionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/contacts", - "https://www.googleapis.com/auth/contacts.readonly" - ] } - } } - } - } - } + }, + "kind": "discovery#restDescription", + "servicePath": "", + "description": "Provides access to information about profiles and contacts.", + "rootUrl": "https://people.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "people" } diff --git a/vendor/google.golang.org/api/people/v1/people-gen.go b/vendor/google.golang.org/api/people/v1/people-gen.go index 1a0d8a824..e5f71434f 100644 --- a/vendor/google.golang.org/api/people/v1/people-gen.go +++ b/vendor/google.golang.org/api/people/v1/people-gen.go @@ -53,7 +53,7 @@ const ( // View your contacts ContactsReadonlyScope = "https://www.googleapis.com/auth/contacts.readonly" - // Know your basic profile info and list of people in your circles. 
+ // Know the list of people in your circles, your age range, and language PlusLoginScope = "https://www.googleapis.com/auth/plus.login" // View your street addresses @@ -85,9 +85,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only People *PeopleService } @@ -99,6 +100,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewPeopleService(s *Service) *PeopleService { rs := &PeopleService{s: s} rs.Connections = NewPeopleConnectionsService(s) @@ -121,7 +126,8 @@ type PeopleConnectionsService struct { } // Address: A person's physical address. May be a P.O. box or street -// address. All fields are optional. +// address. All fields +// are optional. type Address struct { // City: The city of the address. City string `json:"city,omitempty"` @@ -130,8 +136,8 @@ type Address struct { Country string `json:"country,omitempty"` // CountryCode: The [ISO 3166-1 - // alpha-2](http://www.iso.org/iso/country_codes.htm) country code of - // the address. + // alpha-2](http://www.iso.org/iso/country_codes.htm) country + // code of the address. CountryCode string `json:"countryCode,omitempty"` // ExtendedAddress: The extended address of the address; for example, @@ -139,12 +145,13 @@ type Address struct { ExtendedAddress string `json:"extendedAddress,omitempty"` // FormattedType: The read-only type of the address translated and - // formatted in the viewer's account locale or the `Accept-Language` - // HTTP header locale. + // formatted in the viewer's + // account locale or the `Accept-Language` HTTP header locale. FormattedType string `json:"formattedType,omitempty"` - // FormattedValue: The read-only value of the address formatted in the - // viewer's account locale or the `Accept-Language` HTTP header locale. + // FormattedValue: The unstructured value of the address. If this is not + // set by the user it + // will be automatically constructed from structured values. FormattedValue string `json:"formattedValue,omitempty"` // Metadata: Metadata about the address. @@ -163,9 +170,13 @@ type Address struct { // StreetAddress: The street address. StreetAddress string `json:"streetAddress,omitempty"` - // Type: The type of the address. The type can be custom or predefined. - // Possible values include, but are not limited to, the following: * - // `home` * `work` * `other` + // Type: The type of the address. The type can be custom or + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `home` + // * `work` + // * `other` Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "City") to @@ -193,13 +204,21 @@ func (s *Address) MarshalJSON() ([]byte, error) { // Biography: A person's short biography. type Biography struct { + // ContentType: The content type of the biography. + // + // Possible values: + // "CONTENT_TYPE_UNSPECIFIED" - Unspecified. + // "TEXT_PLAIN" - Plain text. + // "TEXT_HTML" - HTML text. + ContentType string `json:"contentType,omitempty"` + // Metadata: Metadata about the biography. 
Metadata *FieldMetadata `json:"metadata,omitempty"` // Value: The short biography. Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Metadata") to + // ForceSendFields is a list of field names (e.g. "ContentType") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -207,10 +226,10 @@ type Biography struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Metadata") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ContentType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -223,8 +242,10 @@ func (s *Biography) MarshalJSON() ([]byte, error) { } // Birthday: A person's birthday. At least one of the `date` and `text` -// fields are specified. The `date` and `text` fields typically -// represent the same date, but are not guaranteed to. +// fields are +// specified. The `date` and `text` fields typically represent the +// same +// date, but are not guaranteed to. type Birthday struct { // Date: The date of the birthday. Date *Date `json:"date,omitempty"` @@ -292,9 +313,14 @@ func (s *BraggingRights) MarshalJSON() ([]byte, error) { // ContactGroupMembership: A Google contact group membership. type ContactGroupMembership struct { // ContactGroupId: The contact group ID for the contact group - // membership. The contact group ID can be custom or predefined. - // Possible values include, but are not limited to, the following: * - // `myContacts` * `starred` * A numerical ID for user-created groups. + // membership. The contact group + // ID can be custom or predefined. Possible values include, but are + // not + // limited to, the following: + // + // * `myContacts` + // * `starred` + // * A numerical ID for user-created groups. ContactGroupId string `json:"contactGroupId,omitempty"` // ForceSendFields is a list of field names (e.g. "ContactGroupId") to @@ -321,12 +347,12 @@ func (s *ContactGroupMembership) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CoverPhoto: A person's cover photo. A large image shown on the -// person's profile page that represents who they are or what they care -// about. +// CoverPhoto: A person's read-only cover photo. A large image shown on +// the person's +// profile page that represents who they are or what they care about. type CoverPhoto struct { - // Default: True if the cover photo is the default cover photo; false if - // the cover photo is a user-provided cover photo. + // Default: True if the cover photo is the default cover photo; + // false if the cover photo is a user-provided cover photo. Default bool `json:"default,omitempty"` // Metadata: Metadata about the cover photo. 
@@ -359,23 +385,30 @@ func (s *CoverPhoto) MarshalJSON() ([]byte, error) { } // Date: Represents a whole calendar date, for example a date of birth. -// The time of day and time zone are either specified elsewhere or are -// not significant. The date is relative to the [Proleptic Gregorian +// The time +// of day and time zone are either specified elsewhere or are +// not +// significant. The date is relative to the +// [Proleptic Gregorian // Calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar). -// The day may be 0 to represent a year and month where the day is not +// +// The day may be 0 to represent a year and month where the day is +// not // significant. The year may be 0 to represent a month and day -// independent of year; for example, anniversary date. +// independent +// of year; for example, anniversary date. type Date struct { // Day: Day of month. Must be from 1 to 31 and valid for the year and - // month, or 0 if specifying a year/month where the day is not - // significant. + // month, or 0 + // if specifying a year/month where the day is not significant. Day int64 `json:"day,omitempty"` // Month: Month of year. Must be from 1 to 12. Month int64 `json:"month,omitempty"` // Year: Year of date. Must be from 1 to 9999, or 0 if specifying a date - // without a year. + // without + // a year. Year int64 `json:"year,omitempty"` // ForceSendFields is a list of field names (e.g. "Day") to @@ -433,23 +466,30 @@ func (s *DomainMembership) MarshalJSON() ([]byte, error) { // EmailAddress: A person's email address. type EmailAddress struct { + // DisplayName: The display name of the email. + DisplayName string `json:"displayName,omitempty"` + // FormattedType: The read-only type of the email address translated and - // formatted in the viewer's account locale or the `Accept-Language` - // HTTP header locale. + // formatted in the + // viewer's account locale or the `Accept-Language` HTTP header locale. FormattedType string `json:"formattedType,omitempty"` // Metadata: Metadata about the email address. Metadata *FieldMetadata `json:"metadata,omitempty"` // Type: The type of the email address. The type can be custom or - // predefined. Possible values include, but are not limited to, the - // following: * `home` * `work` * `other` + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `home` + // * `work` + // * `other` Type string `json:"type,omitempty"` // Value: The email address. Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "FormattedType") to + // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -457,7 +497,7 @@ type EmailAddress struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "FormattedType") to include + // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. 
However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -478,16 +518,19 @@ type Event struct { Date *Date `json:"date,omitempty"` // FormattedType: The read-only type of the event translated and - // formatted in the viewer's account locale or the `Accept-Language` - // HTTP header locale. + // formatted in the + // viewer's account locale or the `Accept-Language` HTTP header locale. FormattedType string `json:"formattedType,omitempty"` // Metadata: Metadata about the event. Metadata *FieldMetadata `json:"metadata,omitempty"` - // Type: The type of the event. The type can be custom or predefined. - // Possible values include, but are not limited to, the following: * - // `anniversary` * `other` + // Type: The type of the event. The type can be custom or + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `anniversary` + // * `other` Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Date") to @@ -516,16 +559,18 @@ func (s *Event) MarshalJSON() ([]byte, error) { // FieldMetadata: Metadata about a field. type FieldMetadata struct { // Primary: True if the field is the primary field; false if the field - // is a secondary field. + // is a secondary + // field. Primary bool `json:"primary,omitempty"` // Source: The source of the field. Source *Source `json:"source,omitempty"` // Verified: True if the field is verified; false if the field is - // unverified. A verified field is typically a name, email address, - // phone number, or website that has been confirmed to be owned by the - // person. + // unverified. A + // verified field is typically a name, email address, phone number, + // or + // website that has been confirmed to be owned by the person. Verified bool `json:"verified,omitempty"` // ForceSendFields is a list of field names (e.g. "Primary") to @@ -554,16 +599,22 @@ func (s *FieldMetadata) MarshalJSON() ([]byte, error) { // Gender: A person's gender. type Gender struct { // FormattedValue: The read-only value of the gender translated and - // formatted in the viewer's account locale or the `Accept-Language` - // HTTP header locale. + // formatted in the viewer's + // account locale or the `Accept-Language` HTTP header locale. FormattedValue string `json:"formattedValue,omitempty"` // Metadata: Metadata about the gender. Metadata *FieldMetadata `json:"metadata,omitempty"` // Value: The gender for the person. The gender can be custom or - // predefined. Possible values include, but are not limited to, the - // following: * `male` * `female` * `other` * `unknown` + // predefined. + // Possible values include, but are not limited to, the + // following: + // + // * `male` + // * `female` + // * `other` + // * `unknown` Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "FormattedValue") to @@ -624,27 +675,40 @@ func (s *GetPeopleResponse) MarshalJSON() ([]byte, error) { // ImClient: A person's instant messaging client. type ImClient struct { // FormattedProtocol: The read-only protocol of the IM client formatted - // in the viewer's account locale or the `Accept-Language` HTTP header - // locale. + // in the viewer's account + // locale or the `Accept-Language` HTTP header locale. FormattedProtocol string `json:"formattedProtocol,omitempty"` // FormattedType: The read-only type of the IM client translated and - // formatted in the viewer's account locale or the `Accept-Language` - // HTTP header locale. 
+ // formatted in the + // viewer's account locale or the `Accept-Language` HTTP header locale. FormattedType string `json:"formattedType,omitempty"` // Metadata: Metadata about the IM client. Metadata *FieldMetadata `json:"metadata,omitempty"` // Protocol: The protocol of the IM client. The protocol can be custom - // or predefined. Possible values include, but are not limited to, the - // following: * `aim` * `msn` * `yahoo` * `skype` * `qq` * `googleTalk` - // * `icq` * `jabber` * `netMeeting` + // or predefined. + // Possible values include, but are not limited to, the following: + // + // * `aim` + // * `msn` + // * `yahoo` + // * `skype` + // * `qq` + // * `googleTalk` + // * `icq` + // * `jabber` + // * `netMeeting` Protocol string `json:"protocol,omitempty"` // Type: The type of the IM client. The type can be custom or - // predefined. Possible values include, but are not limited to, the - // following: * `home` * `work` * `other` + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `home` + // * `work` + // * `other` Type string `json:"type,omitempty"` // Username: The user name used in the IM client. @@ -750,8 +814,8 @@ type Locale struct { Metadata *FieldMetadata `json:"metadata,omitempty"` // Value: The well-formed [IETF BCP - // 47](https://tools.ietf.org/html/bcp47) language tag representing the - // locale. + // 47](https://tools.ietf.org/html/bcp47) + // language tag representing the locale. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Metadata") to @@ -777,7 +841,7 @@ func (s *Locale) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Membership: A person's membership in a group. +// Membership: A person's read-only membership in a group. type Membership struct { // ContactGroupMembership: The contact group membership. ContactGroupMembership *ContactGroupMembership `json:"contactGroupMembership,omitempty"` @@ -816,10 +880,18 @@ func (s *Membership) MarshalJSON() ([]byte, error) { // Name: A person's name. If the name is a mononym, the family name is // empty. type Name struct { - // DisplayName: The display name formatted according to the locale - // specified by the viewer's account or the Accept-Language HTTP header. + // DisplayName: The read-only display name formatted according to the + // locale specified by + // the viewer's account or the Accept-Language HTTP header. DisplayName string `json:"displayName,omitempty"` + // DisplayNameLastFirst: The read-only display name with the last name + // first formatted according to + // the locale specified by the viewer's account or + // the + // Accept-Language HTTP header. + DisplayNameLastFirst string `json:"displayNameLastFirst,omitempty"` + // FamilyName: The family name. FamilyName string `json:"familyName,omitempty"` @@ -841,6 +913,9 @@ type Name struct { // PhoneticFamilyName: The family name spelled as it sounds. PhoneticFamilyName string `json:"phoneticFamilyName,omitempty"` + // PhoneticFullName: The full name spelled as it sounds. + PhoneticFullName string `json:"phoneticFullName,omitempty"` + // PhoneticGivenName: The given name spelled as it sounds. PhoneticGivenName string `json:"phoneticGivenName,omitempty"` @@ -886,11 +961,14 @@ type Nickname struct { // Type: The type of the nickname. // // Possible values: - // "DEFAULT" - // "MAIDEN_NAME" - // "INITIALS" - // "GPLUS" - // "OTHER_NAME" + // "DEFAULT" - Generic nickname. 
+ // "MAIDEN_NAME" - Maiden name or birth family name. Used when the + // person's family name has + // changed as a result of marriage. + // "INITIALS" - Initials. + // "GPLUS" - Google+ profile nickname. + // "OTHER_NAME" - A professional affiliation or other name; for + // example, `Dr. Smith.` Type string `json:"type,omitempty"` // Value: The nickname. @@ -951,10 +1029,12 @@ func (s *Occupation) MarshalJSON() ([]byte, error) { } // Organization: A person's past or current organization. Overlapping -// date ranges are permitted. +// date ranges are +// permitted. type Organization struct { // Current: True if the organization is the person's current - // organization; false if the organization is a past organization. + // organization; + // false if the organization is a past organization. Current bool `json:"current,omitempty"` // Department: The person's department at the organization. @@ -968,8 +1048,8 @@ type Organization struct { EndDate *Date `json:"endDate,omitempty"` // FormattedType: The read-only type of the organization translated and - // formatted in the viewer's account locale or the `Accept-Language` - // HTTP header locale. + // formatted in the + // viewer's account locale or the `Accept-Language` HTTP header locale. FormattedType string `json:"formattedType,omitempty"` // JobDescription: The person's job description at the organization. @@ -992,15 +1072,19 @@ type Organization struct { StartDate *Date `json:"startDate,omitempty"` // Symbol: The symbol associated with the organization; for example, a - // stock ticker symbol, abbreviation, or acronym. + // stock ticker + // symbol, abbreviation, or acronym. Symbol string `json:"symbol,omitempty"` // Title: The person's job title at the organization. Title string `json:"title,omitempty"` // Type: The type of the organization. The type can be custom or - // predefined. Possible values include, but are not limited to, the - // following: * `work` * `school` + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `work` + // * `school` Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Current") to @@ -1027,11 +1111,16 @@ func (s *Organization) MarshalJSON() ([]byte, error) { } // Person: Information about a person merged from various data sources -// such as the authenticated user's contacts and profile data. Fields -// other than IDs, metadata, and group memberships are user-edited. Most -// fields can have multiple items. The items in a field have no -// guaranteed order, but each non-empty field is guaranteed to have -// exactly one field with `metadata.primary` set to true. +// such as the +// authenticated user's contacts and profile data. Fields other than +// IDs, +// metadata, and group memberships are user-edited. +// +// Most fields can have multiple items. The items in a field have no +// guaranteed +// order, but each non-empty field is guaranteed to have exactly one +// field with +// `metadata.primary` set to true. type Person struct { // Addresses: The person's street addresses. Addresses []*Address `json:"addresses,omitempty"` @@ -1039,10 +1128,10 @@ type Person struct { // AgeRange: The person's age range. // // Possible values: - // "AGE_RANGE_UNSPECIFIED" - // "LESS_THAN_EIGHTEEN" - // "EIGHTEEN_TO_TWENTY" - // "TWENTY_ONE_OR_OLDER" + // "AGE_RANGE_UNSPECIFIED" - Unspecified. + // "LESS_THAN_EIGHTEEN" - Younger than eighteen. + // "EIGHTEEN_TO_TWENTY" - Between eighteen and twenty. 
+ // "TWENTY_ONE_OR_OLDER" - Twenty-one and older. AgeRange string `json:"ageRange,omitempty"` // Biographies: The person's biographies. @@ -1061,7 +1150,8 @@ type Person struct { EmailAddresses []*EmailAddress `json:"emailAddresses,omitempty"` // Etag: The [HTTP entity tag](https://en.wikipedia.org/wiki/HTTP_ETag) - // of the resource. Used for web cache validation. + // of the + // resource. Used for web cache validation. Etag string `json:"etag,omitempty"` // Events: The person's events. @@ -1117,8 +1207,8 @@ type Person struct { Residences []*Residence `json:"residences,omitempty"` // ResourceName: The resource name for the person, assigned by the - // server. An ASCII string with a max length of 27 characters. Always - // starts with `people/`. + // server. An ASCII string + // with a max length of 27 characters. Always starts with `people/`. ResourceName string `json:"resourceName,omitempty"` // Skills: The person's skills. @@ -1157,27 +1247,39 @@ func (s *Person) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PersonMetadata: Metadata about a person. +// PersonMetadata: The read-only metadata about a person. type PersonMetadata struct { // Deleted: True if the person resource has been deleted. Populated only - // for [`connections.list`](/people/api/rest/v1/people.connections/list) - // requests that include a sync token. + // for + // [`connections.list`](/people/api/rest/v1/people.connections/list) + // requests + // that include a sync token. Deleted bool `json:"deleted,omitempty"` + // LinkedPeopleResourceNames: Resource names of people linked to this + // resource. + LinkedPeopleResourceNames []string `json:"linkedPeopleResourceNames,omitempty"` + // ObjectType: The type of the person object. // // Possible values: - // "OBJECT_TYPE_UNSPECIFIED" - // "PERSON" - // "PAGE" + // "OBJECT_TYPE_UNSPECIFIED" - Unspecified. + // "PERSON" - Person. + // "PAGE" - [Google+ Page.](http://www.google.com/+/brands/) ObjectType string `json:"objectType,omitempty"` // PreviousResourceNames: Any former resource names this person has had. - // Populated only for + // Populated only + // for // [`connections.list`](/people/api/rest/v1/people.connections/list) - // requests that include a sync token. The resource name may change when - // adding or removing fields that link a contact and profile such as a - // verified email, verified phone number, or profile URL. + // requests + // that include a sync token. + // + // The resource name may change when adding or removing fields that link + // a + // contact and profile such as a verified email, verified phone number, + // or + // profile URL. PreviousResourceNames []string `json:"previousResourceNames,omitempty"` // Sources: The sources of data for the person. @@ -1216,9 +1318,13 @@ type PersonResponse struct { Person *Person `json:"person,omitempty"` // RequestedResourceName: The original requested resource name. May be - // different than the resource name on the returned person. The resource - // name can change when adding or removing fields that link a contact - // and profile such as a verified email, verified phone number, or a + // different than the resource + // name on the returned person. + // + // The resource name can change when adding or removing fields that link + // a + // contact and profile such as a verified email, verified phone number, + // or a // profile URL. 
RequestedResourceName string `json:"requestedResourceName,omitempty"` @@ -1250,22 +1356,35 @@ func (s *PersonResponse) MarshalJSON() ([]byte, error) { type PhoneNumber struct { // CanonicalForm: The read-only canonicalized [ITU-T // E.164](https://law.resource.org/pub/us/cfr/ibr/004/itu-t.E.164.1.2008. - // pdf) form of the phone number. + // pdf) + // form of the phone number. CanonicalForm string `json:"canonicalForm,omitempty"` // FormattedType: The read-only type of the phone number translated and - // formatted in the viewer's account locale or the the `Accept-Language` - // HTTP header locale. + // formatted in the + // viewer's account locale or the the `Accept-Language` HTTP header + // locale. FormattedType string `json:"formattedType,omitempty"` // Metadata: Metadata about the phone number. Metadata *FieldMetadata `json:"metadata,omitempty"` // Type: The type of the phone number. The type can be custom or - // predefined. Possible values include, but are not limited to, the - // following: * `home` * `work` * `mobile` * `homeFax` * `workFax` * - // `otherFax` * `pager` * `workMobile` * `workPager` * `main` * - // `googleVoice` * `other` + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `home` + // * `work` + // * `mobile` + // * `homeFax` + // * `workFax` + // * `otherFax` + // * `pager` + // * `workMobile` + // * `workPager` + // * `main` + // * `googleVoice` + // * `other` Type string `json:"type,omitempty"` // Value: The phone number. @@ -1294,7 +1413,8 @@ func (s *PhoneNumber) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Photo: A person's photo. A picture shown next to the person's name to +// Photo: A person's read-only photo. A picture shown next to the +// person's name to // help others recognize the person. type Photo struct { // Metadata: Metadata about the photo. @@ -1329,8 +1449,8 @@ func (s *Photo) MarshalJSON() ([]byte, error) { // Relation: A person's relation to another person. type Relation struct { // FormattedType: The type of the relation translated and formatted in - // the viewer's account locale or the locale specified in the - // Accept-Language HTTP header. + // the viewer's account + // locale or the locale specified in the Accept-Language HTTP header. FormattedType string `json:"formattedType,omitempty"` // Metadata: Metadata about the relation. @@ -1340,11 +1460,24 @@ type Relation struct { Person string `json:"person,omitempty"` // Type: The person's relation to the other person. The type can be - // custom or predefined. Possible values include, but are not limited - // to, the following values: * `spouse` * `child` * `mother` * `father` - // * `parent` * `brother` * `sister` * `friend` * `relative` * - // `domesticPartner` * `manager` * `assistant` * `referredBy` * - // `partner` + // custom or predefined. + // Possible values include, but are not limited to, the following + // values: + // + // * `spouse` + // * `child` + // * `mother` + // * `father` + // * `parent` + // * `brother` + // * `sister` + // * `friend` + // * `relative` + // * `domesticPartner` + // * `manager` + // * `assistant` + // * `referredBy` + // * `partner` Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. 
"FormattedType") to @@ -1370,21 +1503,28 @@ func (s *Relation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RelationshipInterest: The kind of relationship the person is looking -// for. +// RelationshipInterest: A person's read-only relationship interest . type RelationshipInterest struct { // FormattedValue: The value of the relationship interest translated and - // formatted in the viewer's account locale or the locale specified in - // the Accept-Language HTTP header. + // formatted in the + // viewer's account locale or the locale specified in the + // Accept-Language + // HTTP header. FormattedValue string `json:"formattedValue,omitempty"` // Metadata: Metadata about the relationship interest. Metadata *FieldMetadata `json:"metadata,omitempty"` // Value: The kind of relationship the person is looking for. The value - // can be custom or predefined. Possible values include, but are not - // limited to, the following values: * `friend` * `date` * - // `relationship` * `networking` + // can be custom + // or predefined. Possible values include, but are not limited to, + // the + // following values: + // + // * `friend` + // * `date` + // * `relationship` + // * `networking` Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "FormattedValue") to @@ -1411,21 +1551,30 @@ func (s *RelationshipInterest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RelationshipStatus: A person's relationship status. +// RelationshipStatus: A person's read-only relationship status. type RelationshipStatus struct { // FormattedValue: The read-only value of the relationship status - // translated and formatted in the viewer's account locale or the - // `Accept-Language` HTTP header locale. + // translated and formatted in + // the viewer's account locale or the `Accept-Language` HTTP header + // locale. FormattedValue string `json:"formattedValue,omitempty"` // Metadata: Metadata about the relationship status. Metadata *FieldMetadata `json:"metadata,omitempty"` // Value: The relationship status. The value can be custom or - // predefined. Possible values include, but are not limited to, the - // following: * `single` * `inARelationship` * `engaged` * `married` * - // `itsComplicated` * `openRelationship` * `widowed` * - // `inDomesticPartnership` * `inCivilUnion` + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `single` + // * `inARelationship` + // * `engaged` + // * `married` + // * `itsComplicated` + // * `openRelationship` + // * `widowed` + // * `inDomesticPartnership` + // * `inCivilUnion` Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "FormattedValue") to @@ -1454,7 +1603,8 @@ func (s *RelationshipStatus) MarshalJSON() ([]byte, error) { // Residence: A person's past or current residence. type Residence struct { - // Current: True if the residence is the person's current residence; + // Current: True if the residence is the person's current + // residence; // false if the residence is a past residence. Current bool `json:"current,omitempty"` @@ -1520,21 +1670,42 @@ func (s *Skill) MarshalJSON() ([]byte, error) { // Source: The source of a field. type Source struct { - // Id: A unique identifier within the source type generated by the + // Etag: The [HTTP entity tag](https://en.wikipedia.org/wiki/HTTP_ETag) + // of the + // source. Used for web cache validation. 
Only populated + // in + // person.metadata.sources. + Etag string `json:"etag,omitempty"` + + // Id: The unique identifier within the source type generated by the // server. Id string `json:"id,omitempty"` + // ResourceName: The resource name of the source. Only set if there is a + // separate + // resource endpoint. + ResourceName string `json:"resourceName,omitempty"` + // Type: The source type. // // Possible values: - // "OTHER" - // "ACCOUNT" - // "PROFILE" - // "DOMAIN_PROFILE" - // "CONTACT" + // "SOURCE_TYPE_UNSPECIFIED" - Unspecified. + // "ACCOUNT" - [Google Account](https://accounts.google.com). + // "PROFILE" - [Google profile](https://profiles.google.com). You can + // view the + // profile at https://profiles.google.com/ where is the + // source + // id. + // "DOMAIN_PROFILE" - [Google Apps domain + // profile](https://admin.google.com). + // "CONTACT" - [Google contact](https://contacts.google.com). You can + // view the + // contact at https://contact.google.com/ where is the + // source + // id. Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1542,7 +1713,7 @@ type Source struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -1557,7 +1728,7 @@ func (s *Source) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Tagline: A brief one-line description of the person. +// Tagline: A read-only brief one-line description of the person. type Tagline struct { // Metadata: Metadata about the tagline. Metadata *FieldMetadata `json:"metadata,omitempty"` @@ -1591,17 +1762,25 @@ func (s *Tagline) MarshalJSON() ([]byte, error) { // Url: A person's associated URLs. type Url struct { // FormattedType: The read-only type of the URL translated and formatted - // in the viewer's account locale or the `Accept-Language` HTTP header - // locale. + // in the viewer's + // account locale or the `Accept-Language` HTTP header locale. FormattedType string `json:"formattedType,omitempty"` // Metadata: Metadata about the URL. Metadata *FieldMetadata `json:"metadata,omitempty"` - // Type: The type of the URL. The type can be custom or predefined. - // Possible values include, but are not limited to, the following: * - // `home` * `work` * `blog` * `profile` * `homePage` * `ftp` * - // `reservations` * `appInstallPage`: website for a Google+ application. + // Type: The type of the URL. The type can be custom or + // predefined. + // Possible values include, but are not limited to, the following: + // + // * `home` + // * `work` + // * `blog` + // * `profile` + // * `homePage` + // * `ftp` + // * `reservations` + // * `appInstallPage`: website for a Google+ application. 
// * `other` Type string `json:"type,omitempty"` @@ -1643,7 +1822,8 @@ type PeopleGetCall struct { } // Get: Provides information about a person resource for a resource -// name. Use `people/me` to indicate the authenticated user. +// name. Use +// `people/me` to indicate the authenticated user. func (r *PeopleService) Get(resourceName string) *PeopleGetCall { c := &PeopleGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resourceName = resourceName @@ -1652,9 +1832,15 @@ func (r *PeopleService) Get(resourceName string) *PeopleGetCall { // RequestMaskIncludeField sets the optional parameter // "requestMask.includeField": Comma-separated list of fields to be -// included in the response. Omitting this field will include all -// fields. Each path should start with `person.`: for example, -// `person.names` or `person.photos`. +// included in the response. Omitting +// this field will include all fields except for connections.list +// requests, +// which have a default mask that includes common fields like metadata, +// name, +// photo, and profile url. +// Each path should start with `person.`: for example, `person.names` +// or +// `person.photos`. func (c *PeopleGetCall) RequestMaskIncludeField(requestMaskIncludeField string) *PeopleGetCall { c.urlParams_.Set("requestMask.includeField", requestMaskIncludeField) return c @@ -1701,6 +1887,7 @@ func (c *PeopleGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1754,7 +1941,8 @@ func (c *PeopleGetCall) Do(opts ...googleapi.CallOption) (*Person, error) { } return ret, nil // { - // "description": "Provides information about a person resource for a resource name. Use `people/me` to indicate the authenticated user.", + // "description": "Provides information about a person resource for a resource name. Use\n`people/me` to indicate the authenticated user.", + // "flatPath": "v1/people/{peopleId}", // "httpMethod": "GET", // "id": "people.people.get", // "parameterOrder": [ @@ -1762,14 +1950,15 @@ func (c *PeopleGetCall) Do(opts ...googleapi.CallOption) (*Person, error) { // ], // "parameters": { // "requestMask.includeField": { - // "description": "Comma-separated list of fields to be included in the response. Omitting this field will include all fields. Each path should start with `person.`: for example, `person.names` or `person.photos`.", + // "description": "Comma-separated list of fields to be included in the response. Omitting\nthis field will include all fields except for connections.list requests,\nwhich have a default mask that includes common fields like metadata, name,\nphoto, and profile url.\nEach path should start with `person.`: for example, `person.names` or\n`person.photos`.", + // "format": "google-fieldmask", // "location": "query", // "type": "string" // }, // "resourceName": { - // "description": "The resource name of the person to provide information about. - To get information about the authenticated user, specify `people/me`. 
- To get information about any user, specify the resource name that identifies the user, such as the resource names returned by [`people.connections.list`](/people/api/rest/v1/people.connections/list).", + // "description": "The resource name of the person to provide information about.\n\n- To get information about the authenticated user, specify `people/me`.\n- To get information about any user, specify the resource name that\n identifies the user, such as the resource names returned by\n [`people.connections.list`](/people/api/rest/v1/people.connections/list).", // "location": "path", - // "pattern": "^people/[^/]*$", + // "pattern": "^people/[^/]+$", // "required": true, // "type": "string" // } @@ -1804,8 +1993,10 @@ type PeopleGetBatchGetCall struct { } // GetBatchGet: Provides information about a list of specific people by -// specifying a list of requested resource names. Use `people/me` to -// indicate the authenticated user. +// specifying a list +// of requested resource names. Use `people/me` to indicate the +// authenticated +// user. func (r *PeopleService) GetBatchGet() *PeopleGetBatchGetCall { c := &PeopleGetBatchGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c @@ -1813,19 +2004,28 @@ func (r *PeopleService) GetBatchGet() *PeopleGetBatchGetCall { // RequestMaskIncludeField sets the optional parameter // "requestMask.includeField": Comma-separated list of fields to be -// included in the response. Omitting this field will include all -// fields. Each path should start with `person.`: for example, -// `person.names` or `person.photos`. +// included in the response. Omitting +// this field will include all fields except for connections.list +// requests, +// which have a default mask that includes common fields like metadata, +// name, +// photo, and profile url. +// Each path should start with `person.`: for example, `person.names` +// or +// `person.photos`. func (c *PeopleGetBatchGetCall) RequestMaskIncludeField(requestMaskIncludeField string) *PeopleGetBatchGetCall { c.urlParams_.Set("requestMask.includeField", requestMaskIncludeField) return c } // ResourceNames sets the optional parameter "resourceNames": The -// resource name, such as one returned by -// [`people.connections.list`](/people/api/rest/v1/people.connections/lis -// t), of one of the people to provide information about. You can -// include this parameter up to 50 times in one request. +// resource name, such as one returned +// by +// [`people.connections.list`](/people/api/rest/v1/people.connections/ +// list), +// of one of the people to provide information about. You can include +// this +// parameter up to 50 times in one request. func (c *PeopleGetBatchGetCall) ResourceNames(resourceNames ...string) *PeopleGetBatchGetCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -1872,6 +2072,7 @@ func (c *PeopleGetBatchGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1922,17 +2123,20 @@ func (c *PeopleGetBatchGetCall) Do(opts ...googleapi.CallOption) (*GetPeopleResp } return ret, nil // { - // "description": "Provides information about a list of specific people by specifying a list of requested resource names. 
Use `people/me` to indicate the authenticated user.", + // "description": "Provides information about a list of specific people by specifying a list\nof requested resource names. Use `people/me` to indicate the authenticated\nuser.", + // "flatPath": "v1/people:batchGet", // "httpMethod": "GET", // "id": "people.people.getBatchGet", + // "parameterOrder": [], // "parameters": { // "requestMask.includeField": { - // "description": "Comma-separated list of fields to be included in the response. Omitting this field will include all fields. Each path should start with `person.`: for example, `person.names` or `person.photos`.", + // "description": "Comma-separated list of fields to be included in the response. Omitting\nthis field will include all fields except for connections.list requests,\nwhich have a default mask that includes common fields like metadata, name,\nphoto, and profile url.\nEach path should start with `person.`: for example, `person.names` or\n`person.photos`.", + // "format": "google-fieldmask", // "location": "query", // "type": "string" // }, // "resourceNames": { - // "description": "The resource name, such as one returned by [`people.connections.list`](/people/api/rest/v1/people.connections/list), of one of the people to provide information about. You can include this parameter up to 50 times in one request.", + // "description": "The resource name, such as one returned by\n[`people.connections.list`](/people/api/rest/v1/people.connections/list),\nof one of the people to provide information about. You can include this\nparameter up to 50 times in one request.", // "location": "query", // "repeated": true, // "type": "string" @@ -1969,7 +2173,8 @@ type PeopleConnectionsListCall struct { } // List: Provides a list of the authenticated user's contacts merged -// with any linked profiles. +// with any +// linked profiles. func (r *PeopleConnectionsService) List(resourceName string) *PeopleConnectionsListCall { c := &PeopleConnectionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resourceName = resourceName @@ -1977,8 +2182,8 @@ func (r *PeopleConnectionsService) List(resourceName string) *PeopleConnectionsL } // PageSize sets the optional parameter "pageSize": The number of -// connections to include in the response. Valid values are between 1 -// and 500, inclusive. Defaults to 100. +// connections to include in the response. Valid values are +// between 1 and 500, inclusive. Defaults to 100. func (c *PeopleConnectionsListCall) PageSize(pageSize int64) *PeopleConnectionsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -1993,16 +2198,32 @@ func (c *PeopleConnectionsListCall) PageToken(pageToken string) *PeopleConnectio // RequestMaskIncludeField sets the optional parameter // "requestMask.includeField": Comma-separated list of fields to be -// included in the response. Omitting this field will include all -// fields. Each path should start with `person.`: for example, -// `person.names` or `person.photos`. +// included in the response. Omitting +// this field will include all fields except for connections.list +// requests, +// which have a default mask that includes common fields like metadata, +// name, +// photo, and profile url. +// Each path should start with `person.`: for example, `person.names` +// or +// `person.photos`. 
func (c *PeopleConnectionsListCall) RequestMaskIncludeField(requestMaskIncludeField string) *PeopleConnectionsListCall { c.urlParams_.Set("requestMask.includeField", requestMaskIncludeField) return c } +// RequestSyncToken sets the optional parameter "requestSyncToken": +// Whether the response should include a sync token, which can be used +// to get +// all changes since the last request. +func (c *PeopleConnectionsListCall) RequestSyncToken(requestSyncToken bool) *PeopleConnectionsListCall { + c.urlParams_.Set("requestSyncToken", fmt.Sprint(requestSyncToken)) + return c +} + // SortOrder sets the optional parameter "sortOrder": The order in which -// the connections should be sorted. Defaults to +// the connections should be sorted. Defaults +// to // `LAST_MODIFIED_ASCENDING`. // // Possible values: @@ -2015,8 +2236,9 @@ func (c *PeopleConnectionsListCall) SortOrder(sortOrder string) *PeopleConnectio } // SyncToken sets the optional parameter "syncToken": A sync token, -// returned by a previous call to `people.connections.list`. Only -// resources changed since the sync token was created are returned. +// returned by a previous call to `people.connections.list`. +// Only resources changed since the sync token was created will be +// returned. func (c *PeopleConnectionsListCall) SyncToken(syncToken string) *PeopleConnectionsListCall { c.urlParams_.Set("syncToken", syncToken) return c @@ -2063,6 +2285,7 @@ func (c *PeopleConnectionsListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2116,7 +2339,8 @@ func (c *PeopleConnectionsListCall) Do(opts ...googleapi.CallOption) (*ListConne } return ret, nil // { - // "description": "Provides a list of the authenticated user's contacts merged with any linked profiles.", + // "description": "Provides a list of the authenticated user's contacts merged with any\nlinked profiles.", + // "flatPath": "v1/people/{peopleId}/connections", // "httpMethod": "GET", // "id": "people.people.connections.list", // "parameterOrder": [ @@ -2124,7 +2348,7 @@ func (c *PeopleConnectionsListCall) Do(opts ...googleapi.CallOption) (*ListConne // ], // "parameters": { // "pageSize": { - // "description": "The number of connections to include in the response. Valid values are between 1 and 500, inclusive. Defaults to 100.", + // "description": "The number of connections to include in the response. Valid values are\nbetween 1 and 500, inclusive. Defaults to 100.", // "format": "int32", // "location": "query", // "type": "integer" @@ -2135,19 +2359,25 @@ func (c *PeopleConnectionsListCall) Do(opts ...googleapi.CallOption) (*ListConne // "type": "string" // }, // "requestMask.includeField": { - // "description": "Comma-separated list of fields to be included in the response. Omitting this field will include all fields. Each path should start with `person.`: for example, `person.names` or `person.photos`.", + // "description": "Comma-separated list of fields to be included in the response. 
Omitting\nthis field will include all fields except for connections.list requests,\nwhich have a default mask that includes common fields like metadata, name,\nphoto, and profile url.\nEach path should start with `person.`: for example, `person.names` or\n`person.photos`.", + // "format": "google-fieldmask", // "location": "query", // "type": "string" // }, + // "requestSyncToken": { + // "description": "Whether the response should include a sync token, which can be used to get\nall changes since the last request.", + // "location": "query", + // "type": "boolean" + // }, // "resourceName": { // "description": "The resource name to return connections for. Only `people/me` is valid.", // "location": "path", - // "pattern": "^people/[^/]*$", + // "pattern": "^people/[^/]+$", // "required": true, // "type": "string" // }, // "sortOrder": { - // "description": "The order in which the connections should be sorted. Defaults to `LAST_MODIFIED_ASCENDING`.", + // "description": "The order in which the connections should be sorted. Defaults to\n`LAST_MODIFIED_ASCENDING`.", // "enum": [ // "LAST_MODIFIED_ASCENDING", // "FIRST_NAME_ASCENDING", @@ -2157,7 +2387,7 @@ func (c *PeopleConnectionsListCall) Do(opts ...googleapi.CallOption) (*ListConne // "type": "string" // }, // "syncToken": { - // "description": "A sync token, returned by a previous call to `people.connections.list`. Only resources changed since the sync token was created are returned.", + // "description": "A sync token, returned by a previous call to `people.connections.list`.\nOnly resources changed since the sync token was created will be returned.", // "location": "query", // "type": "string" // } diff --git a/vendor/google.golang.org/api/playmoviespartner/v1/playmoviespartner-gen.go b/vendor/google.golang.org/api/playmoviespartner/v1/playmoviespartner-gen.go index 198d74a9c..c7ee38376 100644 --- a/vendor/google.golang.org/api/playmoviespartner/v1/playmoviespartner-gen.go +++ b/vendor/google.golang.org/api/playmoviespartner/v1/playmoviespartner-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Avails = NewAccountsAvailsService(s) @@ -803,6 +808,7 @@ func (c *AccountsAvailsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1018,6 +1024,7 @@ func (c *AccountsAvailsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1231,6 +1238,7 @@ func (c *AccountsOrdersGetCall) doRequest(alt string) (*http.Response, 
error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1443,6 +1451,7 @@ func (c *AccountsOrdersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1729,6 +1738,7 @@ func (c *AccountsStoreInfosListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1950,6 +1960,7 @@ func (c *AccountsStoreInfosCountryGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/plus/v1/plus-gen.go b/vendor/google.golang.org/api/plus/v1/plus-gen.go index 2240e8328..89f427a6b 100644 --- a/vendor/google.golang.org/api/plus/v1/plus-gen.go +++ b/vendor/google.golang.org/api/plus/v1/plus-gen.go @@ -72,9 +72,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Activities *ActivitiesService @@ -90,6 +91,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewActivitiesService(s *Service) *ActivitiesService { rs := &ActivitiesService{s: s} return rs @@ -2282,6 +2287,7 @@ func (c *ActivitiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2441,6 +2447,7 @@ func (c *ActivitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2667,6 +2674,7 @@ func (c *ActivitiesSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2857,6 +2865,7 @@ func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3024,6 +3033,7 @@ func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3213,6 +3223,7 @@ func (c *PeopleGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3384,6 +3395,7 @@ func (c *PeopleListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3607,6 +3619,7 @@ func (c *PeopleListByActivityCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3822,6 +3835,7 @@ func (c *PeopleSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/plusdomains/v1/plusdomains-gen.go b/vendor/google.golang.org/api/plusdomains/v1/plusdomains-gen.go index 74cc39ae0..63e8a0e34 100644 --- a/vendor/google.golang.org/api/plusdomains/v1/plusdomains-gen.go +++ b/vendor/google.golang.org/api/plusdomains/v1/plusdomains-gen.go @@ -95,9 +95,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Activities *ActivitiesService @@ -119,6 +120,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewActivitiesService(s *Service) *ActivitiesService { rs := &ActivitiesService{s: s} return rs @@ -2823,6 +2828,7 @@ func (c *ActivitiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2962,6 +2968,7 @@ func (c *ActivitiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.activity) if err != nil { @@ -3132,6 +3139,7 @@ func (c *ActivitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3338,6 +3346,7 @@ func (c *AudiencesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3516,6 +3525,7 @@ func (c *CirclesAddPeopleCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "circles/{circleId}/people") @@ -3663,6 +3673,7 @@ func (c *CirclesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3792,6 +3803,7 @@ func (c *CirclesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.circle) if err != nil { @@ -3954,6 +3966,7 @@ func (c *CirclesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4120,6 +4133,7 @@ func (c *CirclesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.circle) if err != nil { @@ -4252,6 +4266,7 @@ func (c *CirclesRemoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "circles/{circleId}") @@ -4362,6 +4377,7 @@ func (c *CirclesRemovePeopleCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "circles/{circleId}/people") @@ -4472,6 +4488,7 @@ func (c *CirclesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.circle) if err != nil { @@ -4615,6 +4632,7 @@ func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4744,6 +4762,7 @@ func (c *CommentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -4916,6 +4935,7 @@ func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5152,6 +5172,7 @@ func (c *MediaInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.media) if err != nil { @@ -5374,6 +5395,7 @@ func (c *PeopleGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5546,6 +5568,7 @@ func (c *PeopleListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5768,6 +5791,7 @@ func (c *PeopleListByActivityCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5977,6 +6001,7 @@ func (c *PeopleListByCircleCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/prediction/v1.2/prediction-gen.go b/vendor/google.golang.org/api/prediction/v1.2/prediction-gen.go index 5bdc138ed..9ae3c2ca1 100644 --- a/vendor/google.golang.org/api/prediction/v1.2/prediction-gen.go +++ b/vendor/google.golang.org/api/prediction/v1.2/prediction-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Hostedmodels *HostedmodelsService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewHostedmodelsService(s *Service) *HostedmodelsService { rs := &HostedmodelsService{s: s} return rs @@ -419,6 +424,7 @@ func (c *PredictCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -552,6 +558,7 @@ func (c *HostedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -683,6 +690,7 @@ func (c *TrainingDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "training/{data}") @@ -789,6 +797,7 @@ func (c *TrainingGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -922,6 +931,7 @@ func (c *TrainingInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.training) if err != nil { @@ -1051,6 +1061,7 @@ func (c *TrainingUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.update) if err != nil { diff --git a/vendor/google.golang.org/api/prediction/v1.3/prediction-gen.go b/vendor/google.golang.org/api/prediction/v1.3/prediction-gen.go index 519821df0..c865faff2 100644 --- a/vendor/google.golang.org/api/prediction/v1.3/prediction-gen.go +++ b/vendor/google.golang.org/api/prediction/v1.3/prediction-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Hostedmodels *HostedmodelsService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewHostedmodelsService(s *Service) *HostedmodelsService { rs := &HostedmodelsService{s: s} return rs @@ -479,6 +484,7 @@ func (c *HostedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -610,6 +616,7 @@ func (c *TrainingDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "training/{data}") @@ -716,6 +723,7 @@ func (c *TrainingGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -842,6 +850,7 @@ func (c *TrainingInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.training) if err != nil { @@ -964,6 +973,7 @@ func (c *TrainingPredictCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -1097,6 +1107,7 @@ func (c *TrainingUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.update) if err != nil { diff --git a/vendor/google.golang.org/api/prediction/v1.4/prediction-gen.go b/vendor/google.golang.org/api/prediction/v1.4/prediction-gen.go index 3829c726e..8033ca387 100644 --- a/vendor/google.golang.org/api/prediction/v1.4/prediction-gen.go +++ b/vendor/google.golang.org/api/prediction/v1.4/prediction-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Hostedmodels *HostedmodelsService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewHostedmodelsService(s *Service) *HostedmodelsService { rs := &HostedmodelsService{s: s} return rs @@ -525,6 +530,7 @@ func (c *HostedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -656,6 +662,7 @@ func (c *TrainedmodelsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "trainedmodels/{id}") @@ -762,6 +769,7 @@ func (c *TrainedmodelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -888,6 +896,7 @@ func (c *TrainedmodelsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.training) if err != nil { @@ -1010,6 +1019,7 @@ func (c *TrainedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -1143,6 +1153,7 @@ func (c *TrainedmodelsUpdateCall) doRequest(alt string) 
(*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.update) if err != nil { diff --git a/vendor/google.golang.org/api/prediction/v1.5/prediction-gen.go b/vendor/google.golang.org/api/prediction/v1.5/prediction-gen.go index 55cef8a10..0d4fc8de5 100644 --- a/vendor/google.golang.org/api/prediction/v1.5/prediction-gen.go +++ b/vendor/google.golang.org/api/prediction/v1.5/prediction-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Hostedmodels *HostedmodelsService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewHostedmodelsService(s *Service) *HostedmodelsService { rs := &HostedmodelsService{s: s} return rs @@ -978,6 +983,7 @@ func (c *HostedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -1121,6 +1127,7 @@ func (c *TrainedmodelsAnalyzeCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1247,6 +1254,7 @@ func (c *TrainedmodelsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "trainedmodels/{id}") @@ -1353,6 +1361,7 @@ func (c *TrainedmodelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1479,6 +1488,7 @@ func (c *TrainedmodelsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.training) if err != nil { @@ -1621,6 +1631,7 @@ func (c *TrainedmodelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1770,6 +1781,7 @@ func (c *TrainedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -1903,6 +1915,7 @@ func (c *TrainedmodelsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.update) if err != nil { diff --git a/vendor/google.golang.org/api/prediction/v1.6/prediction-gen.go b/vendor/google.golang.org/api/prediction/v1.6/prediction-gen.go index 51b0a18fa..5f535e3c4 100644 --- a/vendor/google.golang.org/api/prediction/v1.6/prediction-gen.go +++ b/vendor/google.golang.org/api/prediction/v1.6/prediction-gen.go @@ -74,9 +74,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Hostedmodels *HostedmodelsService @@ -90,6 +91,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewHostedmodelsService(s *Service) *HostedmodelsService { rs := &HostedmodelsService{s: s} return rs @@ -948,6 +953,7 @@ func (c *HostedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -1102,6 +1108,7 @@ func (c *TrainedmodelsAnalyzeCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1239,6 +1246,7 @@ func (c *TrainedmodelsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/trainedmodels/{id}") @@ -1356,6 +1364,7 @@ func (c *TrainedmodelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1493,6 +1502,7 @@ func (c *TrainedmodelsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.insert) if err != nil { @@ -1652,6 +1662,7 @@ func (c *TrainedmodelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1816,6 +1827,7 @@ func (c *TrainedmodelsPredictCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = 
v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.input) if err != nil { @@ -1960,6 +1972,7 @@ func (c *TrainedmodelsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.update) if err != nil { diff --git a/vendor/google.golang.org/api/proximitybeacon/v1beta1/proximitybeacon-gen.go b/vendor/google.golang.org/api/proximitybeacon/v1beta1/proximitybeacon-gen.go index 1f9e5eebc..efaddd04a 100644 --- a/vendor/google.golang.org/api/proximitybeacon/v1beta1/proximitybeacon-gen.go +++ b/vendor/google.golang.org/api/proximitybeacon/v1beta1/proximitybeacon-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Beaconinfo *BeaconinfoService @@ -84,6 +85,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBeaconinfoService(s *Service) *BeaconinfoService { rs := &BeaconinfoService{s: s} return rs @@ -1149,6 +1154,7 @@ func (c *BeaconinfoGetforobservedCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getinfoforobservedbeaconsrequest) if err != nil { @@ -1280,6 +1286,7 @@ func (c *BeaconsActivateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+beaconName}:activate") @@ -1425,6 +1432,7 @@ func (c *BeaconsDeactivateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+beaconName}:deactivate") @@ -1569,6 +1577,7 @@ func (c *BeaconsDecommissionCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+beaconName}:decommission") @@ -1727,6 +1736,7 @@ func (c *BeaconsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1948,6 +1958,7 @@ func (c *BeaconsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = 
v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2117,6 +2128,7 @@ func (c *BeaconsRegisterCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.beacon) if err != nil { @@ -2262,6 +2274,7 @@ func (c *BeaconsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.beacon) if err != nil { @@ -2427,6 +2440,7 @@ func (c *BeaconsAttachmentsBatchDeleteCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+beaconName}/attachments:batchDelete") @@ -2582,6 +2596,7 @@ func (c *BeaconsAttachmentsCreateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.beaconattachment) if err != nil { @@ -2734,6 +2749,7 @@ func (c *BeaconsAttachmentsDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+attachmentName}") @@ -2902,6 +2918,7 @@ func (c *BeaconsAttachmentsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3093,6 +3110,7 @@ func (c *BeaconsDiagnosticsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3289,6 +3307,7 @@ func (c *NamespacesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3420,6 +3439,7 @@ func (c *NamespacesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.namespace) if err != nil { @@ -3572,6 +3592,7 @@ func (c *V1beta1GetEidparamsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json 
b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json index 08fb7675e..ca1f7d325 100644 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json @@ -1,1158 +1,1158 @@ { + "basePath": "", + "ownerDomain": "google.com", + "name": "pubsub", + "batchPath": "batch", + "revision": "20170124", "id": "pubsub:v1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/pubsub": { - "description": "View and manage Pub/Sub topics and subscriptions" - } - } - } - }, - "description": "Provides reliable, many-to-many, asynchronous messaging between applications.\n", - "protocol": "rest", + "documentationLink": "https://cloud.google.com/pubsub/docs", "title": "Google Cloud Pub/Sub API", + "ownerName": "Google", + "discoveryVersion": "v1", "resources": { "projects": { "resources": { "subscriptions": { "methods": { - "modifyPushConfig": { - "id": "pubsub.projects.subscriptions.modifyPushConfig", + "get": { + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + "id": "pubsub.projects.subscriptions.get", + "path": "v1/{+subscription}", + "description": "Gets the configuration details of a subscription.", + "httpMethod": "GET", "response": { - "$ref": "Empty" + "$ref": "Subscription" }, "parameterOrder": [ "subscription" ], - "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", - "request": { - "$ref": "ModifyPushConfigRequest" - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", - "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { "subscription": { - "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", + "description": "The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, "type": "string" } + } + }, + "testIamPermissions": { + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "pubsub.projects.subscriptions.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "request": { + "$ref": "TestIamPermissionsRequest" }, - "path": "v1/{+subscription}:modifyPushConfig", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + } }, - "getIamPolicy": { - "id": "pubsub.projects.subscriptions.getIamPolicy", - "response": { - "$ref": "Policy" + "modifyPushConfig": { + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", + "id": "pubsub.projects.subscriptions.modifyPushConfig", + "path": "v1/{+subscription}:modifyPushConfig", + "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", + "request": { + "$ref": "ModifyPushConfigRequest" }, + "httpMethod": "POST", "parameterOrder": [ - "resource" + "subscription" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" ], - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", - "httpMethod": "GET", "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "subscription": { + "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "location": "path" } + } + }, + "delete": { + "httpMethod": "DELETE", + "parameterOrder": [ + "subscription" + ], + "response": { + "$ref": "Empty" }, - "path": "v1/{+resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "parameters": { + "subscription": { + "description": "The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + "id": "pubsub.projects.subscriptions.delete", + "path": "v1/{+subscription}", + "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified." }, "pull": { - "id": "pubsub.projects.subscriptions.pull", + "request": { + "$ref": "PullRequest" + }, + "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", "response": { "$ref": "PullResponse" }, "parameterOrder": [ "subscription" ], - "description": "Pulls messages from the server. 
Returns an empty list if there are no\nmessages available in the backlog. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", - "request": { - "$ref": "PullRequest" - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", "httpMethod": "POST", "parameters": { "subscription": { + "location": "path", "description": "The subscription from which messages should be pulled.\nFormat is `projects/{project}/subscriptions/{sub}`.", "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" } }, - "path": "v1/{+subscription}:pull", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", + "path": "v1/{+subscription}:pull", + "id": "pubsub.projects.subscriptions.pull" }, "list": { - "id": "pubsub.projects.subscriptions.list", + "description": "Lists matching subscriptions.", + "httpMethod": "GET", "response": { "$ref": "ListSubscriptionsResponse" }, "parameterOrder": [ "project" ], - "description": "Lists matching subscriptions.", - "flatPath": "v1/projects/{projectsId}/subscriptions", - "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { + "pageToken": { + "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", + "type": "string", + "location": "query" + }, "pageSize": { "description": "Maximum number of subscriptions to return.", - "location": "query", + "format": "int32", "type": "integer", - "format": "int32" + "location": "query" }, "project": { - "description": "The name of the cloud project that subscriptions belong to.\nFormat is `projects/{project}`.", - "required": true, "pattern": "^projects/[^/]+$", "location": "path", - "type": "string" - }, - "pageToken": { - "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", - "location": "query", + "description": "The name of the cloud project that subscriptions belong to.\nFormat is `projects/{project}`.", + "required": true, "type": "string" } }, - "path": "v1/{+project}/subscriptions", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1/projects/{projectsId}/subscriptions", + "id": "pubsub.projects.subscriptions.list", + "path": "v1/{+project}/subscriptions" }, - "get": { - "id": "pubsub.projects.subscriptions.get", - "response": { - "$ref": "Subscription" - }, - "parameterOrder": [ - "subscription" - ], - "description": "Gets the configuration details of a subscription.", - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "httpMethod": "GET", + "create": { "parameters": { - "subscription": { - "description": "The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, + "name": { "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", + "description": "The name of the subscription. 
It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "required": true, "type": "string" } }, - "path": "v1/{+subscription}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, - "create": { + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + "path": "v1/{+name}", "id": "pubsub.projects.subscriptions.create", + "request": { + "$ref": "Subscription" + }, + "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name format](https://cloud.google.com/pubsub/docs/overview#names).\nThe generated name is populated in the returned Subscription object.\nNote that for REST API requests, you must specify a name in the request.", "response": { "$ref": "Subscription" }, "parameterOrder": [ "name" ], - "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name format](https://cloud.google.com/pubsub/docs/overview#names).\nThe generated name is populated in the returned Subscription object.\nNote that for REST API requests, you must specify a name in the request.", + "httpMethod": "PUT" + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", "request": { - "$ref": "Subscription" + "$ref": "SetIamPolicyRequest" }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "httpMethod": "PUT", + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { - "name": { - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "location": "path" } }, - "path": "v1/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "pubsub.projects.subscriptions.setIamPolicy" }, - "modifyAckDeadline": { - "id": "pubsub.projects.subscriptions.modifyAckDeadline", + "acknowledge": { + "path": "v1/{+subscription}:acknowledge", + "id": "pubsub.projects.subscriptions.acknowledge", + "request": { + "$ref": "AcknowledgeRequest" + }, + "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. Acknowledging a message more\nthan once will not result in an error.", "response": { "$ref": "Empty" }, "parameterOrder": [ "subscription" ], - "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", - "request": { - "$ref": "ModifyAckDeadlineRequest" - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", "httpMethod": "POST", "parameters": { "subscription": { - "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "location": "path" } }, - "path": "v1/{+subscription}:modifyAckDeadline", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge" }, - "setIamPolicy": { - "id": "pubsub.projects.subscriptions.setIamPolicy", + "modifyAckDeadline": { "response": { - "$ref": "Policy" + "$ref": "Empty" }, "parameterOrder": [ - "resource" + "subscription" ], - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", "httpMethod": "POST", "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "subscription": { + "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "location": "path" } }, - "path": "v1/{+resource}:setIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, - "delete": { - "id": "pubsub.projects.subscriptions.delete", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" ], - "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified.", - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "httpMethod": "DELETE", + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", + "path": "v1/{+subscription}:modifyAckDeadline", + "id": "pubsub.projects.subscriptions.modifyAckDeadline", + "request": { + "$ref": "ModifyAckDeadlineRequest" + }, + "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages." + }, + "getIamPolicy": { "parameters": { - "subscription": { - "description": "The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, + "resource": { "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, "type": "string" } }, - "path": "v1/{+subscription}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, - "testIamPermissions": { - "id": "pubsub.projects.subscriptions.testIamPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", + "id": "pubsub.projects.subscriptions.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "httpMethod": "GET", "parameterOrder": [ "resource" ], - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "response": { + "$ref": "Policy" + } + } + } + }, + "snapshots": { + "methods": { + "setIamPolicy": { + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy", + "id": "pubsub.projects.snapshots.setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.", "request": { - "$ref": "TestIamPermissionsRequest" + "$ref": "SetIamPolicyRequest" }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$", "location": "path", - "type": "string" + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`." } + } + }, + "testIamPermissions": { + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "TestIamPermissionsResponse" }, - "path": "v1/{+resource}:testIamPermissions", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "parameters": { + "resource": { + "location": "path", + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", + "id": "pubsub.projects.snapshots.testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "request": { + "$ref": "TestIamPermissionsRequest" + } }, - "acknowledge": { - "id": "pubsub.projects.subscriptions.acknowledge", + "getIamPolicy": { "response": { - "$ref": "Empty" + "$ref": "Policy" }, "parameterOrder": [ - "subscription" + "resource" ], - "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. Acknowledging a message more\nthan once will not result in an error.", - "request": { - "$ref": "AcknowledgeRequest" - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", - "httpMethod": "POST", + "httpMethod": "GET", "parameters": { - "subscription": { - "description": "The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$", + "location": "path" } }, - "path": "v1/{+subscription}:acknowledge", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "id": "pubsub.projects.snapshots.getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." } } }, "topics": { - "resources": { - "subscriptions": { - "methods": { - "list": { - "id": "pubsub.projects.topics.subscriptions.list", - "response": { - "$ref": "ListTopicSubscriptionsResponse" - }, - "parameterOrder": [ - "topic" - ], - "description": "Lists the name of the subscriptions for this topic.", - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", - "httpMethod": "GET", - "parameters": { - "topic": { - "description": "The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", - "type": "string" - }, - "pageSize": { - "description": "Maximum number of subscription names to return.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "pageToken": { - "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+topic}/subscriptions", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - } - } - } - }, "methods": { - "getIamPolicy": { - "id": "pubsub.projects.topics.getIamPolicy", + "testIamPermissions": { "response": { - "$ref": "Policy" + "$ref": "TestIamPermissionsResponse" }, "parameterOrder": [ "resource" ], - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:getIamPolicy", - "httpMethod": "GET", + "httpMethod": "POST", "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, "type": "string" } }, - "path": "v1/{+resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "pubsub.projects.topics.testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." }, - "publish": { - "id": "pubsub.projects.topics.publish", + "delete": { + "path": "v1/{+topic}", + "id": "pubsub.projects.topics.delete", + "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", "response": { - "$ref": "PublishResponse" + "$ref": "Empty" }, "parameterOrder": [ "topic" ], - "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist. The message payload must not be empty; it must contain\n either a non-empty data field, or at least one attribute.", - "request": { - "$ref": "PublishRequest" - }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:publish", - "httpMethod": "POST", + "httpMethod": "DELETE", "parameters": { "topic": { - "description": "The messages in the request will be published on this topic.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", + "description": "Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, "type": "string" } }, - "path": "v1/{+topic}:publish", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}" }, "list": { - "id": "pubsub.projects.topics.list", + "httpMethod": "GET", "response": { "$ref": "ListTopicsResponse" }, "parameterOrder": [ "project" ], - "description": "Lists matching topics.", - "flatPath": "v1/projects/{projectsId}/topics", - "httpMethod": "GET", "parameters": { - "pageSize": { - "description": "Maximum number of topics to return.", - "location": "query", - "type": "integer", - "format": "int32" - }, "project": { - "description": "The name of the cloud project that topics belong to.\nFormat is `projects/{project}`.", - "required": true, "pattern": "^projects/[^/]+$", "location": "path", + "description": "The name of the cloud project that topics belong to.\nFormat is `projects/{project}`.", + "required": true, "type": "string" }, "pageToken": { "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", + "type": "string", + "location": "query" + }, + "pageSize": { "location": "query", - "type": "string" + "description": "Maximum number of topics to return.", + "format": "int32", + "type": "integer" } }, - "path": "v1/{+project}/topics", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", 
"https://www.googleapis.com/auth/pubsub" - ] - }, - "get": { - "id": "pubsub.projects.topics.get", - "response": { - "$ref": "Topic" - }, - "parameterOrder": [ - "topic" ], - "description": "Gets the configuration of a topic.", - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - "httpMethod": "GET", - "parameters": { - "topic": { - "description": "The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+topic}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1/projects/{projectsId}/topics", + "id": "pubsub.projects.topics.list", + "path": "v1/{+project}/topics", + "description": "Lists matching topics." }, "create": { - "id": "pubsub.projects.topics.create", "response": { "$ref": "Topic" }, "parameterOrder": [ "name" ], - "description": "Creates the given topic with the given name.", - "request": { - "$ref": "Topic" - }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", "httpMethod": "PUT", "parameters": { "name": { - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", - "type": "string" + "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`." } }, - "path": "v1/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, + ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", + "path": "v1/{+name}", + "id": "pubsub.projects.topics.create", + "request": { + "$ref": "Topic" + }, + "description": "Creates the given topic with the given name." + }, "setIamPolicy": { + "path": "v1/{+resource}:setIamPolicy", "id": "pubsub.projects.topics.setIamPolicy", + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "request": { + "$ref": "SetIamPolicyRequest" + }, "response": { "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy", "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+resource}:setIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, - "delete": { - "id": "pubsub.projects.topics.delete", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "topic" ], - "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - "httpMethod": "DELETE", "parameters": { - "topic": { - "description": "Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, + "resource": { "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, "type": "string" } }, - "path": "v1/{+topic}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy" }, - "testIamPermissions": { - "id": "pubsub.projects.topics.testIamPermissions", + "getIamPolicy": { + "path": "v1/{+resource}:getIamPolicy", + "id": "pubsub.projects.topics.getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", "response": { - "$ref": "TestIamPermissionsResponse" + "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+resource}:testIamPermissions", + "httpMethod": "GET", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - } - } - }, - "snapshots": { - "methods": { - "testIamPermissions": { - "id": "pubsub.projects.snapshots.testIamPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" ], - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", - "httpMethod": "POST", "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/snapshots/[^/]+$", + "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, "type": "string" } }, - "path": "v1/{+resource}:testIamPermissions", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:getIamPolicy" }, - "setIamPolicy": { - "id": "pubsub.projects.snapshots.setIamPolicy", - "response": { - "$ref": "Policy" - }, + "get": { + "httpMethod": "GET", "parameterOrder": [ - "resource" + "topic" ], - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" + "response": { + "$ref": "Topic" }, - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy", - "httpMethod": "POST", "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/snapshots/[^/]+$", + "topic": { "location": "path", - "type": "string" + "description": "The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$" } }, - "path": "v1/{+resource}:setIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", + "id": "pubsub.projects.topics.get", + "path": "v1/{+topic}", + "description": "Gets the configuration of a topic." }, - "getIamPolicy": { - "id": "pubsub.projects.snapshots.getIamPolicy", - "response": { - "$ref": "Policy" + "publish": { + "request": { + "$ref": "PublishRequest" }, + "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist. 
The message payload must not be empty; it must contain\n either a non-empty data field, or at least one attribute.", + "httpMethod": "POST", "parameterOrder": [ - "resource" + "topic" ], - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", - "httpMethod": "GET", + "response": { + "$ref": "PublishResponse" + }, "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/snapshots/[^/]+$", + "topic": { + "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", + "description": "The messages in the request will be published on this topic.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, "type": "string" } }, - "path": "v1/{+resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:publish", + "id": "pubsub.projects.topics.publish", + "path": "v1/{+topic}:publish" + } + }, + "resources": { + "subscriptions": { + "methods": { + "list": { + "response": { + "$ref": "ListTopicSubscriptionsResponse" + }, + "parameterOrder": [ + "topic" + ], + "httpMethod": "GET", + "parameters": { + "topic": { + "pattern": "^projects/[^/]+/topics/[^/]+$", + "location": "path", + "description": "The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Maximum number of subscription names to return.", + "format": "int32", + "type": "integer" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", + "path": "v1/{+topic}/subscriptions", + "id": "pubsub.projects.topics.subscriptions.list", + "description": "Lists the name of the subscriptions for this topic." + } + } } } } } } }, - "schemas": { - "Topic": { - "description": "A topic resource.", - "type": "object", - "properties": { - "name": { - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", - "type": "string" - } - }, - "id": "Topic" + "parameters": { + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, - "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "alt": { + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "type": "string", + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token." + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + } + }, + "schemas": { + "ModifyPushConfigRequest": { "type": "object", "properties": { - "policy": { - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", - "$ref": "Policy" + "pushConfig": { + "$ref": "PushConfig", + "description": "The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` is not called." } }, - "id": "SetIamPolicyRequest" + "id": "ModifyPushConfigRequest", + "description": "Request for the ModifyPushConfig method." }, - "ReceivedMessage": { - "description": "A message and its corresponding acknowledgment ID.", + "PubsubMessage": { + "description": "A message data and its attributes. The message payload must not be empty;\nit must contain either a non-empty data field, or at least one attribute.", "type": "object", "properties": { - "ackId": { - "description": "This ID can be used to acknowledge the received message.", + "publishTime": { + "description": "The time at which the message was published, populated by the server when\nit receives the `Publish` call. 
It must not be populated by the\npublisher in a `Publish` call.", + "format": "google-datetime", "type": "string" }, - "message": { - "description": "The message.", - "$ref": "PubsubMessage" - } - }, - "id": "ReceivedMessage" - }, - "PublishRequest": { - "description": "Request for the Publish method.", - "type": "object", - "properties": { - "messages": { - "description": "The messages to publish.", - "type": "array", - "items": { - "$ref": "PubsubMessage" + "data": { + "description": "The message payload.", + "format": "byte", + "type": "string" + }, + "attributes": { + "description": "Optional attributes for this message.", + "type": "object", + "additionalProperties": { + "type": "string" } + }, + "messageId": { + "type": "string", + "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. It must not be populated by the publisher in a `Publish` call." } }, - "id": "PublishRequest" + "id": "PubsubMessage" }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", + "Binding": { + "id": "Binding", + "description": "Associates `members` with a `role`.", "type": "object", "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", "type": "array", "items": { "type": "string" } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" } - }, - "id": "TestIamPermissionsResponse" + } }, - "PublishResponse": { - "description": "Response for the `Publish` method.", + "AcknowledgeRequest": { + "description": "Request for the Acknowledge method.", "type": "object", "properties": { - "messageIds": { - "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", + "ackIds": { + "description": "The acknowledgment ID for the messages being acknowledged that was returned\nby the Pub/Sub system in the `Pull` response. 
Must not be empty.", "type": "array", "items": { "type": "string" } } }, - "id": "PublishResponse" + "id": "AcknowledgeRequest" }, - "ListSubscriptionsResponse": { - "description": "Response for the `ListSubscriptions` method.", + "ListTopicsResponse": { + "description": "Response for the `ListTopics` method.", "type": "object", "properties": { - "subscriptions": { - "description": "The subscriptions that match the request.", + "topics": { "type": "array", "items": { - "$ref": "Subscription" - } + "$ref": "Topic" + }, + "description": "The resulting topics." }, "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListSubscriptionsRequest` to get more subscriptions.", + "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`.", "type": "string" } }, - "id": "ListSubscriptionsResponse" + "id": "ListTopicsResponse" }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", - "type": "object", - "properties": { - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", - "type": "array", - "items": { - "$ref": "Binding" - } - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "type": "string", - "format": "byte" - }, - "version": { - "description": "Version of the `Policy`. The default version is 0.", - "type": "integer", - "format": "int32" - } - }, - "id": "Policy" + "Empty": { + "properties": {}, + "id": "Empty", + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object" }, "ListTopicSubscriptionsResponse": { - "description": "Response for the `ListTopicSubscriptions` method.", - "type": "object", "properties": { + "nextPageToken": { + "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListTopicSubscriptionsRequest` to get more subscriptions.", + "type": "string" + }, "subscriptions": { "description": "The names of the subscriptions that match the request.", "type": "array", "items": { "type": "string" } - }, - "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListTopicSubscriptionsRequest` to get more subscriptions.", - "type": "string" } }, - "id": "ListTopicSubscriptionsResponse" - }, - "Subscription": { - "description": "A subscription resource.", - "type": "object", - "properties": { - "pushConfig": { - "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods.", - "$ref": "PushConfig" - }, - "topic": { - "description": "The name of the topic from which this subscription is receiving messages.\nFormat is `projects/{project}/topics/{topic}`.\nThe value of this field will be `_deleted-topic_` if the topic has been\ndeleted.", - "type": "string" - }, - "ackDeadlineSeconds": { - "description": "This value is the maximum time after a subscriber receives a message\nbefore the subscriber should acknowledge the message. After message\ndelivery but before the ack deadline expires and before the message is\nacknowledged, it is an outstanding message and will not be delivered\nagain during that time (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using\npull.\nThe minimum custom deadline you can specify is 10 seconds.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\nIf this parameter is 0, a default value of 10 seconds is used.\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.", - "type": "integer", - "format": "int32" - }, - "name": { - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", - "type": "string" - } - }, - "id": "Subscription" + "id": "ListTopicSubscriptionsResponse", + "description": "Response for the `ListTopicSubscriptions` method.", + "type": "object" }, - "ModifyAckDeadlineRequest": { - "description": "Request for the ModifyAckDeadline method.", + "PullResponse": { + "description": "Response for the `Pull` method.", "type": "object", "properties": { - "ackDeadlineSeconds": { - "description": "The new ack deadline with respect to the time this request was sent to\nthe Pub/Sub system. For example, if the value is 10, the new\nack deadline will expire 10 seconds after the `ModifyAckDeadline` call\nwas made. Specifying zero may immediately make the message available for\nanother pull request.\nThe minimum deadline you can specify is 0 seconds.\nThe maximum deadline you can specify is 600 seconds (10 minutes).", - "type": "integer", - "format": "int32" - }, - "ackIds": { - "description": "List of acknowledgment IDs.", + "receivedMessages": { "type": "array", "items": { - "type": "string" - } + "$ref": "ReceivedMessage" + }, + "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if\nthere are no more available in the backlog. The Pub/Sub system may return\nfewer than the `maxMessages` requested even if there are more messages\navailable in the backlog." } }, - "id": "ModifyAckDeadlineRequest" + "id": "PullResponse" }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", + "ReceivedMessage": { "type": "object", "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "array", - "items": { - "type": "string" - } + "message": { + "description": "The message.", + "$ref": "PubsubMessage" + }, + "ackId": { + "description": "This ID can be used to acknowledge the received message.", + "type": "string" } }, - "id": "TestIamPermissionsRequest" + "id": "ReceivedMessage", + "description": "A message and its corresponding acknowledgment ID." }, "PushConfig": { "description": "Configuration for a push delivery endpoint.", "type": "object", "properties": { + "pushEndpoint": { + "type": "string", + "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\"." + }, "attributes": { - "description": "Endpoint configuration attributes.\n\nEvery endpoint has a set of API supported attributes that can be used to\ncontrol different aspects of the message delivery.\n\nThe currently supported attribute is `x-goog-version`, which you can\nuse to change the format of the push message. This attribute\nindicates the version of the data expected by the endpoint. This\ncontrols the shape of the envelope (i.e. its fields and metadata).\nThe endpoint version is based on the version of the Pub/Sub\nAPI.\n\nIf not present during the `CreateSubscription` call, it will default to\nthe version of the API used to make such call. If not present during a\n`ModifyPushConfig` call, its value will not be changed. 
`GetSubscription`\ncalls will always return a valid version, even if the subscription was\ncreated without this attribute.\n\nThe possible values for this attribute are:\n\n* `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.\n* `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", "additionalProperties": { "type": "string" }, - "type": "object" - }, - "pushEndpoint": { - "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", - "type": "string" + "description": "Endpoint configuration attributes.\n\nEvery endpoint has a set of API supported attributes that can be used to\ncontrol different aspects of the message delivery.\n\nThe currently supported attribute is `x-goog-version`, which you can\nuse to change the format of the push message. This attribute\nindicates the version of the data expected by the endpoint. This\ncontrols the shape of the envelope (i.e. its fields and metadata).\nThe endpoint version is based on the version of the Pub/Sub\nAPI.\n\nIf not present during the `CreateSubscription` call, it will default to\nthe version of the API used to make such call. If not present during a\n`ModifyPushConfig` call, its value will not be changed. `GetSubscription`\ncalls will always return a valid version, even if the subscription was\ncreated without this attribute.\n\nThe possible values for this attribute are:\n\n* `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.\n* `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", + "type": "object" } }, "id": "PushConfig" }, + "TestIamPermissionsResponse": { + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse", + "description": "Response message for `TestIamPermissions` method." + }, "PullRequest": { + "id": "PullRequest", "description": "Request for the `Pull` method.", "type": "object", "properties": { + "maxMessages": { + "description": "The maximum number of messages returned for this request. The Pub/Sub\nsystem may return fewer than the number specified.", + "format": "int32", + "type": "integer" + }, "returnImmediately": { "description": "If this field set to true, the system will respond immediately even if\nit there are no messages available to return in the `Pull` response.\nOtherwise, the system may wait (for a bounded amount of time) until at\nleast one message is available, rather than returning no messages. The\nclient may cancel the request if it does not wish to wait any longer for\nthe response.", "type": "boolean" + } + } + }, + "ListSubscriptionsResponse": { + "type": "object", + "properties": { + "nextPageToken": { + "type": "string", + "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListSubscriptionsRequest` to get more subscriptions." }, - "maxMessages": { - "description": "The maximum number of messages returned for this request. 
The Pub/Sub\nsystem may return fewer than the number specified.", - "type": "integer", - "format": "int32" + "subscriptions": { + "description": "The subscriptions that match the request.", + "type": "array", + "items": { + "$ref": "Subscription" + } } }, - "id": "PullRequest" + "id": "ListSubscriptionsResponse", + "description": "Response for the `ListSubscriptions` method." }, - "ModifyPushConfigRequest": { - "description": "Request for the ModifyPushConfig method.", + "PublishRequest": { + "description": "Request for the Publish method.", "type": "object", "properties": { - "pushConfig": { - "description": "The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` is not called.", - "$ref": "PushConfig" + "messages": { + "description": "The messages to publish.", + "type": "array", + "items": { + "$ref": "PubsubMessage" + } } }, - "id": "ModifyPushConfigRequest" + "id": "PublishRequest" }, - "PullResponse": { - "description": "Response for the `Pull` method.", - "type": "object", + "PublishResponse": { "properties": { - "receivedMessages": { - "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if\nthere are no more available in the backlog. The Pub/Sub system may return\nfewer than the `maxMessages` requested even if there are more messages\navailable in the backlog.", + "messageIds": { + "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", "type": "array", "items": { - "$ref": "ReceivedMessage" + "type": "string" } } }, - "id": "PullResponse" + "id": "PublishResponse", + "description": "Response for the `Publish` method.", + "type": "object" }, - "PubsubMessage": { - "description": "A message data and its attributes. The message payload must not be empty;\nit must contain either a non-empty data field, or at least one attribute.", - "type": "object", + "Subscription": { "properties": { - "data": { - "description": "The message payload.", - "type": "string", - "format": "byte" + "pushConfig": { + "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods.", + "$ref": "PushConfig" }, - "attributes": { - "description": "Optional attributes for this message.", - "additionalProperties": { - "type": "string" - }, - "type": "object" + "ackDeadlineSeconds": { + "description": "This value is the maximum time after a subscriber receives a message\nbefore the subscriber should acknowledge the message. After message\ndelivery but before the ack deadline expires and before the message is\nacknowledged, it is an outstanding message and will not be delivered\nagain during that time (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. 
To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using\npull.\nThe minimum custom deadline you can specify is 10 seconds.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\nIf this parameter is 0, a default value of 10 seconds is used.\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.", + "format": "int32", + "type": "integer" }, - "messageId": { - "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. It must not be populated by the publisher in a `Publish` call.", + "name": { + "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", "type": "string" }, - "publishTime": { - "description": "The time at which the message was published, populated by the server when\nit receives the `Publish` call. It must not be populated by the\npublisher in a `Publish` call.", - "type": "string", - "format": "google-datetime" + "topic": { + "description": "The name of the topic from which this subscription is receiving messages.\nFormat is `projects/{project}/topics/{topic}`.\nThe value of this field will be `_deleted-topic_` if the topic has been\ndeleted.", + "type": "string" } }, - "id": "PubsubMessage" + "id": "Subscription", + "description": "A subscription resource.", + "type": "object" }, - "AcknowledgeRequest": { - "description": "Request for the Acknowledge method.", + "TestIamPermissionsRequest": { "type": "object", "properties": { - "ackIds": { - "description": "The acknowledgment ID for the messages being acknowledged that was returned\nby the Pub/Sub system in the `Pull` response. Must not be empty.", + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "type": "array", "items": { "type": "string" } } }, - "id": "AcknowledgeRequest" + "id": "TestIamPermissionsRequest", + "description": "Request message for `TestIamPermissions` method." }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "Topic": { + "id": "Topic", + "description": "A topic resource.", "type": "object", - "properties": {}, - "id": "Empty" + "properties": { + "name": { + "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. 
`{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "type": "string" + } + } }, - "ListTopicsResponse": { - "description": "Response for the `ListTopics` method.", - "type": "object", + "Policy": { "properties": { - "nextPageToken": { - "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`.", + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", "type": "string" }, - "topics": { - "description": "The resulting topics.", + "version": { + "type": "integer", + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", "type": "array", "items": { - "$ref": "Topic" + "$ref": "Binding" } } }, - "id": "ListTopicsResponse" + "id": "Policy", + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "type": "object" }, - "Binding": { - "description": "Associates `members` with a `role`.", + "ModifyAckDeadlineRequest": { + "description": "Request for the ModifyAckDeadline method.", "type": "object", "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "ackDeadlineSeconds": { + "description": "The new ack deadline with respect to the time this request was sent to\nthe Pub/Sub system. For example, if the value is 10, the new\nack deadline will expire 10 seconds after the `ModifyAckDeadline` call\nwas made. Specifying zero may immediately make the message available for\nanother pull request.\nThe minimum deadline you can specify is 0 seconds.\nThe maximum deadline you can specify is 600 seconds (10 minutes).", + "format": "int32", + "type": "integer" + }, + "ackIds": { + "description": "List of acknowledgment IDs.", "type": "array", "items": { "type": "string" } - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" } }, - "id": "Binding" + "id": "ModifyAckDeadlineRequest" + }, + "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "type": "object", + "properties": { + "policy": { + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", + "$ref": "Policy" + } + }, + "id": "SetIamPolicyRequest" } }, - "revision": "20170105", - "basePath": "", "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, - "discoveryVersion": "v1", + "protocol": "rest", + "version": "v1", "baseUrl": "https://pubsub.googleapis.com/", - "name": "pubsub", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/pubsub": { + "description": "View and manage Pub/Sub topics and subscriptions" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } }, - "documentationLink": "https://cloud.google.com/pubsub/docs", - "ownerDomain": "google.com", - "batchPath": "batch", + "kind": "discovery#restDescription", + "description": "Provides reliable, many-to-many, asynchronous messaging between applications.\n", "servicePath": "", - "ownerName": "Google", - "version": "v1", - "rootUrl": "https://pubsub.googleapis.com/", - "kind": "discovery#restDescription" + "rootUrl": "https://pubsub.googleapis.com/" } diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go index c2cb87cd0..6b2d92366 100644 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Snapshots = NewProjectsSnapshotsService(s) @@ -1120,6 +1125,7 @@ func (c *ProjectsSnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Respons 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1253,6 +1259,7 @@ func (c *ProjectsSnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -1393,6 +1400,7 @@ func (c *ProjectsSnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -1539,6 +1547,7 @@ func (c *ProjectsSubscriptionsAcknowledgeCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest) if err != nil { @@ -1689,6 +1698,7 @@ func (c *ProjectsSubscriptionsCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { @@ -1831,6 +1841,7 @@ func (c *ProjectsSubscriptionsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}") @@ -1968,6 +1979,7 @@ func (c *ProjectsSubscriptionsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2111,6 +2123,7 @@ func (c *ProjectsSubscriptionsGetIamPolicyCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2268,6 +2281,7 @@ func (c *ProjectsSubscriptionsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2439,6 +2453,7 @@ func (c *ProjectsSubscriptionsModifyAckDeadlineCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest) if err != nil { @@ -2585,6 +2600,7 @@ func (c *ProjectsSubscriptionsModifyPushConfigCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest) if err != nil { @@ -2727,6 +2743,7 @@ 
func (c *ProjectsSubscriptionsPullCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest) if err != nil { @@ -2865,6 +2882,7 @@ func (c *ProjectsSubscriptionsSetIamPolicyCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -3005,6 +3023,7 @@ func (c *ProjectsSubscriptionsTestIamPermissionsCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -3141,6 +3160,7 @@ func (c *ProjectsTopicsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic) if err != nil { @@ -3283,6 +3303,7 @@ func (c *ProjectsTopicsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}") @@ -3420,6 +3441,7 @@ func (c *ProjectsTopicsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3563,6 +3585,7 @@ func (c *ProjectsTopicsGetIamPolicyCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3720,6 +3743,7 @@ func (c *ProjectsTopicsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3887,6 +3911,7 @@ func (c *ProjectsTopicsPublishCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest) if err != nil { @@ -4025,6 +4050,7 @@ func (c *ProjectsTopicsSetIamPolicyCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -4165,6 +4191,7 @@ func (c *ProjectsTopicsTestIamPermissionsCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -4327,6 +4354,7 @@ func (c *ProjectsTopicsSubscriptionsListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-api.json index 8bf3b7eae..9092e4089 100644 --- a/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-api.json +++ b/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-api.json @@ -1,460 +1,544 @@ { - "id": "pubsub:v1beta1a", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/pubsub": { - "description": "View and manage Pub/Sub topics and subscriptions" - } - } - } - }, - "description": "Provides reliable, many-to-many, asynchronous messaging between applications.\n", - "protocol": "rest", - "title": "Google Cloud Pub/Sub API", + "discoveryVersion": "v1", + "ownerName": "Google", "resources": { - "subscriptions": { + "topics": { "methods": { - "modifyPushConfig": { - "id": "pubsub.subscriptions.modifyPushConfig", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [], - "description": "Modifies the \u003ccode\u003ePushConfig\u003c/code\u003e for a specified subscription.\nThis method can be used to suspend the flow of messages to an endpoint\nby clearing the \u003ccode\u003ePushConfig\u003c/code\u003e field in the request. Messages\nwill be accumulated for delivery even if no push configuration is\ndefined or while the configuration is modified.", + "publish": { + "description": "Adds a message to the topic. Returns NOT_FOUND if the topic does not\nexist.", "request": { - "$ref": "ModifyPushConfigRequest" + "$ref": "PublishRequest" }, - "flatPath": "v1beta1a/subscriptions/modifyPushConfig", "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1a/subscriptions/modifyPushConfig", + "parameterOrder": [], + "response": { + "$ref": "Empty" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "parameters": {}, + "flatPath": "v1beta1a/topics/publish", + "id": "pubsub.topics.publish", + "path": "v1beta1a/topics/publish" }, - "pull": { - "id": "pubsub.subscriptions.pull", + "delete": { + "flatPath": "v1beta1a/topics/{topicsId}", + "path": "v1beta1a/topics/{+topic}", + "id": "pubsub.topics.delete", + "description": "Deletes the topic with the given name. Returns NOT_FOUND if the topic does\nnot exist. After a topic is deleted, a new topic may be created with the\nsame name.", "response": { - "$ref": "PullResponse" + "$ref": "Empty" }, - "parameterOrder": [], - "description": "Pulls a single message from the server.\nIf return_immediately is true, and no messages are available in the\nsubscription, this method returns FAILED_PRECONDITION. 
The system is free\nto return an UNAVAILABLE error if no messages are available in a\nreasonable amount of time (to reduce system load).", + "parameterOrder": [ + "topic" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "topic": { + "description": "Name of the topic to delete.", + "required": true, + "type": "string", + "pattern": "^.+$", + "location": "path" + } + } + }, + "publishBatch": { + "flatPath": "v1beta1a/topics/publishBatch", + "path": "v1beta1a/topics/publishBatch", + "id": "pubsub.topics.publishBatch", "request": { - "$ref": "PullRequest" + "$ref": "PublishBatchRequest" }, - "flatPath": "v1beta1a/subscriptions/pull", + "description": "Adds one or more messages to the topic. Returns NOT_FOUND if the topic does\nnot exist.", + "response": { + "$ref": "PublishBatchResponse" + }, + "parameterOrder": [], "httpMethod": "POST", "parameters": {}, - "path": "v1beta1a/subscriptions/pull", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "list": { - "id": "pubsub.subscriptions.list", "response": { - "$ref": "ListSubscriptionsResponse" + "$ref": "ListTopicsResponse" }, "parameterOrder": [], - "description": "Lists matching subscriptions.", - "flatPath": "v1beta1a/subscriptions", "httpMethod": "GET", "parameters": { + "pageToken": { + "description": "The value obtained in the last \u003ccode\u003eListTopicsResponse\u003c/code\u003e\nfor continuation.", + "type": "string", + "location": "query" + }, "query": { - "description": "A valid label query expression.", "location": "query", + "description": "A valid label query expression.", "type": "string" }, "maxResults": { - "description": "Maximum number of subscriptions to return.", - "location": "query", + "description": "Maximum number of topics to return.", + "format": "int32", "type": "integer", - "format": "int32" - }, - "pageToken": { - "description": "The value obtained in the last \u003ccode\u003eListSubscriptionsResponse\u003c/code\u003e\nfor continuation.", - "location": "query", - "type": "string" + "location": "query" } }, - "path": "v1beta1a/subscriptions", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, - "get": { - "id": "pubsub.subscriptions.get", - "response": { - "$ref": "Subscription" - }, - "parameterOrder": [ - "subscription" ], - "description": "Gets the configuration details of a subscription.", - "flatPath": "v1beta1a/subscriptions/{subscriptionsId}", - "httpMethod": "GET", - "parameters": { - "subscription": { - "description": "The name of the subscription to get.", - "required": true, - "pattern": "^.+$", - "location": "path", - "type": "string" - } - }, - "path": "v1beta1a/subscriptions/{+subscription}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1beta1a/topics", + "path": "v1beta1a/topics", + "id": "pubsub.topics.list", + "description": "Lists matching topics." }, - "pullBatch": { - "id": "pubsub.subscriptions.pullBatch", - "response": { - "$ref": "PullBatchResponse" - }, - "parameterOrder": [], - "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. 
The system is free to return UNAVAILABLE\nif there are too many pull requests outstanding for the given subscription.", + "create": { + "description": "Creates the given topic with the given name.", "request": { - "$ref": "PullBatchRequest" + "$ref": "Topic" }, - "flatPath": "v1beta1a/subscriptions/pullBatch", "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1a/subscriptions/pullBatch", + "parameterOrder": [], + "response": { + "$ref": "Topic" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "parameters": {}, + "flatPath": "v1beta1a/topics", + "id": "pubsub.topics.create", + "path": "v1beta1a/topics" }, - "create": { - "id": "pubsub.subscriptions.create", + "get": { + "description": "Gets the configuration of a topic. Since the topic only has the name\nattribute, this method is only useful to check the existence of a topic.\nIf other attributes are added in the future, they will be returned here.", "response": { - "$ref": "Subscription" - }, - "parameterOrder": [], - "description": "Creates a subscription on a given topic for a given subscriber.\nIf the subscription already exists, returns ALREADY_EXISTS.\nIf the corresponding topic doesn't exist, returns NOT_FOUND.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic.", - "request": { - "$ref": "Subscription" + "$ref": "Topic" }, - "flatPath": "v1beta1a/subscriptions", - "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1a/subscriptions", + "parameterOrder": [ + "topic" + ], + "httpMethod": "GET", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, + ], + "parameters": { + "topic": { + "location": "path", + "description": "The name of the topic to get.", + "required": true, + "type": "string", + "pattern": "^.+$" + } + }, + "flatPath": "v1beta1a/topics/{topicsId}", + "path": "v1beta1a/topics/{+topic}", + "id": "pubsub.topics.get" + } + } + }, + "subscriptions": { + "methods": { "modifyAckDeadline": { - "id": "pubsub.subscriptions.modifyAckDeadline", "response": { "$ref": "Empty" }, "parameterOrder": [], - "description": "Modifies the Ack deadline for a message received from a pull request.", - "request": { - "$ref": "ModifyAckDeadlineRequest" - }, - "flatPath": "v1beta1a/subscriptions/modifyAckDeadline", "httpMethod": "POST", "parameters": {}, - "path": "v1beta1a/subscriptions/modifyAckDeadline", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, - "delete": { - "id": "pubsub.subscriptions.delete", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" ], - "description": "Deletes an existing subscription. All pending messages in the subscription\nare immediately dropped. 
Calls to Pull after deletion will return\nNOT_FOUND.", - "flatPath": "v1beta1a/subscriptions/{subscriptionsId}", - "httpMethod": "DELETE", - "parameters": { - "subscription": { - "description": "The subscription to delete.", - "required": true, - "pattern": "^.+$", - "location": "path", - "type": "string" - } + "flatPath": "v1beta1a/subscriptions/modifyAckDeadline", + "path": "v1beta1a/subscriptions/modifyAckDeadline", + "id": "pubsub.subscriptions.modifyAckDeadline", + "request": { + "$ref": "ModifyAckDeadlineRequest" }, - "path": "v1beta1a/subscriptions/{+subscription}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "description": "Modifies the Ack deadline for a message received from a pull request." }, "acknowledge": { + "flatPath": "v1beta1a/subscriptions/acknowledge", + "path": "v1beta1a/subscriptions/acknowledge", "id": "pubsub.subscriptions.acknowledge", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [], "description": "Acknowledges a particular received message: the Pub/Sub system can remove\nthe given message from the subscription. Acknowledging a message whose\nAck deadline has expired may succeed, but the message could have been\nalready redelivered. Acknowledging a message more than once will not\nresult in an error. This is only used for messages received via pull.", "request": { "$ref": "AcknowledgeRequest" }, - "flatPath": "v1beta1a/subscriptions/acknowledge", - "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1a/subscriptions/acknowledge", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - } - } - }, - "topics": { - "methods": { - "publish": { - "id": "pubsub.topics.publish", "response": { "$ref": "Empty" }, "parameterOrder": [], - "description": "Adds a message to the topic. Returns NOT_FOUND if the topic does not\nexist.", - "request": { - "$ref": "PublishRequest" - }, - "flatPath": "v1beta1a/topics/publish", "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1a/topics/publish", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "parameters": {} }, - "list": { - "id": "pubsub.topics.list", - "response": { - "$ref": "ListTopicsResponse" + "pullBatch": { + "id": "pubsub.subscriptions.pullBatch", + "path": "v1beta1a/subscriptions/pullBatch", + "request": { + "$ref": "PullBatchRequest" }, + "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. 
The system is free to return UNAVAILABLE\nif there are too many pull requests outstanding for the given subscription.", + "httpMethod": "POST", "parameterOrder": [], - "description": "Lists matching topics.", - "flatPath": "v1beta1a/topics", - "httpMethod": "GET", - "parameters": { - "query": { - "description": "A valid label query expression.", - "location": "query", - "type": "string" - }, - "maxResults": { - "description": "Maximum number of topics to return.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "pageToken": { - "description": "The value obtained in the last \u003ccode\u003eListTopicsResponse\u003c/code\u003e\nfor continuation.", - "location": "query", - "type": "string" - } + "response": { + "$ref": "PullBatchResponse" }, - "path": "v1beta1a/topics", + "parameters": {}, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta1a/subscriptions/pullBatch" }, "get": { - "id": "pubsub.topics.get", + "flatPath": "v1beta1a/subscriptions/{subscriptionsId}", + "id": "pubsub.subscriptions.get", + "path": "v1beta1a/subscriptions/{+subscription}", + "description": "Gets the configuration details of a subscription.", + "httpMethod": "GET", + "parameterOrder": [ + "subscription" + ], "response": { - "$ref": "Topic" + "$ref": "Subscription" }, - "parameterOrder": [ - "topic" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" ], - "description": "Gets the configuration of a topic. Since the topic only has the name\nattribute, this method is only useful to check the existence of a topic.\nIf other attributes are added in the future, they will be returned here.", - "flatPath": "v1beta1a/topics/{topicsId}", - "httpMethod": "GET", "parameters": { - "topic": { - "description": "The name of the topic to get.", + "subscription": { "required": true, + "type": "string", "pattern": "^.+$", "location": "path", - "type": "string" + "description": "The name of the subscription to get." } - }, - "path": "v1beta1a/topics/{+topic}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + } }, - "publishBatch": { - "id": "pubsub.topics.publishBatch", + "modifyPushConfig": { + "flatPath": "v1beta1a/subscriptions/modifyPushConfig", + "path": "v1beta1a/subscriptions/modifyPushConfig", + "id": "pubsub.subscriptions.modifyPushConfig", + "request": { + "$ref": "ModifyPushConfigRequest" + }, + "description": "Modifies the \u003ccode\u003ePushConfig\u003c/code\u003e for a specified subscription.\nThis method can be used to suspend the flow of messages to an endpoint\nby clearing the \u003ccode\u003ePushConfig\u003c/code\u003e field in the request. Messages\nwill be accumulated for delivery even if no push configuration is\ndefined or while the configuration is modified.", "response": { - "$ref": "PublishBatchResponse" + "$ref": "Empty" }, "parameterOrder": [], - "description": "Adds one or more messages to the topic. 
Returns NOT_FOUND if the topic does\nnot exist.", - "request": { - "$ref": "PublishBatchRequest" - }, - "flatPath": "v1beta1a/topics/publishBatch", "httpMethod": "POST", "parameters": {}, - "path": "v1beta1a/topics/publishBatch", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, - "create": { - "id": "pubsub.topics.create", - "response": { - "$ref": "Topic" - }, + "pull": { + "httpMethod": "POST", "parameterOrder": [], - "description": "Creates the given topic with the given name.", - "request": { - "$ref": "Topic" + "response": { + "$ref": "PullResponse" }, - "flatPath": "v1beta1a/topics", - "httpMethod": "POST", "parameters": {}, - "path": "v1beta1a/topics", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta1a/subscriptions/pull", + "id": "pubsub.subscriptions.pull", + "path": "v1beta1a/subscriptions/pull", + "request": { + "$ref": "PullRequest" + }, + "description": "Pulls a single message from the server.\nIf return_immediately is true, and no messages are available in the\nsubscription, this method returns FAILED_PRECONDITION. The system is free\nto return an UNAVAILABLE error if no messages are available in a\nreasonable amount of time (to reduce system load)." }, "delete": { - "id": "pubsub.topics.delete", + "parameterOrder": [ + "subscription" + ], "response": { "$ref": "Empty" }, - "parameterOrder": [ - "topic" + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "subscription": { + "description": "The subscription to delete.", + "required": true, + "type": "string", + "pattern": "^.+$", + "location": "path" + } + }, + "flatPath": "v1beta1a/subscriptions/{subscriptionsId}", + "id": "pubsub.subscriptions.delete", + "path": "v1beta1a/subscriptions/{+subscription}", + "description": "Deletes an existing subscription. All pending messages in the subscription\nare immediately dropped. Calls to Pull after deletion will return\nNOT_FOUND." + }, + "list": { + "flatPath": "v1beta1a/subscriptions", + "id": "pubsub.subscriptions.list", + "path": "v1beta1a/subscriptions", + "description": "Lists matching subscriptions.", + "httpMethod": "GET", + "response": { + "$ref": "ListSubscriptionsResponse" + }, + "parameterOrder": [], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" ], - "description": "Deletes the topic with the given name. Returns NOT_FOUND if the topic does\nnot exist. 
After a topic is deleted, a new topic may be created with the\nsame name.", - "flatPath": "v1beta1a/topics/{topicsId}", - "httpMethod": "DELETE", "parameters": { - "topic": { - "description": "Name of the topic to delete.", - "required": true, - "pattern": "^.+$", - "location": "path", + "pageToken": { + "description": "The value obtained in the last \u003ccode\u003eListSubscriptionsResponse\u003c/code\u003e\nfor continuation.", + "type": "string", + "location": "query" + }, + "query": { + "location": "query", + "description": "A valid label query expression.", "type": "string" + }, + "maxResults": { + "description": "Maximum number of subscriptions to return.", + "format": "int32", + "type": "integer", + "location": "query" } + } + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "Subscription" }, - "path": "v1beta1a/topics/{+topic}", + "parameters": {}, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta1a/subscriptions", + "id": "pubsub.subscriptions.create", + "path": "v1beta1a/subscriptions", + "request": { + "$ref": "Subscription" + }, + "description": "Creates a subscription on a given topic for a given subscriber.\nIf the subscription already exists, returns ALREADY_EXISTS.\nIf the corresponding topic doesn't exist, returns NOT_FOUND.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic." } } } }, + "parameters": { + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "alt": { + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "access_token": { + "type": "string", + "location": "query", + "description": "OAuth access token." + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + } + }, "schemas": { - "PullBatchResponse": { - "description": "Response for the PullBatch method.", + "PushConfig": { + "description": "Configuration for a push delivery endpoint.", "type": "object", "properties": { - "pullResponses": { - "description": "Received Pub/Sub messages or status events. The Pub/Sub system will return\nzero messages if there are no more messages available in the backlog. The\nPub/Sub system may return fewer than the max_events requested even if\nthere are more messages available in the backlog.", - "type": "array", - "items": { - "$ref": "PullResponse" - } + "pushEndpoint": { + "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", + "type": "string" } }, - "id": "PullBatchResponse" + "id": "PushConfig" }, - "Topic": { - "description": "A topic resource.", + "PullRequest": { + "description": "Request for the Pull method.", "type": "object", "properties": { - "name": { - "description": "Name of the topic.", - "type": "string" + "subscription": { + "type": "string", + "description": "The subscription from which a message should be pulled." + }, + "returnImmediately": { + "description": "If this is specified as true the system will respond immediately even if\nit is not able to return a message in the Pull response. Otherwise the\nsystem is allowed to wait until at least one message is available rather\nthan returning FAILED_PRECONDITION. The client may cancel the request if\nit does not wish to wait any longer for the response.", + "type": "boolean" } }, - "id": "Topic" + "id": "PullRequest" }, - "PublishBatchResponse": { - "description": "Response for the PublishBatch method.", + "ListSubscriptionsResponse": { + "description": "Response for the ListSubscriptions method.", "type": "object", "properties": { - "messageIds": { - "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", + "subscription": { + "description": "The subscriptions that match the request.", "type": "array", "items": { - "type": "string" + "$ref": "Subscription" } + }, + "nextPageToken": { + "description": "If not empty, indicates that there are more subscriptions that match the\nrequest and this value should be passed to the next\n\u003ccode\u003eListSubscriptionsRequest\u003c/code\u003e to continue.", + "type": "string" } }, - "id": "PublishBatchResponse" + "id": "ListSubscriptionsResponse" }, - "PublishRequest": { - "description": "Request for the Publish method.", + "PubsubEvent": { + "description": "An event indicating a received message or truncation event.", "type": "object", "properties": { - "topic": { - "description": "The message in the request will be published on this topic.", - "type": "string" + "truncated": { + "description": "Indicates that this subscription has been truncated.", + "type": "boolean" + }, + "deleted": { + "description": "Indicates that this subscription has been deleted. 
(Note that pull\nsubscribers will always receive NOT_FOUND in response in their pull\nrequest on the subscription, rather than seeing this boolean.)", + "type": "boolean" }, "message": { - "description": "The message to publish.", - "$ref": "PubsubMessage" + "$ref": "PubsubMessage", + "description": "A received message." + }, + "subscription": { + "description": "The subscription that received the event.", + "type": "string" } }, - "id": "PublishRequest" + "id": "PubsubEvent" }, - "ListSubscriptionsResponse": { - "description": "Response for the ListSubscriptions method.", + "PublishRequest": { + "description": "Request for the Publish method.", "type": "object", "properties": { - "nextPageToken": { - "description": "If not empty, indicates that there are more subscriptions that match the\nrequest and this value should be passed to the next\n\u003ccode\u003eListSubscriptionsRequest\u003c/code\u003e to continue.", + "topic": { + "description": "The message in the request will be published on this topic.", "type": "string" }, - "subscription": { - "description": "The subscriptions that match the request.", - "type": "array", - "items": { - "$ref": "Subscription" - } + "message": { + "$ref": "PubsubMessage", + "description": "The message to publish." } }, - "id": "ListSubscriptionsResponse" + "id": "PublishRequest" }, "Subscription": { "description": "A subscription resource.", "type": "object", "properties": { - "pushConfig": { - "description": "If push delivery is used with this subscription, this field is\nused to configure it.", - "$ref": "PushConfig" - }, "topic": { - "description": "The name of the topic from which this subscription is receiving messages.", - "type": "string" + "type": "string", + "description": "The name of the topic from which this subscription is receiving messages." + }, + "pushConfig": { + "$ref": "PushConfig", + "description": "If push delivery is used with this subscription, this field is\nused to configure it." }, "ackDeadlineSeconds": { "description": "For either push or pull delivery, the value is the maximum time after a\nsubscriber receives a message before the subscriber should acknowledge or\nNack the message. If the Ack deadline for a message passes without an\nAck or a Nack, the Pub/Sub system will eventually redeliver the message.\nIf a subscriber acknowledges after the deadline, the Pub/Sub system may\naccept the Ack, but it is possible that the message has been already\ndelivered again. Multiple Acks to the message are allowed and will\nsucceed.\n\nFor push delivery, this value is used to set the request timeout for\nthe call to the push endpoint.\n\nFor pull delivery, this value is used as the initial value for the Ack\ndeadline. It may be overridden for each message using its corresponding\nack_id with \u003ccode\u003eModifyAckDeadline\u003c/code\u003e.\nWhile a message is outstanding (i.e. it has been delivered to a pull\nsubscriber and the subscriber has not yet Acked or Nacked), the Pub/Sub\nsystem will not deliver that message to another pull subscriber\n(on a best-effort basis).", - "type": "integer", - "format": "int32" + "format": "int32", + "type": "integer" }, "name": { "description": "Name of the subscription.", @@ -463,6 +547,31 @@ }, "id": "Subscription" }, + "PublishBatchResponse": { + "description": "Response for the PublishBatch method.", + "type": "object", + "properties": { + "messageIds": { + "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. 
IDs are guaranteed to be unique within\nthe topic.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "PublishBatchResponse" + }, + "Topic": { + "id": "Topic", + "description": "A topic resource.", + "type": "object", + "properties": { + "name": { + "description": "Name of the topic.", + "type": "string" + } + } + }, "Label": { "description": "A key-value pair applied to a given object.", "type": "object", @@ -471,14 +580,14 @@ "description": "A string value.", "type": "string" }, + "numValue": { + "description": "An integer value.", + "format": "int64", + "type": "string" + }, "key": { "description": "The key of a label is a syntactically valid URL (as per RFC 1738) with\nthe \"scheme\" and initial slashes omitted and with the additional\nrestrictions noted below. Each key should be globally unique. The\n\"host\" portion is called the \"namespace\" and is not necessarily\nresolvable to a network endpoint. Instead, the namespace indicates what\nsystem or entity defines the semantics of the label. Namespaces do not\nrestrict the set of objects to which a label may be associated.\n\nKeys are defined by the following grammar:\n\n key = hostname \"/\" kpath\n kpath = ksegment *[ \"/\" ksegment ]\n ksegment = alphadigit | *[ alphadigit | \"-\" | \"_\" | \".\" ]\n\nwhere \"hostname\" and \"alphadigit\" are defined as in RFC 1738.\n\nExample key:\n spanner.google.com/universe", "type": "string" - }, - "numValue": { - "description": "An integer value.", - "type": "string", - "format": "int64" } }, "id": "Label" @@ -487,18 +596,14 @@ "description": "Request for the ModifyAckDeadline method.", "type": "object", "properties": { - "ackDeadlineSeconds": { - "description": "The new ack deadline with respect to the time this request was sent to the\nPub/Sub system. Must be \u003e= 0. For example, if the value is 10, the new ack\ndeadline will expire 10 seconds after the ModifyAckDeadline call was made.\nSpecifying zero may immediately make the message available for another pull\nrequest.", - "type": "integer", - "format": "int32" - }, "ackId": { "description": "The acknowledgment ID. Either this or ack_ids must be populated,\nnot both.", "type": "string" }, - "subscription": { - "description": "Next Index: 5\nThe name of the subscription from which messages are being pulled.", - "type": "string" + "ackDeadlineSeconds": { + "description": "The new ack deadline with respect to the time this request was sent to the\nPub/Sub system. Must be \u003e= 0. For example, if the value is 10, the new ack\ndeadline will expire 10 seconds after the ModifyAckDeadline call was made.\nSpecifying zero may immediately make the message available for another pull\nrequest.", + "format": "int32", + "type": "integer" }, "ackIds": { "description": "List of acknowledgment IDs. 
Either this field or ack_id\nshould be populated, not both.", @@ -506,35 +611,33 @@ "items": { "type": "string" } + }, + "subscription": { + "description": "Next Index: 5\nThe name of the subscription from which messages are being pulled.", + "type": "string" } }, "id": "ModifyAckDeadlineRequest" }, - "PushConfig": { - "description": "Configuration for a push delivery endpoint.", - "type": "object", + "PullBatchRequest": { "properties": { - "pushEndpoint": { - "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", + "subscription": { + "description": "The subscription from which messages should be pulled.", "type": "string" - } - }, - "id": "PushConfig" - }, - "PullRequest": { - "description": "Request for the Pull method.", - "type": "object", - "properties": { + }, "returnImmediately": { - "description": "If this is specified as true the system will respond immediately even if\nit is not able to return a message in the Pull response. Otherwise the\nsystem is allowed to wait until at least one message is available rather\nthan returning FAILED_PRECONDITION. The client may cancel the request if\nit does not wish to wait any longer for the response.", + "description": "If this is specified as true the system will respond immediately even if\nit is not able to return a message in the Pull response. Otherwise the\nsystem is allowed to wait until at least one message is available rather\nthan returning no messages. The client may cancel the request if it does\nnot wish to wait any longer for the response.", "type": "boolean" }, - "subscription": { - "description": "The subscription from which a message should be pulled.", - "type": "string" + "maxEvents": { + "description": "The maximum number of PubsubEvents returned for this request. The Pub/Sub\nsystem may return fewer than the number of events specified.", + "format": "int32", + "type": "integer" } }, - "id": "PullRequest" + "id": "PullBatchRequest", + "description": "Request for the PullBatch method.", + "type": "object" }, "ModifyPushConfigRequest": { "description": "Request for the ModifyPushConfig method.", @@ -551,256 +654,153 @@ }, "id": "ModifyPushConfigRequest" }, - "PullResponse": { - "description": "Either a \u003ccode\u003ePubsubMessage\u003c/code\u003e or a truncation event. One of these two\nmust be populated.", - "type": "object", - "properties": { - "pubsubEvent": { - "description": "A pubsub message or truncation event.", - "$ref": "PubsubEvent" - }, - "ackId": { - "description": "This ID must be used to acknowledge the received event or message.", - "type": "string" - } - }, - "id": "PullResponse" - }, "PubsubMessage": { + "id": "PubsubMessage", "description": "A message data and its labels.", "type": "object", "properties": { - "data": { - "description": "The message payload.", - "type": "string", - "format": "byte" - }, "messageId": { "description": "ID of this message assigned by the server at publication time. Guaranteed\nto be unique within the topic. This value may be read by a subscriber\nthat receives a PubsubMessage via a Pull call or a push delivery. It must\nnot be populated by a publisher in a Publish call.", "type": "string" }, - "publishTime": { - "description": "The time at which the message was published.\nThe time is milliseconds since the UNIX epoch.", - "type": "string", - "format": "int64" - }, "label": { "description": "Optional list of labels for this message. 
Keys in this collection must\nbe unique.", "type": "array", "items": { "$ref": "Label" } + }, + "publishTime": { + "description": "The time at which the message was published.\nThe time is milliseconds since the UNIX epoch.", + "format": "int64", + "type": "string" + }, + "data": { + "description": "The message payload.", + "format": "byte", + "type": "string" } - }, - "id": "PubsubMessage" + } }, - "PublishBatchRequest": { - "description": "Request for the PublishBatch method.", + "AcknowledgeRequest": { + "description": "Request for the Acknowledge method.", "type": "object", "properties": { - "topic": { - "description": "The messages in the request will be published on this topic.", + "subscription": { + "description": "The subscription whose message is being acknowledged.", "type": "string" }, - "messages": { - "description": "The messages to publish.", + "ackId": { + "description": "The acknowledgment ID for the message being acknowledged. This was\nreturned by the Pub/Sub system in the Pull response.", "type": "array", "items": { - "$ref": "PubsubMessage" + "type": "string" } } }, - "id": "PublishBatchRequest" + "id": "AcknowledgeRequest" }, - "AcknowledgeRequest": { - "description": "Request for the Acknowledge method.", + "PullBatchResponse": { "type": "object", "properties": { - "ackId": { - "description": "The acknowledgment ID for the message being acknowledged. This was\nreturned by the Pub/Sub system in the Pull response.", + "pullResponses": { + "description": "Received Pub/Sub messages or status events. The Pub/Sub system will return\nzero messages if there are no more messages available in the backlog. The\nPub/Sub system may return fewer than the max_events requested even if\nthere are more messages available in the backlog.", "type": "array", "items": { - "type": "string" + "$ref": "PullResponse" } - }, - "subscription": { - "description": "The subscription whose message is being acknowledged.", - "type": "string" } }, - "id": "AcknowledgeRequest" + "id": "PullBatchResponse", + "description": "Response for the PullBatch method." }, "Empty": { - "description": "An empty message that you can re-use to avoid defining duplicated empty\nmessages in your project. A typical example is to use it as argument or the\nreturn value of a service API. For instance:\n\n service Foo {\n rpc Bar (proto2.Empty) returns (proto2.Empty) { };\n };\n\nBEGIN GOOGLE-INTERNAL\nThe difference between this one and net/rpc/empty-message.proto is that\n1) The generated message here is in proto2 C++ API.\n2) The proto2.Empty has minimum dependencies\n (no message_set or net/rpc dependencies)\nEND GOOGLE-INTERNAL", "type": "object", "properties": {}, - "id": "Empty" + "id": "Empty", + "description": "An empty message that you can re-use to avoid defining duplicated empty\nmessages in your project. A typical example is to use it as argument or the\nreturn value of a service API. 
For instance:\n\n service Foo {\n rpc Bar (proto2.Empty) returns (proto2.Empty) { };\n };\n\nBEGIN GOOGLE-INTERNAL\nThe difference between this one and net/rpc/empty-message.proto is that\n1) The generated message here is in proto2 C++ API.\n2) The proto2.Empty has minimum dependencies\n (no message_set or net/rpc dependencies)\nEND GOOGLE-INTERNAL" }, - "ListTopicsResponse": { - "description": "Response for the ListTopics method.", + "PublishBatchRequest": { + "description": "Request for the PublishBatch method.", "type": "object", "properties": { "topic": { - "description": "The resulting topics.", + "description": "The messages in the request will be published on this topic.", + "type": "string" + }, + "messages": { + "description": "The messages to publish.", "type": "array", "items": { - "$ref": "Topic" + "$ref": "PubsubMessage" } - }, - "nextPageToken": { - "description": "If not empty, indicates that there are more topics that match the request,\nand this value should be passed to the next \u003ccode\u003eListTopicsRequest\u003c/code\u003e\nto continue.", - "type": "string" } }, - "id": "ListTopicsResponse" + "id": "PublishBatchRequest" }, - "PubsubEvent": { - "description": "An event indicating a received message or truncation event.", - "type": "object", + "ListTopicsResponse": { "properties": { - "truncated": { - "description": "Indicates that this subscription has been truncated.", - "type": "boolean" - }, - "deleted": { - "description": "Indicates that this subscription has been deleted. (Note that pull\nsubscribers will always receive NOT_FOUND in response in their pull\nrequest on the subscription, rather than seeing this boolean.)", - "type": "boolean" + "topic": { + "type": "array", + "items": { + "$ref": "Topic" + }, + "description": "The resulting topics." }, - "subscription": { - "description": "The subscription that received the event.", + "nextPageToken": { + "description": "If not empty, indicates that there are more topics that match the request,\nand this value should be passed to the next \u003ccode\u003eListTopicsRequest\u003c/code\u003e\nto continue.", "type": "string" - }, - "message": { - "description": "A received message.", - "$ref": "PubsubMessage" } }, - "id": "PubsubEvent" + "id": "ListTopicsResponse", + "description": "Response for the ListTopics method.", + "type": "object" }, - "PullBatchRequest": { - "description": "Request for the PullBatch method.", + "PullResponse": { + "id": "PullResponse", + "description": "Either a \u003ccode\u003ePubsubMessage\u003c/code\u003e or a truncation event. One of these two\nmust be populated.", "type": "object", "properties": { - "maxEvents": { - "description": "The maximum number of PubsubEvents returned for this request. The Pub/Sub\nsystem may return fewer than the number of events specified.", - "type": "integer", - "format": "int32" - }, - "returnImmediately": { - "description": "If this is specified as true the system will respond immediately even if\nit is not able to return a message in the Pull response. Otherwise the\nsystem is allowed to wait until at least one message is available rather\nthan returning no messages. 
The client may cancel the request if it does\nnot wish to wait any longer for the response.", - "type": "boolean" - }, - "subscription": { - "description": "The subscription from which messages should be pulled.", + "ackId": { + "description": "This ID must be used to acknowledge the received event or message.", "type": "string" + }, + "pubsubEvent": { + "$ref": "PubsubEvent", + "description": "A pubsub message or truncation event." } - }, - "id": "PullBatchRequest" + } } }, - "revision": "20170105", - "basePath": "", "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, - "discoveryVersion": "v1", + "protocol": "rest", + "version": "v1beta1a", "baseUrl": "https://pubsub.googleapis.com/", - "name": "pubsub", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", - "type": "string", - "location": "query" + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/pubsub": { + "description": "View and manage Pub/Sub topics and subscriptions" + } + } } }, - "documentationLink": "https://cloud.google.com/pubsub/docs", - "ownerDomain": "google.com", - "batchPath": "batch", "servicePath": "", - "ownerName": "Google", - "version": "v1beta1a", + "kind": "discovery#restDescription", + "description": "Provides reliable, many-to-many, asynchronous messaging between applications.\n", "rootUrl": "https://pubsub.googleapis.com/", - "kind": "discovery#restDescription" + "basePath": "", + "ownerDomain": "google.com", + "name": "pubsub", + "batchPath": "batch", + "documentationLink": "https://cloud.google.com/pubsub/docs", + "revision": "20170124", + "id": "pubsub:v1beta1a", + "title": "Google Cloud Pub/Sub API" } diff --git a/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-gen.go index dd370e1e2..faa6272fa 100644 --- a/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-gen.go +++ b/vendor/google.golang.org/api/pubsub/v1beta1a/pubsub-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Subscriptions *SubscriptionsService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewSubscriptionsService(s *Service) *SubscriptionsService { rs := &SubscriptionsService{s: s} return rs @@ -915,6 +920,7 @@ func (c *SubscriptionsAcknowledgeCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest) if err != nil { @@ -1043,6 +1049,7 @@ func (c *SubscriptionsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { @@ -1168,6 +1175,7 @@ func (c *SubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1a/subscriptions/{+subscription}") @@ -1305,6 +1313,7 @@ func (c *SubscriptionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1465,6 +1474,7 @@ func (c *SubscriptionsListCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1620,6 +1630,7 @@ func (c *SubscriptionsModifyAckDeadlineCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest) if err != nil { @@ -1749,6 +1760,7 @@ func (c *SubscriptionsModifyPushConfigCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest) if err != nil { @@ -1877,6 +1889,7 @@ func (c *SubscriptionsPullCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest) if err != nil { @@ -2003,6 +2016,7 @@ func (c *SubscriptionsPullBatchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullbatchrequest) if err != nil { @@ -2124,6 +2138,7 @@ func (c *TopicsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic) if err != nil { @@ -2249,6 +2264,7 @@ func (c *TopicsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1a/topics/{+topic}") @@ -2391,6 +2407,7 @@ func (c *TopicsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2551,6 +2568,7 @@ func (c *TopicsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2707,6 +2725,7 @@ func (c *TopicsPublishCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest) if err != nil { @@ -2830,6 +2849,7 @@ func (c *TopicsPublishBatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishbatchrequest) if err 
!= nil { diff --git a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json index 1afdbbeca..3e69231b6 100644 --- a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json +++ b/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json @@ -1,5 +1,4 @@ { - "id": "pubsub:v1beta2", "auth": { "oauth2": { "scopes": { @@ -12,627 +11,715 @@ } } }, + "kind": "discovery#restDescription", "description": "Provides reliable, many-to-many, asynchronous messaging between applications.\n", - "protocol": "rest", + "servicePath": "", + "rootUrl": "https://pubsub.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "pubsub", + "batchPath": "batch", + "revision": "20170124", + "documentationLink": "https://cloud.google.com/pubsub/docs", + "id": "pubsub:v1beta2", "title": "Google Cloud Pub/Sub API", + "discoveryVersion": "v1", + "ownerName": "Google", "resources": { "projects": { "resources": { - "subscriptions": { + "topics": { "methods": { - "modifyPushConfig": { - "id": "pubsub.projects.subscriptions.modifyPushConfig", - "response": { - "$ref": "Empty" + "publish": { + "request": { + "$ref": "PublishRequest" }, + "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist. The message payload must not be empty; it must contain\n either a non-empty data field, or at least one attribute.", + "httpMethod": "POST", "parameterOrder": [ - "subscription" + "topic" ], - "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", - "request": { - "$ref": "ModifyPushConfigRequest" + "response": { + "$ref": "PublishResponse" }, - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", - "httpMethod": "POST", "parameters": { - "subscription": { - "description": "The name of the subscription.", + "topic": { "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", - "type": "string" + "description": "The messages in the request will be published on this topic." } }, - "path": "v1beta2/{+subscription}:modifyPushConfig", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:publish", + "id": "pubsub.projects.topics.publish", + "path": "v1beta2/{+topic}:publish" }, - "getIamPolicy": { - "id": "pubsub.projects.subscriptions.getIamPolicy", + "testIamPermissions": { "response": { - "$ref": "Policy" + "$ref": "TestIamPermissionsResponse" }, "parameterOrder": [ "resource" ], - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", - "httpMethod": "GET", + "httpMethod": "POST", "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", - "type": "string" + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$" } }, - "path": "v1beta2/{+resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:testIamPermissions", + "path": "v1beta2/{+resource}:testIamPermissions", + "id": "pubsub.projects.topics.testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." }, - "pull": { - "id": "pubsub.projects.subscriptions.pull", + "delete": { + "httpMethod": "DELETE", "response": { - "$ref": "PullResponse" + "$ref": "Empty" }, "parameterOrder": [ - "subscription" + "topic" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" ], - "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", - "request": { - "$ref": "PullRequest" - }, - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", - "httpMethod": "POST", "parameters": { - "subscription": { - "description": "The subscription from which messages should be pulled.", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "topic": { "location": "path", - "type": "string" + "description": "Name of the topic to delete.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$" } }, - "path": "v1beta2/{+subscription}:pull", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}", + "id": "pubsub.projects.topics.delete", + "path": "v1beta2/{+topic}", + "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`." 
}, "list": { - "id": "pubsub.projects.subscriptions.list", + "flatPath": "v1beta2/projects/{projectsId}/topics", + "path": "v1beta2/{+project}/topics", + "id": "pubsub.projects.topics.list", + "description": "Lists matching topics.", "response": { - "$ref": "ListSubscriptionsResponse" + "$ref": "ListTopicsResponse" }, "parameterOrder": [ "project" ], - "description": "Lists matching subscriptions.", - "flatPath": "v1beta2/projects/{projectsId}/subscriptions", "httpMethod": "GET", "parameters": { + "pageToken": { + "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", + "type": "string", + "location": "query" + }, "pageSize": { - "description": "Maximum number of subscriptions to return.", - "location": "query", + "description": "Maximum number of topics to return.", + "format": "int32", "type": "integer", - "format": "int32" + "location": "query" }, "project": { - "description": "The name of the cloud project that subscriptions belong to.", + "description": "The name of the cloud project that topics belong to.", "required": true, + "type": "string", "pattern": "^projects/[^/]+$", - "location": "path", - "type": "string" - }, - "pageToken": { - "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", - "location": "query", - "type": "string" + "location": "path" } }, - "path": "v1beta2/{+project}/subscriptions", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, - "get": { - "id": "pubsub.projects.subscriptions.get", - "response": { - "$ref": "Subscription" + "create": { + "description": "Creates the given topic with the given name.", + "request": { + "$ref": "Topic" }, + "httpMethod": "PUT", "parameterOrder": [ - "subscription" + "name" ], - "description": "Gets the configuration details of a subscription.", - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}", - "httpMethod": "GET", - "parameters": { - "subscription": { - "description": "The name of the subscription to get.", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" - } + "response": { + "$ref": "Topic" }, - "path": "v1beta2/{+subscription}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] - }, - "create": { - "id": "pubsub.projects.subscriptions.create", - "response": { - "$ref": "Subscription" - }, - "parameterOrder": [ - "name" ], - "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic. Note that\nfor REST API requests, you must specify a name.", - "request": { - "$ref": "Subscription" - }, - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}", - "httpMethod": "PUT", "parameters": { "name": { - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. 
`{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", - "type": "string" + "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$" } }, - "path": "v1beta2/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}", + "id": "pubsub.projects.topics.create", + "path": "v1beta2/{+name}" }, - "modifyAckDeadline": { - "id": "pubsub.projects.subscriptions.modifyAckDeadline", - "response": { - "$ref": "Empty" + "setIamPolicy": { + "request": { + "$ref": "SetIamPolicyRequest" }, + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "httpMethod": "POST", "parameterOrder": [ - "subscription" + "resource" ], - "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", - "request": { - "$ref": "ModifyAckDeadlineRequest" + "response": { + "$ref": "Policy" }, - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", - "httpMethod": "POST", "parameters": { - "subscription": { - "description": "The name of the subscription.", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "resource": { + "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, "type": "string" } }, - "path": "v1beta2/{+subscription}:modifyAckDeadline", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:setIamPolicy", + "id": "pubsub.projects.topics.setIamPolicy", + "path": "v1beta2/{+resource}:setIamPolicy" }, - "setIamPolicy": { - "id": "pubsub.projects.subscriptions.setIamPolicy", + "getIamPolicy": { + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:getIamPolicy", + "id": "pubsub.projects.topics.getIamPolicy", + "path": "v1beta2/{+resource}:getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "httpMethod": "GET", "response": { "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", - "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$", + "location": "path" } - }, - "path": "v1beta2/{+resource}:setIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + } }, - "delete": { - "id": "pubsub.projects.subscriptions.delete", + "get": { + "path": "v1beta2/{+topic}", + "id": "pubsub.projects.topics.get", + "description": "Gets the configuration of a topic.", "response": { - "$ref": "Empty" + "$ref": "Topic" }, "parameterOrder": [ - "subscription" + "topic" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" ], - "description": "Deletes an existing subscription. All pending messages in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription, or its topic unless the same topic is specified.", - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}", - "httpMethod": "DELETE", "parameters": { - "subscription": { - "description": "The subscription to delete.", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1beta2/{+subscription}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "testIamPermissions": { - "id": "pubsub.projects.subscriptions.testIamPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1beta2/{+resource}:testIamPermissions", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "acknowledge": { - "id": "pubsub.projects.subscriptions.acknowledge", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. Acknowledging a message more\nthan once will not result in an error.", - "request": { - "$ref": "AcknowledgeRequest" - }, - "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", - "httpMethod": "POST", - "parameters": { - "subscription": { - "description": "The subscription whose message is being acknowledged.", + "topic": { + "description": "The name of the topic to get.", "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$", + "location": "path" } }, - "path": "v1beta2/{+subscription}:acknowledge", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}" } - } - }, - "topics": { + }, "resources": { "subscriptions": { "methods": { "list": { + "path": "v1beta2/{+topic}/subscriptions", "id": "pubsub.projects.topics.subscriptions.list", - "response": { - "$ref": "ListTopicSubscriptionsResponse" - }, + "description": "Lists the name of the subscriptions for this topic.", "parameterOrder": [ "topic" ], - "description": "Lists the name of the subscriptions for this topic.", - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}/subscriptions", + "response": { + "$ref": "ListTopicSubscriptionsResponse" + }, "httpMethod": "GET", "parameters": { - "topic": { - "description": "The name of the topic that subscriptions are attached to.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", + "pageToken": { + "location": "query", + "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", "type": "string" }, "pageSize": { - "description": "Maximum number of subscription names to return.", "location": "query", - "type": "integer", - "format": "int32" + "description": "Maximum number of subscription names to return.", + "format": "int32", + "type": "integer" }, - "pageToken": { - "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", - "location": "query", + "topic": { + "pattern": "^projects/[^/]+/topics/[^/]+$", + "location": "path", + "description": "The name of the topic that subscriptions are attached to.", + "required": true, "type": "string" } }, - "path": "v1beta2/{+topic}/subscriptions", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", 
"https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}/subscriptions" } } } - }, + } + }, + "subscriptions": { "methods": { - "getIamPolicy": { - "id": "pubsub.projects.topics.getIamPolicy", + "create": { + "httpMethod": "PUT", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Subscription" + }, + "parameters": { + "name": { + "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}", + "id": "pubsub.projects.subscriptions.create", + "path": "v1beta2/{+name}", + "request": { + "$ref": "Subscription" + }, + "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic. Note that\nfor REST API requests, you must specify a name." + }, + "setIamPolicy": { "response": { "$ref": "Policy" }, "parameterOrder": [ "resource" ], - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:getIamPolicy", - "httpMethod": "GET", + "httpMethod": "POST", "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path", - "type": "string" + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" } }, - "path": "v1beta2/{+resource}:getIamPolicy", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", + "path": "v1beta2/{+resource}:setIamPolicy", + "id": "pubsub.projects.subscriptions.setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy." }, - "publish": { - "id": "pubsub.projects.topics.publish", + "getIamPolicy": { + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", "response": { - "$ref": "PublishResponse" + "$ref": "Policy" }, "parameterOrder": [ - "topic" + "resource" ], - "description": "Adds one or more messages to the topic. 
Returns `NOT_FOUND` if the topic\ndoes not exist. The message payload must not be empty; it must contain\n either a non-empty data field, or at least one attribute.", + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", + "path": "v1beta2/{+resource}:getIamPolicy", + "id": "pubsub.projects.subscriptions.getIamPolicy" + }, + "modifyAckDeadline": { + "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", "request": { - "$ref": "PublishRequest" + "$ref": "ModifyAckDeadlineRequest" }, - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:publish", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "subscription" + ], "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { - "topic": { - "description": "The messages in the request will be published on this topic.", + "subscription": { "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", - "type": "string" + "description": "The name of the subscription." } }, - "path": "v1beta2/{+topic}:publish", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", + "path": "v1beta2/{+subscription}:modifyAckDeadline", + "id": "pubsub.projects.subscriptions.modifyAckDeadline" }, - "list": { - "id": "pubsub.projects.topics.list", - "response": { - "$ref": "ListTopicsResponse" + "acknowledge": { + "request": { + "$ref": "AcknowledgeRequest" }, + "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. 
Acknowledging a message more\nthan once will not result in an error.", + "httpMethod": "POST", "parameterOrder": [ - "project" + "subscription" ], - "description": "Lists matching topics.", - "flatPath": "v1beta2/projects/{projectsId}/topics", - "httpMethod": "GET", + "response": { + "$ref": "Empty" + }, "parameters": { - "pageSize": { - "description": "Maximum number of topics to return.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "project": { - "description": "The name of the cloud project that topics belong to.", - "required": true, - "pattern": "^projects/[^/]+$", + "subscription": { "location": "path", - "type": "string" - }, - "pageToken": { - "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", - "location": "query", - "type": "string" + "description": "The subscription whose message is being acknowledged.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" } }, - "path": "v1beta2/{+project}/topics", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", + "id": "pubsub.projects.subscriptions.acknowledge", + "path": "v1beta2/{+subscription}:acknowledge" }, "get": { - "id": "pubsub.projects.topics.get", - "response": { - "$ref": "Topic" - }, "parameterOrder": [ - "topic" + "subscription" ], - "description": "Gets the configuration of a topic.", - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}", + "response": { + "$ref": "Subscription" + }, "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { - "topic": { - "description": "The name of the topic to get.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", + "subscription": { + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", + "description": "The name of the subscription to get.", + "required": true, "type": "string" } }, - "path": "v1beta2/{+topic}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}", + "path": "v1beta2/{+subscription}", + "id": "pubsub.projects.subscriptions.get", + "description": "Gets the configuration details of a subscription." }, - "create": { - "id": "pubsub.projects.topics.create", + "testIamPermissions": { "response": { - "$ref": "Topic" + "$ref": "TestIamPermissionsResponse" }, "parameterOrder": [ - "name" + "resource" ], - "description": "Creates the given topic with the given name.", - "request": { - "$ref": "Topic" - }, - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}", - "httpMethod": "PUT", + "httpMethod": "POST", "parameters": { - "name": { - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). 
It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "resource": { "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "location": "path", - "type": "string" + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`." } }, - "path": "v1beta2/{+name}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", + "path": "v1beta2/{+resource}:testIamPermissions", + "id": "pubsub.projects.subscriptions.testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." }, - "setIamPolicy": { - "id": "pubsub.projects.topics.setIamPolicy", - "response": { - "$ref": "Policy" + "modifyPushConfig": { + "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", + "request": { + "$ref": "ModifyPushConfigRequest" }, + "httpMethod": "POST", "parameterOrder": [ - "resource" + "subscription" ], - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" + "response": { + "$ref": "Empty" }, - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:setIamPolicy", - "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "subscription": { + "description": "The name of the subscription.", "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", - "type": "string" + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" } }, - "path": "v1beta2/{+resource}:setIamPolicy", + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", + "id": "pubsub.projects.subscriptions.modifyPushConfig", + "path": "v1beta2/{+subscription}:modifyPushConfig" + }, + "pull": { + "response": { + "$ref": "PullResponse" + }, + "parameterOrder": [ + "subscription" + ], + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "parameters": { + "subscription": { + "location": "path", + "description": "The subscription from which messages should be pulled.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" + } + }, + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", + "path": "v1beta2/{+subscription}:pull", + "id": "pubsub.projects.subscriptions.pull", + "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", + "request": { + "$ref": "PullRequest" + } }, "delete": { - "id": "pubsub.projects.topics.delete", + "flatPath": "v1beta2/projects/{projectsId}/subscriptions/{subscriptionsId}", + "path": "v1beta2/{+subscription}", + "id": "pubsub.projects.subscriptions.delete", + "description": "Deletes an existing subscription. All pending messages in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription, or its topic unless the same topic is specified.", "response": { "$ref": "Empty" }, + "httpMethod": "DELETE", "parameterOrder": [ - "topic" + "subscription" ], - "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. 
Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}", - "httpMethod": "DELETE", "parameters": { - "topic": { - "description": "Name of the topic to delete.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", + "subscription": { "location": "path", - "type": "string" + "description": "The subscription to delete.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" } }, - "path": "v1beta2/{+topic}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, - "testIamPermissions": { - "id": "pubsub.projects.topics.testIamPermissions", + "list": { + "description": "Lists matching subscriptions.", "response": { - "$ref": "TestIamPermissionsResponse" + "$ref": "ListSubscriptionsResponse" }, "parameterOrder": [ - "resource" + "project" ], - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1beta2/projects/{projectsId}/topics/{topicsId}:testIamPermissions", - "httpMethod": "POST", + "httpMethod": "GET", "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", + "pageToken": { + "location": "query", + "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", + "type": "string" + }, + "pageSize": { + "description": "Maximum number of subscriptions to return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "project": { + "pattern": "^projects/[^/]+$", "location": "path", + "description": "The name of the cloud project that subscriptions belong to.", + "required": true, "type": "string" } }, - "path": "v1beta2/{+resource}:testIamPermissions", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" - ] + ], + "flatPath": "v1beta2/projects/{projectsId}/subscriptions", + "path": "v1beta2/{+project}/subscriptions", + "id": "pubsub.projects.subscriptions.list" } } } } } }, - "schemas": { - "Topic": { - "description": "A topic resource.", - "type": "object", - "properties": { - "name": { - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). 
It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", - "type": "string" - } - }, - "id": "Topic" + "parameters": { + "alt": { + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "type": "string", + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token." + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + } + }, + "schemas": { "SetIamPolicyRequest": { "description": "Request message for `SetIamPolicy` method.", "type": "object", @@ -644,104 +731,100 @@ }, "id": "SetIamPolicyRequest" }, - "ReceivedMessage": { - "description": "A message and its corresponding acknowledgment ID.", + "PubsubMessage": { + "description": "A message data and its attributes. The message payload must not be empty;\nit must contain either a non-empty data field, or at least one attribute.", "type": "object", "properties": { - "ackId": { - "description": "This ID can be used to acknowledge the received message.", + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional attributes for this message.", + "type": "object" + }, + "messageId": { + "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. 
It must not be populated by the publisher in a `Publish` call.", "type": "string" }, - "message": { - "description": "The message.", - "$ref": "PubsubMessage" - } - }, - "id": "ReceivedMessage" - }, - "PublishRequest": { - "description": "Request for the Publish method.", - "type": "object", - "properties": { - "messages": { - "description": "The messages to publish.", - "type": "array", - "items": { - "$ref": "PubsubMessage" - } + "publishTime": { + "description": "The time at which the message was published, populated by the server when\nit receives the `Publish` call. It must not be populated by the\npublisher in a `Publish` call.", + "format": "google-datetime", + "type": "string" + }, + "data": { + "type": "string", + "description": "The message payload. For JSON requests, the value of this field must be\n[base64-encoded](https://tools.ietf.org/html/rfc4648).", + "format": "byte" } }, - "id": "PublishRequest" + "id": "PubsubMessage" }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", + "ModifyPushConfigRequest": { "type": "object", "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "type": "array", - "items": { - "type": "string" - } + "pushConfig": { + "description": "The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` is not called.", + "$ref": "PushConfig" } }, - "id": "TestIamPermissionsResponse" + "id": "ModifyPushConfigRequest", + "description": "Request for the ModifyPushConfig method." }, - "PublishResponse": { - "description": "Response for the `Publish` method.", + "Binding": { "type": "object", "properties": { - "messageIds": { - "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", "type": "array", "items": { "type": "string" } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" } }, - "id": "PublishResponse" + "id": "Binding", + "description": "Associates `members` with a `role`." 
}, - "ListSubscriptionsResponse": { - "description": "Response for the `ListSubscriptions` method.", + "ListTopicsResponse": { + "description": "Response for the `ListTopics` method.", "type": "object", "properties": { - "subscriptions": { - "description": "The subscriptions that match the request.", + "topics": { + "description": "The resulting topics.", "type": "array", "items": { - "$ref": "Subscription" + "$ref": "Topic" } }, "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListSubscriptionsRequest` to get more subscriptions.", + "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`.", "type": "string" } }, - "id": "ListSubscriptionsResponse" + "id": "ListTopicsResponse" }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "AcknowledgeRequest": { + "description": "Request for the Acknowledge method.", "type": "object", "properties": { - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "ackIds": { + "description": "The acknowledgment ID for the messages being acknowledged that was returned\nby the Pub/Sub system in the `Pull` response. Must not be empty.", "type": "array", "items": { - "$ref": "Binding" + "type": "string" } - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "type": "string", - "format": "byte" - }, - "version": { - "description": "Version of the `Policy`. 
The default version is 0.", - "type": "integer", - "format": "int32" } }, - "id": "Policy" + "id": "AcknowledgeRequest" }, "ListTopicSubscriptionsResponse": { "description": "Response for the `ListTopicSubscriptions` method.", @@ -761,85 +844,67 @@ }, "id": "ListTopicSubscriptionsResponse" }, - "Subscription": { - "description": "A subscription resource.", + "PullResponse": { + "description": "Response for the `Pull` method.", "type": "object", "properties": { - "pushConfig": { - "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods.", - "$ref": "PushConfig" - }, - "topic": { - "description": "The name of the topic from which this subscription is receiving messages.\nThe value of this field will be `_deleted-topic_` if the topic has been\ndeleted.", - "type": "string" - }, - "ackDeadlineSeconds": { - "description": "This value is the maximum time after a subscriber receives a message\nbefore the subscriber should acknowledge the message. After message\ndelivery but before the ack deadline expires and before the message is\nacknowledged, it is an outstanding message and will not be delivered\nagain during that time (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using pull.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.\n\nIf this parameter is 0, a default value of 10 seconds is used.", - "type": "integer", - "format": "int32" - }, - "name": { - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", - "type": "string" + "receivedMessages": { + "type": "array", + "items": { + "$ref": "ReceivedMessage" + }, + "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if\nthere are no more available in the backlog. The Pub/Sub system may return\nfewer than the `maxMessages` requested even if there are more messages\navailable in the backlog." } }, - "id": "Subscription" + "id": "PullResponse" }, - "ModifyAckDeadlineRequest": { - "description": "Request for the ModifyAckDeadline method.", + "ReceivedMessage": { + "description": "A message and its corresponding acknowledgment ID.", "type": "object", "properties": { - "ackDeadlineSeconds": { - "description": "The new ack deadline with respect to the time this request was sent to\nthe Pub/Sub system. Must be \u003e= 0. For example, if the value is 10, the new\nack deadline will expire 10 seconds after the `ModifyAckDeadline` call\nwas made. Specifying zero may immediately make the message available for\nanother pull request.", - "type": "integer", - "format": "int32" + "message": { + "$ref": "PubsubMessage", + "description": "The message." }, "ackId": { - "description": "The acknowledgment ID. 
Either this or ack_ids must be populated, but not\nboth.", + "description": "This ID can be used to acknowledge the received message.", "type": "string" - }, - "ackIds": { - "description": "List of acknowledgment IDs.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "ModifyAckDeadlineRequest" - }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "array", - "items": { - "type": "string" - } } }, - "id": "TestIamPermissionsRequest" + "id": "ReceivedMessage" }, "PushConfig": { "description": "Configuration for a push delivery endpoint.", "type": "object", "properties": { + "pushEndpoint": { + "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", + "type": "string" + }, "attributes": { "description": "Endpoint configuration attributes.\n\nEvery endpoint has a set of API supported attributes that can be used to\ncontrol different aspects of the message delivery.\n\nThe currently supported attribute is `x-goog-version`, which you can\nuse to change the format of the push message. This attribute\nindicates the version of the data expected by the endpoint. This\ncontrols the shape of the envelope (i.e. its fields and metadata).\nThe endpoint version is based on the version of the Pub/Sub\nAPI.\n\nIf not present during the `CreateSubscription` call, it will default to\nthe version of the API used to make such call. If not present during a\n`ModifyPushConfig` call, its value will not be changed. `GetSubscription`\ncalls will always return a valid version, even if the subscription was\ncreated without this attribute.\n\nThe possible values for this attribute are:\n\n* `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.\n* `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", + "type": "object", "additionalProperties": { "type": "string" - }, - "type": "object" - }, - "pushEndpoint": { - "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", - "type": "string" + } } }, "id": "PushConfig" }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse" + }, "PullRequest": { "description": "Request for the `Pull` method.", "type": "object", @@ -850,225 +915,160 @@ }, "maxMessages": { "description": "The maximum number of messages returned for this request. 
The Pub/Sub\nsystem may return fewer than the number specified.", - "type": "integer", - "format": "int32" + "format": "int32", + "type": "integer" } }, "id": "PullRequest" }, - "ModifyPushConfigRequest": { - "description": "Request for the ModifyPushConfig method.", + "ListSubscriptionsResponse": { + "description": "Response for the `ListSubscriptions` method.", "type": "object", "properties": { - "pushConfig": { - "description": "The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` is not called.", - "$ref": "PushConfig" + "nextPageToken": { + "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListSubscriptionsRequest` to get more subscriptions.", + "type": "string" + }, + "subscriptions": { + "description": "The subscriptions that match the request.", + "type": "array", + "items": { + "$ref": "Subscription" + } } }, - "id": "ModifyPushConfigRequest" + "id": "ListSubscriptionsResponse" }, - "PullResponse": { - "description": "Response for the `Pull` method.", + "PublishRequest": { + "properties": { + "messages": { + "type": "array", + "items": { + "$ref": "PubsubMessage" + }, + "description": "The messages to publish." + } + }, + "id": "PublishRequest", + "description": "Request for the Publish method.", + "type": "object" + }, + "PublishResponse": { + "description": "Response for the `Publish` method.", "type": "object", "properties": { - "receivedMessages": { - "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if\nthere are no more available in the backlog. The Pub/Sub system may return\nfewer than the `maxMessages` requested even if there are more messages\navailable in the backlog.", + "messageIds": { + "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", "type": "array", "items": { - "$ref": "ReceivedMessage" + "type": "string" } } }, - "id": "PullResponse" + "id": "PublishResponse" }, - "PubsubMessage": { - "description": "A message data and its attributes. The message payload must not be empty;\nit must contain either a non-empty data field, or at least one attribute.", + "Subscription": { + "description": "A subscription resource.", "type": "object", "properties": { - "data": { - "description": "The message payload. For JSON requests, the value of this field must be\n[base64-encoded](https://tools.ietf.org/html/rfc4648).", - "type": "string", - "format": "byte" + "topic": { + "description": "The name of the topic from which this subscription is receiving messages.\nThe value of this field will be `_deleted-topic_` if the topic has been\ndeleted.", + "type": "string" }, - "attributes": { - "description": "Optional attributes for this message.", - "additionalProperties": { - "type": "string" - }, - "type": "object" + "pushConfig": { + "$ref": "PushConfig", + "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods." }, - "messageId": { - "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. 
This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. It must not be populated by the publisher in a `Publish` call.", - "type": "string" + "ackDeadlineSeconds": { + "description": "This value is the maximum time after a subscriber receives a message\nbefore the subscriber should acknowledge the message. After message\ndelivery but before the ack deadline expires and before the message is\nacknowledged, it is an outstanding message and will not be delivered\nagain during that time (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using pull.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.\n\nIf this parameter is 0, a default value of 10 seconds is used.", + "format": "int32", + "type": "integer" }, - "publishTime": { - "description": "The time at which the message was published, populated by the server when\nit receives the `Publish` call. It must not be populated by the\npublisher in a `Publish` call.", - "type": "string", - "format": "google-datetime" + "name": { + "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "type": "string" } }, - "id": "PubsubMessage" + "id": "Subscription" }, - "AcknowledgeRequest": { - "description": "Request for the Acknowledge method.", + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", "type": "object", "properties": { - "ackIds": { - "description": "The acknowledgment ID for the messages being acknowledged that was returned\nby the Pub/Sub system in the `Pull` response. Must not be empty.", + "permissions": { "type": "array", "items": { "type": "string" - } + }, + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions)." } }, - "id": "AcknowledgeRequest" + "id": "TestIamPermissionsRequest" }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "Topic": { + "description": "A topic resource.", "type": "object", - "properties": {}, - "id": "Empty" + "properties": { + "name": { + "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. 
`{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "type": "string" + } + }, + "id": "Topic" }, - "ListTopicsResponse": { - "description": "Response for the `ListTopics` method.", + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", "type": "object", "properties": { - "nextPageToken": { - "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`.", + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", "type": "string" }, - "topics": { - "description": "The resulting topics.", + "version": { + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32", + "type": "integer" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", "type": "array", "items": { - "$ref": "Topic" + "$ref": "Binding" } } }, - "id": "ListTopicsResponse" + "id": "Policy" }, - "Binding": { - "description": "Associates `members` with a `role`.", + "ModifyAckDeadlineRequest": { + "description": "Request for the ModifyAckDeadline method.", "type": "object", "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "ackId": { + "description": "The acknowledgment ID. Either this or ack_ids must be populated, but not\nboth.", + "type": "string" + }, + "ackDeadlineSeconds": { + "description": "The new ack deadline with respect to the time this request was sent to\nthe Pub/Sub system. Must be \u003e= 0. For example, if the value is 10, the new\nack deadline will expire 10 seconds after the `ModifyAckDeadline` call\nwas made. Specifying zero may immediately make the message available for\nanother pull request.", + "format": "int32", + "type": "integer" + }, + "ackIds": { + "description": "List of acknowledgment IDs.", "type": "array", "items": { "type": "string" } - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" } }, - "id": "Binding" + "id": "ModifyAckDeadlineRequest" } }, - "revision": "20170105", - "basePath": "", + "protocol": "rest", "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "discoveryVersion": "v1", - "baseUrl": "https://pubsub.googleapis.com/", - "name": "pubsub", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - } + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, - "documentationLink": "https://cloud.google.com/pubsub/docs", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", "version": "v1beta2", - "rootUrl": "https://pubsub.googleapis.com/", - "kind": "discovery#restDescription" + "baseUrl": "https://pubsub.googleapis.com/" } diff --git a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go index 12ba05f10..f6cbb0b30 100644 --- a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go +++ b/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Subscriptions = NewProjectsSubscriptionsService(s) @@ -1108,6 +1113,7 @@ func (c *ProjectsSubscriptionsAcknowledgeCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest) if err != nil { @@ -1252,6 +1258,7 @@ func (c *ProjectsSubscriptionsCreateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { @@ -1394,6 +1401,7 @@ func (c *ProjectsSubscriptionsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta2/{+subscription}") @@ -1531,6 +1539,7 @@ func (c *ProjectsSubscriptionsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1674,6 +1683,7 @@ func (c *ProjectsSubscriptionsGetIamPolicyCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } 
@@ -1831,6 +1841,7 @@ func (c *ProjectsSubscriptionsListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2002,6 +2013,7 @@ func (c *ProjectsSubscriptionsModifyAckDeadlineCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest) if err != nil { @@ -2148,6 +2160,7 @@ func (c *ProjectsSubscriptionsModifyPushConfigCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest) if err != nil { @@ -2290,6 +2303,7 @@ func (c *ProjectsSubscriptionsPullCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest) if err != nil { @@ -2428,6 +2442,7 @@ func (c *ProjectsSubscriptionsSetIamPolicyCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -2568,6 +2583,7 @@ func (c *ProjectsSubscriptionsTestIamPermissionsCall) doRequest(alt string) (*ht reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -2704,6 +2720,7 @@ func (c *ProjectsTopicsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic) if err != nil { @@ -2846,6 +2863,7 @@ func (c *ProjectsTopicsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta2/{+topic}") @@ -2983,6 +3001,7 @@ func (c *ProjectsTopicsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3126,6 +3145,7 @@ func (c *ProjectsTopicsGetIamPolicyCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3283,6 +3303,7 @@ func (c *ProjectsTopicsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if 
c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3450,6 +3471,7 @@ func (c *ProjectsTopicsPublishCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest) if err != nil { @@ -3588,6 +3610,7 @@ func (c *ProjectsTopicsSetIamPolicyCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -3728,6 +3751,7 @@ func (c *ProjectsTopicsTestIamPermissionsCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -3890,6 +3914,7 @@ func (c *ProjectsTopicsSubscriptionsListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/qpxexpress/v1/qpxexpress-gen.go b/vendor/google.golang.org/api/qpxexpress/v1/qpxexpress-gen.go index 84605ed91..b14aeb28e 100644 --- a/vendor/google.golang.org/api/qpxexpress/v1/qpxexpress-gen.go +++ b/vendor/google.golang.org/api/qpxexpress/v1/qpxexpress-gen.go @@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Trips *TripsService } @@ -69,6 +70,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewTripsService(s *Service) *TripsService { rs := &TripsService{s: s} return rs @@ -1293,6 +1298,7 @@ func (c *TripsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tripssearchrequest) if err != nil { diff --git a/vendor/google.golang.org/api/replicapool/v1beta1/replicapool-gen.go b/vendor/google.golang.org/api/replicapool/v1beta1/replicapool-gen.go index 9518bfa05..f0354f58c 100644 --- a/vendor/google.golang.org/api/replicapool/v1beta1/replicapool-gen.go +++ b/vendor/google.golang.org/api/replicapool/v1beta1/replicapool-gen.go @@ -79,9 +79,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header 
fragment, for Google use only Pools *PoolsService @@ -95,6 +96,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewPoolsService(s *Service) *PoolsService { rs := &PoolsService{s: s} return rs @@ -1118,6 +1123,7 @@ func (c *PoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.poolsdeleterequest) if err != nil { @@ -1254,6 +1260,7 @@ func (c *PoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1405,6 +1412,7 @@ func (c *PoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pool) if err != nil { @@ -1575,6 +1583,7 @@ func (c *PoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1764,6 +1773,7 @@ func (c *PoolsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{projectName}/zones/{zone}/pools/{poolName}/resize") @@ -1917,6 +1927,7 @@ func (c *PoolsUpdatetemplateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.template) if err != nil { @@ -2046,6 +2057,7 @@ func (c *ReplicasDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.replicasdeleterequest) if err != nil { @@ -2220,6 +2232,7 @@ func (c *ReplicasGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2406,6 +2419,7 @@ func (c *ReplicasListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2594,6 +2608,7 @@ func (c *ReplicasRestartCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := 
googleapi.ResolveRelative(c.s.BasePath, "{projectName}/zones/{zone}/pools/{poolName}/replicas/{replicaName}/restart") diff --git a/vendor/google.golang.org/api/replicapool/v1beta2/replicapool-gen.go b/vendor/google.golang.org/api/replicapool/v1beta2/replicapool-gen.go index a52b3beb9..c4a420c7c 100644 --- a/vendor/google.golang.org/api/replicapool/v1beta2/replicapool-gen.go +++ b/vendor/google.golang.org/api/replicapool/v1beta2/replicapool-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only InstanceGroupManagers *InstanceGroupManagersService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewInstanceGroupManagersService(s *Service) *InstanceGroupManagersService { rs := &InstanceGroupManagersService{s: s} return rs @@ -804,6 +809,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { @@ -962,6 +968,7 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") @@ -1114,6 +1121,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http. 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) if err != nil { @@ -1280,6 +1288,7 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1434,6 +1443,7 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { @@ -1622,6 +1632,7 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1809,6 +1820,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) if err != nil { @@ -1968,6 +1980,7 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") @@ -2127,6 +2140,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { @@ -2286,6 +2300,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) if err != nil { @@ -2452,6 +2467,7 @@ func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2635,6 +2651,7 @@ func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/replicapoolupdater/v1beta1/replicapoolupdater-gen.go b/vendor/google.golang.org/api/replicapoolupdater/v1beta1/replicapoolupdater-gen.go index 750c0135e..3c44f86b1 100644 --- 
a/vendor/google.golang.org/api/replicapoolupdater/v1beta1/replicapoolupdater-gen.go +++ b/vendor/google.golang.org/api/replicapoolupdater/v1beta1/replicapoolupdater-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only RollingUpdates *RollingUpdatesService @@ -87,6 +88,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewRollingUpdatesService(s *Service) *RollingUpdatesService { rs := &RollingUpdatesService{s: s} return rs @@ -850,6 +855,7 @@ func (c *RollingUpdatesCancelCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/rollingUpdates/{rollingUpdate}/cancel") @@ -1007,6 +1013,7 @@ func (c *RollingUpdatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1158,6 +1165,7 @@ func (c *RollingUpdatesInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollingupdate) if err != nil { @@ -1337,6 +1345,7 @@ func (c *RollingUpdatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1555,6 +1564,7 @@ func (c *RollingUpdatesListInstanceUpdatesCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1748,6 +1758,7 @@ func (c *RollingUpdatesPauseCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/rollingUpdates/{rollingUpdate}/pause") @@ -1895,6 +1906,7 @@ func (c *RollingUpdatesResumeCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/rollingUpdates/{rollingUpdate}/resume") @@ -2043,6 +2055,7 @@ func (c *RollingUpdatesRollbackCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/rollingUpdates/{rollingUpdate}/rollback") @@ -2199,6 +2212,7 @@ func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2380,6 +2394,7 @@ func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/reseller/v1/reseller-api.json b/vendor/google.golang.org/api/reseller/v1/reseller-api.json index d97ad040f..36153b63f 100644 --- a/vendor/google.golang.org/api/reseller/v1/reseller-api.json +++ b/vendor/google.golang.org/api/reseller/v1/reseller-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/n63v3jbq0NMxEV0nJ3he6mTRpog\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/zGPGa-LIIu53_wHsQE6CSF1P9Iw\"", "discoveryVersion": "v1", "id": "reseller:v1", "name": "reseller", "version": "v1", - "revision": "20160329", + "revision": "20170216", "title": "Enterprise Apps Reseller API", "description": "Creates and manages your customers and their subscriptions.", "ownerDomain": "google.com", @@ -89,44 +89,44 @@ "properties": { "addressLine1": { "type": "string", - "description": "Address line 1 of the address." + "description": "A customer's physical address. An address can be composed of one to three lines. The addressline2 and addressLine3 are optional." }, "addressLine2": { "type": "string", - "description": "Address line 2 of the address." + "description": "Line 2 of the address." }, "addressLine3": { "type": "string", - "description": "Address line 3 of the address." + "description": "Line 3 of the address." }, "contactName": { "type": "string", - "description": "Name of the contact person." + "description": "The customer contact's name. This is required." }, "countryCode": { "type": "string", - "description": "ISO 3166 country code." + "description": "For countryCode information, see the ISO 3166 country code elements. Verify that country is approved for resale of Google products. This property is required when creating a new customer." }, "kind": { "type": "string", - "description": "Identifies the resource as a customer address.", + "description": "Identifies the resource as a customer address. Value: customers#address", "default": "customers#address" }, "locality": { "type": "string", - "description": "Name of the locality. This is in accordance with - http://portablecontacts.net/draft-spec.html#address_element." + "description": "An example of a locality value is the city of San Francisco." }, "organizationName": { "type": "string", - "description": "Name of the organization." + "description": "The company or company division name. This is required." }, "postalCode": { "type": "string", - "description": "The postal code. This is in accordance with - http://portablecontacts.net/draft-spec.html#address_element." + "description": "A postalCode example is a postal zip code such as 94043. 
This property is required when creating a new customer." }, "region": { "type": "string", - "description": "Name of the region. This is in accordance with - http://portablecontacts.net/draft-spec.html#address_element." + "description": "An example of a region value is CA for the state of California." } } }, @@ -137,24 +137,24 @@ "properties": { "dealCode": { "type": "string", - "description": "External name of the deal code applicable for the subscription. This field is optional. If missing, the deal price plan won't be used." + "description": "Google-issued code (100 char max) for discounted pricing on subscription plans. Deal code must be included in changePlan request in order to receive discounted rate. This property is optional. If a deal code has already been added to a subscription, this property may be left empty and the existing discounted rate will still apply (if not empty, only provide the deal code that is already present on the subscription). If a deal code has never been added to a subscription and this property is left blank, regular pricing will apply." }, "kind": { "type": "string", - "description": "Identifies the resource as a subscription change plan request.", + "description": "Identifies the resource as a subscription change plan request. Value: subscriptions#changePlanRequest", "default": "subscriptions#changePlanRequest" }, "planName": { "type": "string", - "description": "Name of the plan to change to." + "description": "The planName property is required. This is the name of the subscription's payment plan. For more information about the Google payment plans, see API concepts.\n\nPossible values are: \n- ANNUAL_MONTHLY_PAY - The annual commitment plan with monthly payments \n- ANNUAL_YEARLY_PAY - The annual commitment plan with yearly payments \n- FLEXIBLE - The flexible plan \n- TRIAL - The 30-day free trial plan" }, "purchaseOrderId": { "type": "string", - "description": "Purchase order id for your order tracking purposes." + "description": "This is an optional property. This purchase order (PO) information is for resellers to use for their company tracking usage. If a purchaseOrderId value is given it appears in the API responses and shows up in the invoice. The property accepts up to 80 plain text characters." }, "seats": { "$ref": "Seats", - "description": "Number/Limit of seats in the new plan." + "description": "This is a required property. The seats property is the number of user seat licenses." } } }, @@ -165,11 +165,11 @@ "properties": { "alternateEmail": { "type": "string", - "description": "The alternate email of the customer." + "description": "Like the \"Customer email\" in the reseller tools, this email is the secondary contact used if something happens to the customer's service such as service outage or a security issue. This property is required when creating a new customer and should not use the same domain as customerDomain." }, "customerDomain": { "type": "string", - "description": "The domain name of the customer." + "description": "The customer's primary domain name string. customerDomain is required when creating a new customer. Do not include the www prefix in the domain when adding a customer." }, "customerDomainVerified": { "type": "boolean", @@ -177,24 +177,24 @@ }, "customerId": { "type": "string", - "description": "The id of the customer." + "description": "This property will always be returned in a response as the unique identifier generated by Google. 
In a request, this property can be either the primary domain or the unique identifier generated by Google." }, "kind": { "type": "string", - "description": "Identifies the resource as a customer.", + "description": "Identifies the resource as a customer. Value: reseller#customer", "default": "reseller#customer" }, "phoneNumber": { "type": "string", - "description": "The phone number of the customer." + "description": "Customer contact phone number. This can be continuous numbers, with spaces, etc. But it must be a real phone number and not, for example, \"123\". See phone local format conventions." }, "postalAddress": { "$ref": "Address", - "description": "The postal address of the customer." + "description": "A customer's address information. Each field has a limit of 255 charcters." }, "resourceUiUrl": { "type": "string", - "description": "Ui url for customer resource." + "description": "URL to customer's Admin console dashboard. The read-only URL is generated by the API service. This is used if your client application requires the customer to complete a task in the Admin console." } } }, @@ -205,12 +205,41 @@ "properties": { "kind": { "type": "string", - "description": "Identifies the resource as a subscription renewal setting.", + "description": "Identifies the resource as a subscription renewal setting. Value: subscriptions#renewalSettings", "default": "subscriptions#renewalSettings" }, "renewalType": { "type": "string", - "description": "Subscription renewal type." + "description": "Renewal settings for the annual commitment plan. For more detailed information, see renewal options in the administrator help center. When renewing a subscription, the renewalType is a required property." + } + } + }, + "ResellernotifyGetwatchdetailsResponse": { + "id": "ResellernotifyGetwatchdetailsResponse", + "type": "object", + "description": "JSON template for resellernotify getwatchdetails response.", + "properties": { + "serviceAccountEmailAddresses": { + "type": "array", + "description": "List of registered service accounts.", + "items": { + "type": "string" + } + }, + "topicName": { + "type": "string", + "description": "Topic name of the PubSub" + } + } + }, + "ResellernotifyResource": { + "id": "ResellernotifyResource", + "type": "object", + "description": "JSON template for resellernotify response.", + "properties": { + "topicName": { + "type": "string", + "description": "Topic name of the PubSub" } } }, @@ -221,7 +250,7 @@ "properties": { "kind": { "type": "string", - "description": "Identifies the resource as a subscription change plan request.", + "description": "Identifies the resource as a subscription change plan request. Value: subscriptions#seats", "default": "subscriptions#seats" }, "licensedNumberOfSeats": { @@ -231,12 +260,12 @@ }, "maximumNumberOfSeats": { "type": "integer", - "description": "Maximum number of seats that can be purchased. This needs to be provided only for a non-commitment plan. For a commitment plan it is decided by the contract.", + "description": "The maximumNumberOfSeats property is the maximum number of licenses that the customer can purchase. This property applies to plans other than the annual commitment plan. How a user's licenses are managed depends on the subscription's payment plan: \n- annual commitment plan (with monthly or yearly payments) — For this plan, a reseller is invoiced on the number of user licenses in the numberOfSeats property. The maximumNumberOfSeats property is a read-only property in the API's response. 
\n- flexible plan — For this plan, a reseller is invoiced on the actual number of users which is capped by the maximumNumberOfSeats. This is the maximum number of user licenses a customer has for user license provisioning. This quantity can be increased up to the maximum limit defined in the reseller's contract. And the minimum quantity is the current number of users in the customer account. \n- 30-day free trial plan — A subscription in a 30-day free trial is restricted to maximum 10 seats.", "format": "int32" }, "numberOfSeats": { "type": "integer", - "description": "Number of seats to purchase. This is applicable only for a commitment plan.", + "description": "The numberOfSeats property holds the customer's number of user licenses. How a user's licenses are managed depends on the subscription's plan: \n- annual commitment plan (with monthly or yearly pay) — For this plan, a reseller is invoiced on the number of user licenses in the numberOfSeats property. This is the maximum number of user licenses that a reseller's customer can create. The reseller can add more licenses, but once set, the numberOfSeats can not be reduced until renewal. The reseller is invoiced based on the numberOfSeats value regardless of how many of these user licenses are provisioned users. \n- flexible plan — For this plan, a reseller is invoiced on the actual number of users which is capped by the maximumNumberOfSeats. The numberOfSeats property is not used in the request or response for flexible plan customers. \n- 30-day free trial plan — The numberOfSeats property is not used in the request or response for an account in a 30-day trial.", "format": "int32" } } @@ -248,11 +277,11 @@ "properties": { "billingMethod": { "type": "string", - "description": "Billing method of this subscription." + "description": "Read-only field that returns the current billing method for a subscription." }, "creationTime": { "type": "string", - "description": "Creation time of this subscription in milliseconds since Unix epoch.", + "description": "The creationTime property is the date when subscription was created. It is in milliseconds using the Epoch format. See an example Epoch converter.", "format": "int64" }, "customerDomain": { @@ -261,108 +290,109 @@ }, "customerId": { "type": "string", - "description": "The id of the customer to whom the subscription belongs." + "description": "This property will always be returned in a response as the unique identifier generated by Google. In a request, this property can be either the primary domain or the unique identifier generated by Google." }, "dealCode": { "type": "string", - "description": "External name of the deal, if this subscription was provisioned under one. Otherwise this field will be empty." + "description": "Google-issued code (100 char max) for discounted pricing on subscription plans. Deal code must be included in insert requests in order to receive discounted rate. This property is optional, regular pricing applies if left empty." }, "kind": { "type": "string", - "description": "Identifies the resource as a Subscription.", + "description": "Identifies the resource as a Subscription. Value: reseller#subscription", "default": "reseller#subscription" }, "plan": { "type": "object", - "description": "Plan details of the subscription", + "description": "The plan property is required. In this version of the API, the G Suite plans are the flexible plan, annual commitment plan, and the 30-day free trial plan. 
For more information about the API\"s payment plans, see the API concepts.", "properties": { "commitmentInterval": { "type": "object", - "description": "Interval of the commitment if it is a commitment plan.", + "description": "In this version of the API, annual commitment plan's interval is one year.", "properties": { "endTime": { "type": "string", - "description": "End time of the commitment interval in milliseconds since Unix epoch.", + "description": "An annual commitment plan's interval's endTime in milliseconds using the UNIX Epoch format. See an example Epoch converter.", "format": "int64" }, "startTime": { "type": "string", - "description": "Start time of the commitment interval in milliseconds since Unix epoch.", + "description": "An annual commitment plan's interval's startTime in milliseconds using UNIX Epoch format. See an example Epoch converter.", "format": "int64" } } }, "isCommitmentPlan": { "type": "boolean", - "description": "Whether the plan is a commitment plan or not." + "description": "The isCommitmentPlan property's boolean value identifies the plan as an annual commitment plan:\n- true — The subscription's plan is an annual commitment plan.\n- false — The plan is not an annual commitment plan." }, "planName": { "type": "string", - "description": "The plan name of this subscription's plan." + "description": "The planName property is required. This is the name of the subscription's plan. For more information about the Google payment plans, see the API concepts.\n\nPossible values are: \n- ANNUAL_MONTHLY_PAY — The annual commitment plan with monthly payments \n- ANNUAL_YEARLY_PAY — The annual commitment plan with yearly payments \n- FLEXIBLE — The flexible plan \n- TRIAL — The 30-day free trial plan. A subscription in trial will be suspended after the 30th free day if no payment plan is assigned. Calling changePlan will assign a payment plan to a trial but will not activate the plan. A trial will automatically begin its assigned payment plan after its 30th free day or immediately after calling startPaidService." } } }, "purchaseOrderId": { "type": "string", - "description": "Purchase order id for your order tracking purposes." + "description": "This is an optional property. This purchase order (PO) information is for resellers to use for their company tracking usage. If a purchaseOrderId value is given it appears in the API responses and shows up in the invoice. The property accepts up to 80 plain text characters." }, "renewalSettings": { "$ref": "RenewalSettings", - "description": "Renewal settings of the subscription." + "description": "Renewal settings for the annual commitment plan. For more detailed information, see renewal options in the administrator help center." }, "resourceUiUrl": { "type": "string", - "description": "Ui url for subscription resource." + "description": "URL to customer's Subscriptions page in the Admin console. The read-only URL is generated by the API service. This is used if your client application requires the customer to complete a task using the Subscriptions page in the Admin console." }, "seats": { "$ref": "Seats", - "description": "Number/Limit of seats in the new plan." + "description": "This is a required property. The number and limit of user seat licenses in the plan." }, "skuId": { "type": "string", - "description": "Name of the sku for which this subscription is purchased." + "description": "A required property. The skuId is a unique system identifier for a product's SKU assigned to a customer in the subscription. 
For products and SKUs available in this version of the API, see Product and SKU IDs." }, "status": { "type": "string", - "description": "Status of the subscription." + "description": "This is an optional property." }, "subscriptionId": { "type": "string", - "description": "The id of the subscription." + "description": "The subscriptionId is the subscription identifier and is unique for each customer. This is a required property. Since a subscriptionId changes when a subscription is updated, we recommend not using this ID as a key for persistent data. Use the subscriptionId as described in retrieve all reseller subscriptions." }, "suspensionReasons": { "type": "array", - "description": "Read-only field containing an enumerable of all the current suspension reasons for a subscription. It is possible for a subscription to have many concurrent, overlapping suspension reasons. A subscription's STATUS is SUSPENDED until all pending suspensions are removed. Possible options include: \n- PENDING_TOS_ACCEPTANCE - The customer has not logged in and accepted the Google Apps Resold Terms of Services. \n- RENEWAL_WITH_TYPE_CANCEL - The customer's commitment ended and their service was cancelled at the end of their term. \n- RESELLER_INITIATED - A manual suspension invoked by a Reseller. \n- TRIAL_ENDED - The customer's trial expired without a plan selected. \n- OTHER - The customer is suspended for an internal Google reason (e.g. abuse or otherwise).", + "description": "Read-only field containing an enumerable of all the current suspension reasons for a subscription. It is possible for a subscription to have many concurrent, overlapping suspension reasons. A subscription's STATUS is SUSPENDED until all pending suspensions are removed.\n\nPossible options include: \n- PENDING_TOS_ACCEPTANCE - The customer has not logged in and accepted the G Suite Resold Terms of Services. \n- RENEWAL_WITH_TYPE_CANCEL - The customer's commitment ended and their service was cancelled at the end of their term. \n- RESELLER_INITIATED - A manual suspension invoked by a Reseller. \n- TRIAL_ENDED - The customer's trial expired without a plan selected. \n- OTHER - The customer is suspended for an internal Google reason (e.g. abuse or otherwise).", "items": { "type": "string" } }, "transferInfo": { "type": "object", - "description": "Transfer related information for the subscription.", + "description": "Read-only transfer related information for the subscription. For more information, see retrieve transferable subscriptions for a customer.", "properties": { "minimumTransferableSeats": { "type": "integer", + "description": "When inserting a subscription, this is the minimum number of seats listed in the transfer order for this product. For example, if the customer has 20 users, the reseller cannot place a transfer order of 15 seats. The minimum is 20 seats.", "format": "int32" }, "transferabilityExpirationTime": { "type": "string", - "description": "Time when transfer token or intent to transfer will expire.", + "description": "The time when transfer token or intent to transfer will expire. The time is in milliseconds using UNIX Epoch format.", "format": "int64" } } }, "trialSettings": { "type": "object", - "description": "Trial Settings of the subscription.", + "description": "The G Suite annual commitment and flexible payment plans can be in a 30-day free trial. For more information, see the API concepts.", "properties": { "isInTrial": { "type": "boolean", - "description": "Whether the subscription is in trial." 
+ "description": "Determines if a subscription's plan is in a 30-day free trial or not:\n- true — The plan is in trial.\n- false — The plan is not in trial." }, "trialEndTime": { "type": "string", - "description": "End time of the trial in milliseconds since Unix epoch.", + "description": "Date when the trial ends. The value is in milliseconds using the UNIX Epoch format. See an example Epoch converter.", "format": "int64" } } @@ -376,7 +406,7 @@ "properties": { "kind": { "type": "string", - "description": "Identifies the resource as a collection of subscriptions.", + "description": "Identifies the resource as a collection of subscriptions. Value: reseller#subscriptions", "default": "reseller#subscriptions" }, "nextPageToken": { @@ -400,11 +430,11 @@ "id": "reseller.customers.get", "path": "customers/{customerId}", "httpMethod": "GET", - "description": "Gets a customer resource if one exists and is owned by the reseller.", + "description": "Get a customer account.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" } @@ -424,11 +454,11 @@ "id": "reseller.customers.insert", "path": "customers", "httpMethod": "POST", - "description": "Creates a customer resource if one does not already exist.", + "description": "Order a new customer's account.", "parameters": { "customerAuthToken": { "type": "string", - "description": "An auth token needed for inserting a customer for which domain already exists. Can be generated at https://admin.google.com/TransferToken. Optional.", + "description": "The customerAuthToken query string is required when creating a resold account that transfers a direct customer's subscription or transfers another reseller customer's subscription to your reseller management. This is a hexadecimal authentication token needed to complete the subscription transfer. For more information, see the administrator help center.", "location": "query" } }, @@ -446,11 +476,11 @@ "id": "reseller.customers.patch", "path": "customers/{customerId}", "httpMethod": "PATCH", - "description": "Update a customer resource if one it exists and is owned by the reseller. This method supports patch semantics.", + "description": "Update a customer account's settings. This method supports patch semantics.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" } @@ -472,11 +502,11 @@ "id": "reseller.customers.update", "path": "customers/{customerId}", "httpMethod": "PUT", - "description": "Update a customer resource if one it exists and is owned by the reseller.", + "description": "Update a customer account's settings.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. 
If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" } @@ -496,6 +526,61 @@ } } }, + "resellernotify": { + "methods": { + "getwatchdetails": { + "id": "reseller.resellernotify.getwatchdetails", + "path": "resellernotify/getwatchdetails", + "httpMethod": "GET", + "description": "Returns all the details of the watch corresponding to the reseller.", + "response": { + "$ref": "ResellernotifyGetwatchdetailsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/apps.order", + "https://www.googleapis.com/auth/apps.order.readonly" + ] + }, + "register": { + "id": "reseller.resellernotify.register", + "path": "resellernotify/register", + "httpMethod": "POST", + "description": "Registers a Reseller for receiving notifications.", + "parameters": { + "serviceAccountEmailAddress": { + "type": "string", + "description": "The service account which will own the created Cloud-PubSub topic.", + "location": "query" + } + }, + "response": { + "$ref": "ResellernotifyResource" + }, + "scopes": [ + "https://www.googleapis.com/auth/apps.order" + ] + }, + "unregister": { + "id": "reseller.resellernotify.unregister", + "path": "resellernotify/unregister", + "httpMethod": "POST", + "description": "Unregisters a Reseller for receiving notifications.", + "parameters": { + "serviceAccountEmailAddress": { + "type": "string", + "description": "The service account which owns the Cloud-PubSub topic.", + "location": "query" + } + }, + "response": { + "$ref": "ResellernotifyResource" + }, + "scopes": [ + "https://www.googleapis.com/auth/apps.order" + ] + } + } + }, "subscriptions": { "methods": { "activate": { @@ -506,13 +591,13 @@ "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } @@ -532,17 +617,17 @@ "id": "reseller.subscriptions.changePlan", "path": "customers/{customerId}/subscriptions/{subscriptionId}/changePlan", "httpMethod": "POST", - "description": "Changes the plan of a subscription", + "description": "Update a subscription plan. Use this method to update a plan for a 30-day trial or a flexible plan subscription to an annual commitment plan with monthly or yearly payments.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. 
If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } @@ -565,17 +650,17 @@ "id": "reseller.subscriptions.changeRenewalSettings", "path": "customers/{customerId}/subscriptions/{subscriptionId}/changeRenewalSettings", "httpMethod": "POST", - "description": "Changes the renewal settings of a subscription", + "description": "Update a user license's renewal settings. This is applicable for accounts with annual commitment plans only.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } @@ -598,17 +683,17 @@ "id": "reseller.subscriptions.changeSeats", "path": "customers/{customerId}/subscriptions/{subscriptionId}/changeSeats", "httpMethod": "POST", - "description": "Changes the seats configuration of a subscription", + "description": "Update a subscription's user license settings.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. 
And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } @@ -631,17 +716,17 @@ "id": "reseller.subscriptions.delete", "path": "customers/{customerId}/subscriptions/{subscriptionId}", "httpMethod": "DELETE", - "description": "Cancels/Downgrades a subscription.", + "description": "Cancel, suspend or transfer a subscription to direct.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "deletionType": { "type": "string", - "description": "Whether the subscription is to be fully cancelled or downgraded", + "description": "The deletionType query string enables the cancellation, downgrade, or suspension of a subscription.", "required": true, "enum": [ "cancel", @@ -650,16 +735,16 @@ "transfer_to_direct" ], "enumDescriptions": [ - "Cancels the subscription immediately", - "Downgrades a Google Apps for Business subscription to Google Apps", - "Suspends the subscriptions for 4 days before cancelling it", - "Transfers a subscription directly to Google" + "Cancels the subscription immediately. This does not apply to a G Suite subscription.", + "Downgrades a G Suite subscription to a Google Apps Free edition subscription only if the customer was initially subscribed to a Google Apps Free edition (also known as the Standard edition). Once downgraded, the customer no longer has access to the previous G Suite subscription and is no longer managed by the reseller.\n\nA G Suite subscription's downgrade cannot be invoked if an active or suspended Google Drive or Google Vault subscription is present. The Google Drive or Google Vault subscription must be cancelled before the G Suite subscription's downgrade is invoked.\n\nThe downgrade deletionType does not apply to other products or G Suite SKUs.", + "(DEPRECATED) The G Suite account is suspended for four days and then cancelled. Once suspended, an administrator has access to the suspended account, but the account users can not access their services. A suspension can be lifted, using the reseller tools.\n\nA G Suite subscription's suspension can not be invoked if an active or suspended Google Drive or Google Vault subscription is present. The Google Drive or Google Vault subscription must be cancelled before the G Suite subscription's suspension is invoked.", + "Transfers a subscription directly to Google.  The customer is immediately transferred to a direct billing relationship with Google and is given a short amount of time with no service interruption. The customer can then choose to set up billing directly with Google by using a credit card, or they can transfer to another reseller." ], "location": "query" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. 
And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } @@ -677,17 +762,17 @@ "id": "reseller.subscriptions.get", "path": "customers/{customerId}/subscriptions/{subscriptionId}", "httpMethod": "GET", - "description": "Gets a subscription of the customer.", + "description": "Get a specific subscription.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } @@ -708,16 +793,16 @@ "id": "reseller.subscriptions.insert", "path": "customers/{customerId}/subscriptions", "httpMethod": "POST", - "description": "Creates/Transfers a subscription for the customer.", + "description": "Create or transfer a subscription.", "parameters": { "customerAuthToken": { "type": "string", - "description": "An auth token needed for transferring a subscription. Can be generated at https://www.google.com/a/cpanel/customer-domain/TransferToken. Optional.", + "description": "The customerAuthToken query string is required when creating a resold account that transfers a direct customer's subscription or transfers another reseller customer's subscription to your reseller management. This is a hexadecimal authentication token needed to complete the subscription transfer. For more information, see the administrator help center.", "location": "query" }, "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" } @@ -739,26 +824,26 @@ "id": "reseller.subscriptions.list", "path": "subscriptions", "httpMethod": "GET", - "description": "Lists subscriptions of a reseller, optionally filtered by a customer name prefix.", + "description": "List of subscriptions managed by the reseller. The list can be all subscriptions, all of a customer's subscriptions, or all of a customer's transferable subscriptions.", "parameters": { "customerAuthToken": { "type": "string", - "description": "An auth token needed if the customer is not a resold customer of this reseller. Can be generated at https://www.google.com/a/cpanel/customer-domain/TransferToken.Optional.", + "description": "The customerAuthToken query string is required when creating a resold account that transfers a direct customer's subscription or transfers another reseller customer's subscription to your reseller management. This is a hexadecimal authentication token needed to complete the subscription transfer. 
For more information, see the administrator help center.", "location": "query" }, "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "location": "query" }, "customerNamePrefix": { "type": "string", - "description": "Prefix of the customer's domain name by which the subscriptions should be filtered. Optional", + "description": "When retrieving all of your subscriptions and filtering for specific customers, you can enter a prefix for a customer name. Using an example customer group that includes exam.com, example20.com and example.com: \n- exa -- Returns all customer names that start with 'exa' which could include exam.com, example20.com, and example.com. A name prefix is similar to using a regular expression's asterisk, exa*. \n- example -- Returns example20.com and example.com.", "location": "query" }, "maxResults": { "type": "integer", - "description": "Maximum number of results to return", + "description": "When retrieving a large list, the maxResults is the maximum number of results per page. The nextPageToken value takes you to the next page. The default is 20.", "format": "uint32", "minimum": "1", "maximum": "100", @@ -782,17 +867,17 @@ "id": "reseller.subscriptions.startPaidService", "path": "customers/{customerId}/subscriptions/{subscriptionId}/startPaidService", "httpMethod": "POST", - "description": "Starts paid service of a trial subscription", + "description": "Immediately move a 30-day free trial subscription to a paid service subscription.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } @@ -812,17 +897,17 @@ "id": "reseller.subscriptions.suspend", "path": "customers/{customerId}/subscriptions/{subscriptionId}/suspend", "httpMethod": "POST", - "description": "Suspends an active subscription", + "description": "Suspends an active subscription.", "parameters": { "customerId": { "type": "string", - "description": "Id of the Customer", + "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", "required": true, "location": "path" }, "subscriptionId": { "type": "string", - "description": "Id of the subscription, which is unique for a customer", + "description": "This is a required property. 
The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", "required": true, "location": "path" } diff --git a/vendor/google.golang.org/api/reseller/v1/reseller-gen.go b/vendor/google.golang.org/api/reseller/v1/reseller-gen.go index 7d4c2af8a..6c2c0b8b0 100644 --- a/vendor/google.golang.org/api/reseller/v1/reseller-gen.go +++ b/vendor/google.golang.org/api/reseller/v1/reseller-gen.go @@ -60,17 +60,21 @@ func New(client *http.Client) (*Service, error) { } s := &Service{client: client, BasePath: basePath} s.Customers = NewCustomersService(s) + s.Resellernotify = NewResellernotifyService(s) s.Subscriptions = NewSubscriptionsService(s) return s, nil } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Customers *CustomersService + Resellernotify *ResellernotifyService + Subscriptions *SubscriptionsService } @@ -81,6 +85,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewCustomersService(s *Service) *CustomersService { rs := &CustomersService{s: s} return rs @@ -90,6 +98,15 @@ type CustomersService struct { s *Service } +func NewResellernotifyService(s *Service) *ResellernotifyService { + rs := &ResellernotifyService{s: s} + return rs +} + +type ResellernotifyService struct { + s *Service +} + func NewSubscriptionsService(s *Service) *SubscriptionsService { rs := &SubscriptionsService{s: s} return rs @@ -101,37 +118,43 @@ type SubscriptionsService struct { // Address: JSON template for address of a customer. type Address struct { - // AddressLine1: Address line 1 of the address. + // AddressLine1: A customer's physical address. An address can be + // composed of one to three lines. The addressline2 and addressLine3 are + // optional. AddressLine1 string `json:"addressLine1,omitempty"` - // AddressLine2: Address line 2 of the address. + // AddressLine2: Line 2 of the address. AddressLine2 string `json:"addressLine2,omitempty"` - // AddressLine3: Address line 3 of the address. + // AddressLine3: Line 3 of the address. AddressLine3 string `json:"addressLine3,omitempty"` - // ContactName: Name of the contact person. + // ContactName: The customer contact's name. This is required. ContactName string `json:"contactName,omitempty"` - // CountryCode: ISO 3166 country code. + // CountryCode: For countryCode information, see the ISO 3166 country + // code elements. Verify that country is approved for resale of Google + // products. This property is required when creating a new customer. CountryCode string `json:"countryCode,omitempty"` - // Kind: Identifies the resource as a customer address. + // Kind: Identifies the resource as a customer address. Value: + // customers#address Kind string `json:"kind,omitempty"` - // Locality: Name of the locality. This is in accordance with - - // http://portablecontacts.net/draft-spec.html#address_element. 
+ // Locality: An example of a locality value is the city of San + // Francisco. Locality string `json:"locality,omitempty"` - // OrganizationName: Name of the organization. + // OrganizationName: The company or company division name. This is + // required. OrganizationName string `json:"organizationName,omitempty"` - // PostalCode: The postal code. This is in accordance with - - // http://portablecontacts.net/draft-spec.html#address_element. + // PostalCode: A postalCode example is a postal zip code such as 94043. + // This property is required when creating a new customer. PostalCode string `json:"postalCode,omitempty"` - // Region: Name of the region. This is in accordance with - - // http://portablecontacts.net/draft-spec.html#address_element. + // Region: An example of a region value is CA for the state of + // California. Region string `json:"region,omitempty"` // ForceSendFields is a list of field names (e.g. "AddressLine1") to @@ -159,21 +182,42 @@ func (s *Address) MarshalJSON() ([]byte, error) { // ChangePlanRequest: JSON template for the ChangePlan rpc request. type ChangePlanRequest struct { - // DealCode: External name of the deal code applicable for the - // subscription. This field is optional. If missing, the deal price plan - // won't be used. + // DealCode: Google-issued code (100 char max) for discounted pricing on + // subscription plans. Deal code must be included in changePlan request + // in order to receive discounted rate. This property is optional. If a + // deal code has already been added to a subscription, this property may + // be left empty and the existing discounted rate will still apply (if + // not empty, only provide the deal code that is already present on the + // subscription). If a deal code has never been added to a subscription + // and this property is left blank, regular pricing will apply. DealCode string `json:"dealCode,omitempty"` // Kind: Identifies the resource as a subscription change plan request. + // Value: subscriptions#changePlanRequest Kind string `json:"kind,omitempty"` - // PlanName: Name of the plan to change to. + // PlanName: The planName property is required. This is the name of the + // subscription's payment plan. For more information about the Google + // payment plans, see API concepts. + // + // Possible values are: + // - ANNUAL_MONTHLY_PAY - The annual commitment plan with monthly + // payments + // - ANNUAL_YEARLY_PAY - The annual commitment plan with yearly payments + // + // - FLEXIBLE - The flexible plan + // - TRIAL - The 30-day free trial plan PlanName string `json:"planName,omitempty"` - // PurchaseOrderId: Purchase order id for your order tracking purposes. + // PurchaseOrderId: This is an optional property. This purchase order + // (PO) information is for resellers to use for their company tracking + // usage. If a purchaseOrderId value is given it appears in the API + // responses and shows up in the invoice. The property accepts up to 80 + // plain text characters. PurchaseOrderId string `json:"purchaseOrderId,omitempty"` - // Seats: Number/Limit of seats in the new plan. + // Seats: This is a required property. The seats property is the number + // of user seat licenses. Seats *Seats `json:"seats,omitempty"` // ForceSendFields is a list of field names (e.g. "DealCode") to @@ -201,29 +245,44 @@ func (s *ChangePlanRequest) MarshalJSON() ([]byte, error) { // Customer: JSON template for a customer. type Customer struct { - // AlternateEmail: The alternate email of the customer. 
+ // AlternateEmail: Like the "Customer email" in the reseller tools, this + // email is the secondary contact used if something happens to the + // customer's service such as service outage or a security issue. This + // property is required when creating a new customer and should not use + // the same domain as customerDomain. AlternateEmail string `json:"alternateEmail,omitempty"` - // CustomerDomain: The domain name of the customer. + // CustomerDomain: The customer's primary domain name string. + // customerDomain is required when creating a new customer. Do not + // include the www prefix in the domain when adding a customer. CustomerDomain string `json:"customerDomain,omitempty"` // CustomerDomainVerified: Whether the customer's primary domain has // been verified. CustomerDomainVerified bool `json:"customerDomainVerified,omitempty"` - // CustomerId: The id of the customer. + // CustomerId: This property will always be returned in a response as + // the unique identifier generated by Google. In a request, this + // property can be either the primary domain or the unique identifier + // generated by Google. CustomerId string `json:"customerId,omitempty"` - // Kind: Identifies the resource as a customer. + // Kind: Identifies the resource as a customer. Value: reseller#customer Kind string `json:"kind,omitempty"` - // PhoneNumber: The phone number of the customer. + // PhoneNumber: Customer contact phone number. This can be continuous + // numbers, with spaces, etc. But it must be a real phone number and + // not, for example, "123". See phone local format conventions. PhoneNumber string `json:"phoneNumber,omitempty"` - // PostalAddress: The postal address of the customer. + // PostalAddress: A customer's address information. Each field has a + // limit of 255 characters. PostalAddress *Address `json:"postalAddress,omitempty"` - // ResourceUiUrl: Ui url for customer resource. + // ResourceUiUrl: URL to customer's Admin console dashboard. The + // read-only URL is generated by the API service. This is used if your + // client application requires the customer to complete a task in the + // Admin console. ResourceUiUrl string `json:"resourceUiUrl,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -257,9 +316,13 @@ func (s *Customer) MarshalJSON() ([]byte, error) { // RenewalSettings: JSON template for a subscription renewal settings. type RenewalSettings struct { // Kind: Identifies the resource as a subscription renewal setting. + // Value: subscriptions#renewalSettings Kind string `json:"kind,omitempty"` - // RenewalType: Subscription renewal type. + // RenewalType: Renewal settings for the annual commitment plan. For + // more detailed information, see renewal options in the administrator + // help center. When renewing a subscription, the renewalType is a + // required property. RenewalType string `json:"renewalType,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -285,9 +348,80 @@ func (s *RenewalSettings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResellernotifyGetwatchdetailsResponse: JSON template for +// resellernotify getwatchdetails response. +type ResellernotifyGetwatchdetailsResponse struct { + // ServiceAccountEmailAddresses: List of registered service accounts.
+ ServiceAccountEmailAddresses []string `json:"serviceAccountEmailAddresses,omitempty"` + + // TopicName: Topic name of the PubSub + TopicName string `json:"topicName,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "ServiceAccountEmailAddresses") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "ServiceAccountEmailAddresses") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResellernotifyGetwatchdetailsResponse) MarshalJSON() ([]byte, error) { + type noMethod ResellernotifyGetwatchdetailsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResellernotifyResource: JSON template for resellernotify response. +type ResellernotifyResource struct { + // TopicName: Topic name of the PubSub + TopicName string `json:"topicName,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "TopicName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TopicName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResellernotifyResource) MarshalJSON() ([]byte, error) { + type noMethod ResellernotifyResource + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Seats: JSON template for subscription seats. type Seats struct { // Kind: Identifies the resource as a subscription change plan request. + // Value: subscriptions#seats Kind string `json:"kind,omitempty"` // LicensedNumberOfSeats: Read-only field containing the current number @@ -295,13 +429,42 @@ type Seats struct { // secondary subscriptions such as Google-Vault and Drive-storage. LicensedNumberOfSeats int64 `json:"licensedNumberOfSeats,omitempty"` - // MaximumNumberOfSeats: Maximum number of seats that can be purchased. - // This needs to be provided only for a non-commitment plan. 
For a - // commitment plan it is decided by the contract. + // MaximumNumberOfSeats: The maximumNumberOfSeats property is the + // maximum number of licenses that the customer can purchase. This + // property applies to plans other than the annual commitment plan. How + // a user's licenses are managed depends on the subscription's payment + // plan: + // - annual commitment plan (with monthly or yearly payments) — For + // this plan, a reseller is invoiced on the number of user licenses in + // the numberOfSeats property. The maximumNumberOfSeats property is a + // read-only property in the API's response. + // - flexible plan — For this plan, a reseller is invoiced on the + // actual number of users which is capped by the maximumNumberOfSeats. + // This is the maximum number of user licenses a customer has for user + // license provisioning. This quantity can be increased up to the + // maximum limit defined in the reseller's contract. And the minimum + // quantity is the current number of users in the customer account. + // - 30-day free trial plan — A subscription in a 30-day free trial is + // restricted to maximum 10 seats. MaximumNumberOfSeats int64 `json:"maximumNumberOfSeats,omitempty"` - // NumberOfSeats: Number of seats to purchase. This is applicable only - // for a commitment plan. + // NumberOfSeats: The numberOfSeats property holds the customer's number + // of user licenses. How a user's licenses are managed depends on the + // subscription's plan: + // - annual commitment plan (with monthly or yearly pay) — For this + // plan, a reseller is invoiced on the number of user licenses in the + // numberOfSeats property. This is the maximum number of user licenses + // that a reseller's customer can create. The reseller can add more + // licenses, but once set, the numberOfSeats can not be reduced until + // renewal. The reseller is invoiced based on the numberOfSeats value + // regardless of how many of these user licenses are provisioned users. + // + // - flexible plan — For this plan, a reseller is invoiced on the + // actual number of users which is capped by the maximumNumberOfSeats. + // The numberOfSeats property is not used in the request or response for + // flexible plan customers. + // - 30-day free trial plan — The numberOfSeats property is not used + // in the request or response for an account in a 30-day trial. NumberOfSeats int64 `json:"numberOfSeats,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -329,57 +492,87 @@ func (s *Seats) MarshalJSON() ([]byte, error) { // Subscription: JSON template for a subscription. type Subscription struct { - // BillingMethod: Billing method of this subscription. + // BillingMethod: Read-only field that returns the current billing + // method for a subscription. BillingMethod string `json:"billingMethod,omitempty"` - // CreationTime: Creation time of this subscription in milliseconds - // since Unix epoch. + // CreationTime: The creationTime property is the date when subscription + // was created. It is in milliseconds using the Epoch format. See an + // example Epoch converter. CreationTime int64 `json:"creationTime,omitempty,string"` // CustomerDomain: Primary domain name of the customer CustomerDomain string `json:"customerDomain,omitempty"` - // CustomerId: The id of the customer to whom the subscription belongs. + // CustomerId: This property will always be returned in a response as + // the unique identifier generated by Google. 
In a request, this + // property can be either the primary domain or the unique identifier + // generated by Google. CustomerId string `json:"customerId,omitempty"` - // DealCode: External name of the deal, if this subscription was - // provisioned under one. Otherwise this field will be empty. + // DealCode: Google-issued code (100 char max) for discounted pricing on + // subscription plans. Deal code must be included in insert requests in + // order to receive discounted rate. This property is optional, regular + // pricing applies if left empty. DealCode string `json:"dealCode,omitempty"` - // Kind: Identifies the resource as a Subscription. + // Kind: Identifies the resource as a Subscription. Value: + // reseller#subscription Kind string `json:"kind,omitempty"` - // Plan: Plan details of the subscription + // Plan: The plan property is required. In this version of the API, the + // G Suite plans are the flexible plan, annual commitment plan, and the + // 30-day free trial plan. For more information about the API"s payment + // plans, see the API concepts. Plan *SubscriptionPlan `json:"plan,omitempty"` - // PurchaseOrderId: Purchase order id for your order tracking purposes. + // PurchaseOrderId: This is an optional property. This purchase order + // (PO) information is for resellers to use for their company tracking + // usage. If a purchaseOrderId value is given it appears in the API + // responses and shows up in the invoice. The property accepts up to 80 + // plain text characters. PurchaseOrderId string `json:"purchaseOrderId,omitempty"` - // RenewalSettings: Renewal settings of the subscription. + // RenewalSettings: Renewal settings for the annual commitment plan. For + // more detailed information, see renewal options in the administrator + // help center. RenewalSettings *RenewalSettings `json:"renewalSettings,omitempty"` - // ResourceUiUrl: Ui url for subscription resource. + // ResourceUiUrl: URL to customer's Subscriptions page in the Admin + // console. The read-only URL is generated by the API service. This is + // used if your client application requires the customer to complete a + // task using the Subscriptions page in the Admin console. ResourceUiUrl string `json:"resourceUiUrl,omitempty"` - // Seats: Number/Limit of seats in the new plan. + // Seats: This is a required property. The number and limit of user seat + // licenses in the plan. Seats *Seats `json:"seats,omitempty"` - // SkuId: Name of the sku for which this subscription is purchased. + // SkuId: A required property. The skuId is a unique system identifier + // for a product's SKU assigned to a customer in the subscription. For + // products and SKUs available in this version of the API, see Product + // and SKU IDs. SkuId string `json:"skuId,omitempty"` - // Status: Status of the subscription. + // Status: This is an optional property. Status string `json:"status,omitempty"` - // SubscriptionId: The id of the subscription. + // SubscriptionId: The subscriptionId is the subscription identifier and + // is unique for each customer. This is a required property. Since a + // subscriptionId changes when a subscription is updated, we recommend + // not using this ID as a key for persistent data. Use the + // subscriptionId as described in retrieve all reseller subscriptions. SubscriptionId string `json:"subscriptionId,omitempty"` // SuspensionReasons: Read-only field containing an enumerable of all // the current suspension reasons for a subscription. 
It is possible for // a subscription to have many concurrent, overlapping suspension // reasons. A subscription's STATUS is SUSPENDED until all pending - // suspensions are removed. Possible options include: + // suspensions are removed. + // + // Possible options include: // - PENDING_TOS_ACCEPTANCE - The customer has not logged in and - // accepted the Google Apps Resold Terms of Services. + // accepted the G Suite Resold Terms of Services. // - RENEWAL_WITH_TYPE_CANCEL - The customer's commitment ended and // their service was cancelled at the end of their term. // - RESELLER_INITIATED - A manual suspension invoked by a Reseller. @@ -389,10 +582,14 @@ type Subscription struct { // (e.g. abuse or otherwise). SuspensionReasons []string `json:"suspensionReasons,omitempty"` - // TransferInfo: Transfer related information for the subscription. + // TransferInfo: Read-only transfer related information for the + // subscription. For more information, see retrieve transferable + // subscriptions for a customer. TransferInfo *SubscriptionTransferInfo `json:"transferInfo,omitempty"` - // TrialSettings: Trial Settings of the subscription. + // TrialSettings: The G Suite annual commitment and flexible payment + // plans can be in a 30-day free trial. For more information, see the + // API concepts. TrialSettings *SubscriptionTrialSettings `json:"trialSettings,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -422,16 +619,37 @@ func (s *Subscription) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubscriptionPlan: Plan details of the subscription +// SubscriptionPlan: The plan property is required. In this version of +// the API, the G Suite plans are the flexible plan, annual commitment +// plan, and the 30-day free trial plan. For more information about the +// API"s payment plans, see the API concepts. type SubscriptionPlan struct { - // CommitmentInterval: Interval of the commitment if it is a commitment - // plan. + // CommitmentInterval: In this version of the API, annual commitment + // plan's interval is one year. CommitmentInterval *SubscriptionPlanCommitmentInterval `json:"commitmentInterval,omitempty"` - // IsCommitmentPlan: Whether the plan is a commitment plan or not. + // IsCommitmentPlan: The isCommitmentPlan property's boolean value + // identifies the plan as an annual commitment plan: + // - true — The subscription's plan is an annual commitment plan. + // - false — The plan is not an annual commitment plan. IsCommitmentPlan bool `json:"isCommitmentPlan,omitempty"` - // PlanName: The plan name of this subscription's plan. + // PlanName: The planName property is required. This is the name of the + // subscription's plan. For more information about the Google payment + // plans, see the API concepts. + // + // Possible values are: + // - ANNUAL_MONTHLY_PAY — The annual commitment plan with monthly + // payments + // - ANNUAL_YEARLY_PAY — The annual commitment plan with yearly + // payments + // - FLEXIBLE — The flexible plan + // - TRIAL — The 30-day free trial plan. A subscription in trial will + // be suspended after the 30th free day if no payment plan is assigned. + // Calling changePlan will assign a payment plan to a trial but will not + // activate the plan. A trial will automatically begin its assigned + // payment plan after its 30th free day or immediately after calling + // startPaidService. 
PlanName string `json:"planName,omitempty"` // ForceSendFields is a list of field names (e.g. "CommitmentInterval") @@ -458,15 +676,16 @@ func (s *SubscriptionPlan) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubscriptionPlanCommitmentInterval: Interval of the commitment if it -// is a commitment plan. +// SubscriptionPlanCommitmentInterval: In this version of the API, +// annual commitment plan's interval is one year. type SubscriptionPlanCommitmentInterval struct { - // EndTime: End time of the commitment interval in milliseconds since - // Unix epoch. + // EndTime: An annual commitment plan's interval's endTime in + // milliseconds using the UNIX Epoch format. See an example Epoch + // converter. EndTime int64 `json:"endTime,omitempty,string"` - // StartTime: Start time of the commitment interval in milliseconds - // since Unix epoch. + // StartTime: An annual commitment plan's interval's startTime in + // milliseconds using UNIX Epoch format. See an example Epoch converter. StartTime int64 `json:"startTime,omitempty,string"` // ForceSendFields is a list of field names (e.g. "EndTime") to @@ -492,13 +711,19 @@ func (s *SubscriptionPlanCommitmentInterval) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubscriptionTransferInfo: Transfer related information for the -// subscription. +// SubscriptionTransferInfo: Read-only transfer related information for +// the subscription. For more information, see retrieve transferable +// subscriptions for a customer. type SubscriptionTransferInfo struct { + // MinimumTransferableSeats: When inserting a subscription, this is the + // minimum number of seats listed in the transfer order for this + // product. For example, if the customer has 20 users, the reseller + // cannot place a transfer order of 15 seats. The minimum is 20 seats. MinimumTransferableSeats int64 `json:"minimumTransferableSeats,omitempty"` - // TransferabilityExpirationTime: Time when transfer token or intent to - // transfer will expire. + // TransferabilityExpirationTime: The time when transfer token or intent + // to transfer will expire. The time is in milliseconds using UNIX Epoch + // format. TransferabilityExpirationTime int64 `json:"transferabilityExpirationTime,omitempty,string"` // ForceSendFields is a list of field names (e.g. @@ -526,12 +751,18 @@ func (s *SubscriptionTransferInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubscriptionTrialSettings: Trial Settings of the subscription. +// SubscriptionTrialSettings: The G Suite annual commitment and flexible +// payment plans can be in a 30-day free trial. For more information, +// see the API concepts. type SubscriptionTrialSettings struct { - // IsInTrial: Whether the subscription is in trial. + // IsInTrial: Determines if a subscription's plan is in a 30-day free + // trial or not: + // - true — The plan is in trial. + // - false — The plan is not in trial. IsInTrial bool `json:"isInTrial,omitempty"` - // TrialEndTime: End time of the trial in milliseconds since Unix epoch. + // TrialEndTime: Date when the trial ends. The value is in milliseconds + // using the UNIX Epoch format. See an example Epoch converter. TrialEndTime int64 `json:"trialEndTime,omitempty,string"` // ForceSendFields is a list of field names (e.g. 
"IsInTrial") to @@ -560,6 +791,7 @@ func (s *SubscriptionTrialSettings) MarshalJSON() ([]byte, error) { // Subscriptions: JSON template for a subscription list. type Subscriptions struct { // Kind: Identifies the resource as a collection of subscriptions. + // Value: reseller#subscriptions Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large @@ -608,8 +840,7 @@ type CustomersGetCall struct { header_ http.Header } -// Get: Gets a customer resource if one exists and is owned by the -// reseller. +// Get: Get a customer account. func (r *CustomersService) Get(customerId string) *CustomersGetCall { c := &CustomersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -657,6 +888,7 @@ func (c *CustomersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -710,7 +942,7 @@ func (c *CustomersGetCall) Do(opts ...googleapi.CallOption) (*Customer, error) { } return ret, nil // { - // "description": "Gets a customer resource if one exists and is owned by the reseller.", + // "description": "Get a customer account.", // "httpMethod": "GET", // "id": "reseller.customers.get", // "parameterOrder": [ @@ -718,7 +950,7 @@ func (c *CustomersGetCall) Do(opts ...googleapi.CallOption) (*Customer, error) { // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" @@ -746,16 +978,20 @@ type CustomersInsertCall struct { header_ http.Header } -// Insert: Creates a customer resource if one does not already exist. +// Insert: Order a new customer's account. func (r *CustomersService) Insert(customer *Customer) *CustomersInsertCall { c := &CustomersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customer = customer return c } -// CustomerAuthToken sets the optional parameter "customerAuthToken": An -// auth token needed for inserting a customer for which domain already -// exists. Can be generated at https://admin.google.com/TransferToken. +// CustomerAuthToken sets the optional parameter "customerAuthToken": +// The customerAuthToken query string is required when creating a resold +// account that transfers a direct customer's subscription or transfers +// another reseller customer's subscription to your reseller management. +// This is a hexadecimal authentication token needed to complete the +// subscription transfer. For more information, see the administrator +// help center. 
func (c *CustomersInsertCall) CustomerAuthToken(customerAuthToken string) *CustomersInsertCall { c.urlParams_.Set("customerAuthToken", customerAuthToken) return c @@ -792,6 +1028,7 @@ func (c *CustomersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customer) if err != nil { @@ -844,12 +1081,12 @@ func (c *CustomersInsertCall) Do(opts ...googleapi.CallOption) (*Customer, error } return ret, nil // { - // "description": "Creates a customer resource if one does not already exist.", + // "description": "Order a new customer's account.", // "httpMethod": "POST", // "id": "reseller.customers.insert", // "parameters": { // "customerAuthToken": { - // "description": "An auth token needed for inserting a customer for which domain already exists. Can be generated at https://admin.google.com/TransferToken. Optional.", + // "description": "The customerAuthToken query string is required when creating a resold account that transfers a direct customer's subscription or transfers another reseller customer's subscription to your reseller management. This is a hexadecimal authentication token needed to complete the subscription transfer. For more information, see the administrator help center.", // "location": "query", // "type": "string" // } @@ -879,8 +1116,8 @@ type CustomersPatchCall struct { header_ http.Header } -// Patch: Update a customer resource if one it exists and is owned by -// the reseller. This method supports patch semantics. +// Patch: Update a customer account's settings. This method supports +// patch semantics. func (r *CustomersService) Patch(customerId string, customer *Customer) *CustomersPatchCall { c := &CustomersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -919,6 +1156,7 @@ func (c *CustomersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customer) if err != nil { @@ -974,7 +1212,7 @@ func (c *CustomersPatchCall) Do(opts ...googleapi.CallOption) (*Customer, error) } return ret, nil // { - // "description": "Update a customer resource if one it exists and is owned by the reseller. This method supports patch semantics.", + // "description": "Update a customer account's settings. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "reseller.customers.patch", // "parameterOrder": [ @@ -982,7 +1220,7 @@ func (c *CustomersPatchCall) Do(opts ...googleapi.CallOption) (*Customer, error) // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" @@ -1013,8 +1251,7 @@ type CustomersUpdateCall struct { header_ http.Header } -// Update: Update a customer resource if one it exists and is owned by -// the reseller. +// Update: Update a customer account's settings. 
func (r *CustomersService) Update(customerId string, customer *Customer) *CustomersUpdateCall { c := &CustomersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -1053,6 +1290,7 @@ func (c *CustomersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.customer) if err != nil { @@ -1108,7 +1346,7 @@ func (c *CustomersUpdateCall) Do(opts ...googleapi.CallOption) (*Customer, error } return ret, nil // { - // "description": "Update a customer resource if one it exists and is owned by the reseller.", + // "description": "Update a customer account's settings.", // "httpMethod": "PUT", // "id": "reseller.customers.update", // "parameterOrder": [ @@ -1116,7 +1354,7 @@ func (c *CustomersUpdateCall) Do(opts ...googleapi.CallOption) (*Customer, error // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" @@ -1136,6 +1374,377 @@ func (c *CustomersUpdateCall) Do(opts ...googleapi.CallOption) (*Customer, error } +// method id "reseller.resellernotify.getwatchdetails": + +type ResellernotifyGetwatchdetailsCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Getwatchdetails: Returns all the details of the watch corresponding +// to the reseller. +func (r *ResellernotifyService) Getwatchdetails() *ResellernotifyGetwatchdetailsCall { + c := &ResellernotifyGetwatchdetailsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResellernotifyGetwatchdetailsCall) Fields(s ...googleapi.Field) *ResellernotifyGetwatchdetailsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ResellernotifyGetwatchdetailsCall) IfNoneMatch(entityTag string) *ResellernotifyGetwatchdetailsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResellernotifyGetwatchdetailsCall) Context(ctx context.Context) *ResellernotifyGetwatchdetailsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request.
+func (c *ResellernotifyGetwatchdetailsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResellernotifyGetwatchdetailsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "resellernotify/getwatchdetails") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "reseller.resellernotify.getwatchdetails" call. +// Exactly one of *ResellernotifyGetwatchdetailsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *ResellernotifyGetwatchdetailsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResellernotifyGetwatchdetailsCall) Do(opts ...googleapi.CallOption) (*ResellernotifyGetwatchdetailsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResellernotifyGetwatchdetailsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns all the details of the watch corresponding to the reseller.", + // "httpMethod": "GET", + // "id": "reseller.resellernotify.getwatchdetails", + // "path": "resellernotify/getwatchdetails", + // "response": { + // "$ref": "ResellernotifyGetwatchdetailsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/apps.order", + // "https://www.googleapis.com/auth/apps.order.readonly" + // ] + // } + +} + +// method id "reseller.resellernotify.register": + +type ResellernotifyRegisterCall struct { + s *Service + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Register: Registers a Reseller for receiving notifications. +func (r *ResellernotifyService) Register() *ResellernotifyRegisterCall { + c := &ResellernotifyRegisterCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// ServiceAccountEmailAddress sets the optional parameter +// "serviceAccountEmailAddress": The service account which will own the +// created Cloud-PubSub topic. +func (c *ResellernotifyRegisterCall) ServiceAccountEmailAddress(serviceAccountEmailAddress string) *ResellernotifyRegisterCall { + c.urlParams_.Set("serviceAccountEmailAddress", serviceAccountEmailAddress) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResellernotifyRegisterCall) Fields(s ...googleapi.Field) *ResellernotifyRegisterCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResellernotifyRegisterCall) Context(ctx context.Context) *ResellernotifyRegisterCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResellernotifyRegisterCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResellernotifyRegisterCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "resellernotify/register") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "reseller.resellernotify.register" call. +// Exactly one of *ResellernotifyResource or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResellernotifyResource.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResellernotifyRegisterCall) Do(opts ...googleapi.CallOption) (*ResellernotifyResource, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResellernotifyResource{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Registers a Reseller for receiving notifications.", + // "httpMethod": "POST", + // "id": "reseller.resellernotify.register", + // "parameters": { + // "serviceAccountEmailAddress": { + // "description": "The service account which will own the created Cloud-PubSub topic.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "resellernotify/register", + // "response": { + // "$ref": "ResellernotifyResource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/apps.order" + // ] + // } + +} + +// method id "reseller.resellernotify.unregister": + +type ResellernotifyUnregisterCall struct { + s *Service + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Unregister: Unregisters a Reseller for receiving notifications. 
+func (r *ResellernotifyService) Unregister() *ResellernotifyUnregisterCall { + c := &ResellernotifyUnregisterCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// ServiceAccountEmailAddress sets the optional parameter +// "serviceAccountEmailAddress": The service account which owns the +// Cloud-PubSub topic. +func (c *ResellernotifyUnregisterCall) ServiceAccountEmailAddress(serviceAccountEmailAddress string) *ResellernotifyUnregisterCall { + c.urlParams_.Set("serviceAccountEmailAddress", serviceAccountEmailAddress) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResellernotifyUnregisterCall) Fields(s ...googleapi.Field) *ResellernotifyUnregisterCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResellernotifyUnregisterCall) Context(ctx context.Context) *ResellernotifyUnregisterCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResellernotifyUnregisterCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResellernotifyUnregisterCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "resellernotify/unregister") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "reseller.resellernotify.unregister" call. +// Exactly one of *ResellernotifyResource or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResellernotifyResource.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResellernotifyUnregisterCall) Do(opts ...googleapi.CallOption) (*ResellernotifyResource, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResellernotifyResource{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Unregisters a Reseller for receiving notifications.", + // "httpMethod": "POST", + // "id": "reseller.resellernotify.unregister", + // "parameters": { + // "serviceAccountEmailAddress": { + // "description": "The service account which owns the Cloud-PubSub topic.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "resellernotify/unregister", + // "response": { + // "$ref": "ResellernotifyResource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/apps.order" + // ] + // } + +} + // method id "reseller.subscriptions.activate": type SubscriptionsActivateCall struct { @@ -1187,6 +1796,7 @@ func (c *SubscriptionsActivateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customers/{customerId}/subscriptions/{subscriptionId}/activate") @@ -1247,13 +1857,13 @@ func (c *SubscriptionsActivateCall) Do(opts ...googleapi.CallOption) (*Subscript // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" @@ -1282,7 +1892,9 @@ type SubscriptionsChangePlanCall struct { header_ http.Header } -// ChangePlan: Changes the plan of a subscription +// ChangePlan: Update a subscription plan. Use this method to update a +// plan for a 30-day trial or a flexible plan subscription to an annual +// commitment plan with monthly or yearly payments. 
func (r *SubscriptionsService) ChangePlan(customerId string, subscriptionId string, changeplanrequest *ChangePlanRequest) *SubscriptionsChangePlanCall { c := &SubscriptionsChangePlanCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -1322,6 +1934,7 @@ func (c *SubscriptionsChangePlanCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.changeplanrequest) if err != nil { @@ -1378,7 +1991,7 @@ func (c *SubscriptionsChangePlanCall) Do(opts ...googleapi.CallOption) (*Subscri } return ret, nil // { - // "description": "Changes the plan of a subscription", + // "description": "Update a subscription plan. Use this method to update a plan for a 30-day trial or a flexible plan subscription to an annual commitment plan with monthly or yearly payments.", // "httpMethod": "POST", // "id": "reseller.subscriptions.changePlan", // "parameterOrder": [ @@ -1387,13 +2000,13 @@ func (c *SubscriptionsChangePlanCall) Do(opts ...googleapi.CallOption) (*Subscri // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" @@ -1425,7 +2038,8 @@ type SubscriptionsChangeRenewalSettingsCall struct { header_ http.Header } -// ChangeRenewalSettings: Changes the renewal settings of a subscription +// ChangeRenewalSettings: Update a user license's renewal settings. This +// is applicable for accounts with annual commitment plans only. func (r *SubscriptionsService) ChangeRenewalSettings(customerId string, subscriptionId string, renewalsettings *RenewalSettings) *SubscriptionsChangeRenewalSettingsCall { c := &SubscriptionsChangeRenewalSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -1465,6 +2079,7 @@ func (c *SubscriptionsChangeRenewalSettingsCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.renewalsettings) if err != nil { @@ -1521,7 +2136,7 @@ func (c *SubscriptionsChangeRenewalSettingsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Changes the renewal settings of a subscription", + // "description": "Update a user license's renewal settings. 
This is applicable for accounts with annual commitment plans only.", // "httpMethod": "POST", // "id": "reseller.subscriptions.changeRenewalSettings", // "parameterOrder": [ @@ -1530,13 +2145,13 @@ func (c *SubscriptionsChangeRenewalSettingsCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" @@ -1568,7 +2183,7 @@ type SubscriptionsChangeSeatsCall struct { header_ http.Header } -// ChangeSeats: Changes the seats configuration of a subscription +// ChangeSeats: Update a subscription's user license settings. func (r *SubscriptionsService) ChangeSeats(customerId string, subscriptionId string, seats *Seats) *SubscriptionsChangeSeatsCall { c := &SubscriptionsChangeSeatsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -1608,6 +2223,7 @@ func (c *SubscriptionsChangeSeatsCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.seats) if err != nil { @@ -1664,7 +2280,7 @@ func (c *SubscriptionsChangeSeatsCall) Do(opts ...googleapi.CallOption) (*Subscr } return ret, nil // { - // "description": "Changes the seats configuration of a subscription", + // "description": "Update a subscription's user license settings.", // "httpMethod": "POST", // "id": "reseller.subscriptions.changeSeats", // "parameterOrder": [ @@ -1673,13 +2289,13 @@ func (c *SubscriptionsChangeSeatsCall) Do(opts ...googleapi.CallOption) (*Subscr // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. 
And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" @@ -1710,7 +2326,7 @@ type SubscriptionsDeleteCall struct { header_ http.Header } -// Delete: Cancels/Downgrades a subscription. +// Delete: Cancel, suspend or transfer a subscription to direct. func (r *SubscriptionsService) Delete(customerId string, subscriptionId string, deletionType string) *SubscriptionsDeleteCall { c := &SubscriptionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -1750,6 +2366,7 @@ func (c *SubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customers/{customerId}/subscriptions/{subscriptionId}") @@ -1776,7 +2393,7 @@ func (c *SubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Cancels/Downgrades a subscription.", + // "description": "Cancel, suspend or transfer a subscription to direct.", // "httpMethod": "DELETE", // "id": "reseller.subscriptions.delete", // "parameterOrder": [ @@ -1786,13 +2403,13 @@ func (c *SubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) error { // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "deletionType": { - // "description": "Whether the subscription is to be fully cancelled or downgraded", + // "description": "The deletionType query string enables the cancellation, downgrade, or suspension of a subscription.", // "enum": [ // "cancel", // "downgrade", @@ -1800,17 +2417,17 @@ func (c *SubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) error { // "transfer_to_direct" // ], // "enumDescriptions": [ - // "Cancels the subscription immediately", - // "Downgrades a Google Apps for Business subscription to Google Apps", - // "Suspends the subscriptions for 4 days before cancelling it", - // "Transfers a subscription directly to Google" + // "Cancels the subscription immediately. This does not apply to a G Suite subscription.", + // "Downgrades a G Suite subscription to a Google Apps Free edition subscription only if the customer was initially subscribed to a Google Apps Free edition (also known as the Standard edition). Once downgraded, the customer no longer has access to the previous G Suite subscription and is no longer managed by the reseller.\n\nA G Suite subscription's downgrade cannot be invoked if an active or suspended Google Drive or Google Vault subscription is present. The Google Drive or Google Vault subscription must be cancelled before the G Suite subscription's downgrade is invoked.\n\nThe downgrade deletionType does not apply to other products or G Suite SKUs.", + // "(DEPRECATED) The G Suite account is suspended for four days and then cancelled. Once suspended, an administrator has access to the suspended account, but the account users can not access their services. 
A suspension can be lifted, using the reseller tools.\n\nA G Suite subscription's suspension can not be invoked if an active or suspended Google Drive or Google Vault subscription is present. The Google Drive or Google Vault subscription must be cancelled before the G Suite subscription's suspension is invoked.", + // "Transfers a subscription directly to Google.  The customer is immediately transferred to a direct billing relationship with Google and is given a short amount of time with no service interruption. The customer can then choose to set up billing directly with Google by using a credit card, or they can transfer to another reseller." // ], // "location": "query", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" @@ -1836,7 +2453,7 @@ type SubscriptionsGetCall struct { header_ http.Header } -// Get: Gets a subscription of the customer. +// Get: Get a specific subscription. func (r *SubscriptionsService) Get(customerId string, subscriptionId string) *SubscriptionsGetCall { c := &SubscriptionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -1885,6 +2502,7 @@ func (c *SubscriptionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1939,7 +2557,7 @@ func (c *SubscriptionsGetCall) Do(opts ...googleapi.CallOption) (*Subscription, } return ret, nil // { - // "description": "Gets a subscription of the customer.", + // "description": "Get a specific subscription.", // "httpMethod": "GET", // "id": "reseller.subscriptions.get", // "parameterOrder": [ @@ -1948,13 +2566,13 @@ func (c *SubscriptionsGetCall) Do(opts ...googleapi.CallOption) (*Subscription, // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" @@ -1983,7 +2601,7 @@ type SubscriptionsInsertCall struct { header_ http.Header } -// Insert: Creates/Transfers a subscription for the customer. +// Insert: Create or transfer a subscription. 
func (r *SubscriptionsService) Insert(customerId string, subscription *Subscription) *SubscriptionsInsertCall { c := &SubscriptionsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -1991,9 +2609,13 @@ func (r *SubscriptionsService) Insert(customerId string, subscription *Subscript return c } -// CustomerAuthToken sets the optional parameter "customerAuthToken": An -// auth token needed for transferring a subscription. Can be generated -// at https://www.google.com/a/cpanel/customer-domain/TransferToken. +// CustomerAuthToken sets the optional parameter "customerAuthToken": +// The customerAuthToken query string is required when creating a resold +// account that transfers a direct customer's subscription or transfers +// another reseller customer's subscription to your reseller management. +// This is a hexadecimal authentication token needed to complete the +// subscription transfer. For more information, see the administrator +// help center. func (c *SubscriptionsInsertCall) CustomerAuthToken(customerAuthToken string) *SubscriptionsInsertCall { c.urlParams_.Set("customerAuthToken", customerAuthToken) return c @@ -2030,6 +2652,7 @@ func (c *SubscriptionsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { @@ -2085,7 +2708,7 @@ func (c *SubscriptionsInsertCall) Do(opts ...googleapi.CallOption) (*Subscriptio } return ret, nil // { - // "description": "Creates/Transfers a subscription for the customer.", + // "description": "Create or transfer a subscription.", // "httpMethod": "POST", // "id": "reseller.subscriptions.insert", // "parameterOrder": [ @@ -2093,12 +2716,12 @@ func (c *SubscriptionsInsertCall) Do(opts ...googleapi.CallOption) (*Subscriptio // ], // "parameters": { // "customerAuthToken": { - // "description": "An auth token needed for transferring a subscription. Can be generated at https://www.google.com/a/cpanel/customer-domain/TransferToken. Optional.", + // "description": "The customerAuthToken query string is required when creating a resold account that transfers a direct customer's subscription or transfers another reseller customer's subscription to your reseller management. This is a hexadecimal authentication token needed to complete the subscription transfer. For more information, see the administrator help center.", // "location": "query", // "type": "string" // }, // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" @@ -2128,39 +2751,54 @@ type SubscriptionsListCall struct { header_ http.Header } -// List: Lists subscriptions of a reseller, optionally filtered by a -// customer name prefix. +// List: List of subscriptions managed by the reseller. The list can be +// all subscriptions, all of a customer's subscriptions, or all of a +// customer's transferable subscriptions. 
func (r *SubscriptionsService) List() *SubscriptionsListCall { c := &SubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } -// CustomerAuthToken sets the optional parameter "customerAuthToken": An -// auth token needed if the customer is not a resold customer of this -// reseller. Can be generated at -// https://www.google.com/a/cpanel/customer-domain/TransferToken. +// CustomerAuthToken sets the optional parameter "customerAuthToken": +// The customerAuthToken query string is required when creating a resold +// account that transfers a direct customer's subscription or transfers +// another reseller customer's subscription to your reseller management. +// This is a hexadecimal authentication token needed to complete the +// subscription transfer. For more information, see the administrator +// help center. func (c *SubscriptionsListCall) CustomerAuthToken(customerAuthToken string) *SubscriptionsListCall { c.urlParams_.Set("customerAuthToken", customerAuthToken) return c } -// CustomerId sets the optional parameter "customerId": Id of the -// Customer +// CustomerId sets the optional parameter "customerId": Either the +// customer's primary domain name or the customer's unique identifier. +// If using the domain name, we do not recommend using a customerId as a +// key for persistent data. If the domain name for a customerId is +// changed, the Google system automatically updates. func (c *SubscriptionsListCall) CustomerId(customerId string) *SubscriptionsListCall { c.urlParams_.Set("customerId", customerId) return c } // CustomerNamePrefix sets the optional parameter "customerNamePrefix": -// Prefix of the customer's domain name by which the subscriptions -// should be filtered. Optional +// When retrieving all of your subscriptions and filtering for specific +// customers, you can enter a prefix for a customer name. Using an +// example customer group that includes exam.com, example20.com and +// example.com: +// - exa -- Returns all customer names that start with 'exa' which could +// include exam.com, example20.com, and example.com. A name prefix is +// similar to using a regular expression's asterisk, exa*. +// - example -- Returns example20.com and example.com. func (c *SubscriptionsListCall) CustomerNamePrefix(customerNamePrefix string) *SubscriptionsListCall { c.urlParams_.Set("customerNamePrefix", customerNamePrefix) return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of results to return +// MaxResults sets the optional parameter "maxResults": When retrieving +// a large list, the maxResults is the maximum number of results per +// page. The nextPageToken value takes you to the next page. The default +// is 20. func (c *SubscriptionsListCall) MaxResults(maxResults int64) *SubscriptionsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -2214,6 +2852,7 @@ func (c *SubscriptionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2264,27 +2903,27 @@ func (c *SubscriptionsListCall) Do(opts ...googleapi.CallOption) (*Subscriptions } return ret, nil // { - // "description": "Lists subscriptions of a reseller, optionally filtered by a customer name prefix.", + // "description": "List of subscriptions managed by the reseller. 
The list can be all subscriptions, all of a customer's subscriptions, or all of a customer's transferable subscriptions.", // "httpMethod": "GET", // "id": "reseller.subscriptions.list", // "parameters": { // "customerAuthToken": { - // "description": "An auth token needed if the customer is not a resold customer of this reseller. Can be generated at https://www.google.com/a/cpanel/customer-domain/TransferToken.Optional.", + // "description": "The customerAuthToken query string is required when creating a resold account that transfers a direct customer's subscription or transfers another reseller customer's subscription to your reseller management. This is a hexadecimal authentication token needed to complete the subscription transfer. For more information, see the administrator help center.", // "location": "query", // "type": "string" // }, // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "query", // "type": "string" // }, // "customerNamePrefix": { - // "description": "Prefix of the customer's domain name by which the subscriptions should be filtered. Optional", + // "description": "When retrieving all of your subscriptions and filtering for specific customers, you can enter a prefix for a customer name. Using an example customer group that includes exam.com, example20.com and example.com: \n- exa -- Returns all customer names that start with 'exa' which could include exam.com, example20.com, and example.com. A name prefix is similar to using a regular expression's asterisk, exa*. \n- example -- Returns example20.com and example.com.", // "location": "query", // "type": "string" // }, // "maxResults": { - // "description": "Maximum number of results to return", + // "description": "When retrieving a large list, the maxResults is the maximum number of results per page. The nextPageToken value takes you to the next page. The default is 20.", // "format": "uint32", // "location": "query", // "maximum": "100", @@ -2341,7 +2980,8 @@ type SubscriptionsStartPaidServiceCall struct { header_ http.Header } -// StartPaidService: Starts paid service of a trial subscription +// StartPaidService: Immediately move a 30-day free trial subscription +// to a paid service subscription. 
func (r *SubscriptionsService) StartPaidService(customerId string, subscriptionId string) *SubscriptionsStartPaidServiceCall { c := &SubscriptionsStartPaidServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -2380,6 +3020,7 @@ func (c *SubscriptionsStartPaidServiceCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customers/{customerId}/subscriptions/{subscriptionId}/startPaidService") @@ -2431,7 +3072,7 @@ func (c *SubscriptionsStartPaidServiceCall) Do(opts ...googleapi.CallOption) (*S } return ret, nil // { - // "description": "Starts paid service of a trial subscription", + // "description": "Immediately move a 30-day free trial subscription to a paid service subscription.", // "httpMethod": "POST", // "id": "reseller.subscriptions.startPaidService", // "parameterOrder": [ @@ -2440,13 +3081,13 @@ func (c *SubscriptionsStartPaidServiceCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" @@ -2474,7 +3115,7 @@ type SubscriptionsSuspendCall struct { header_ http.Header } -// Suspend: Suspends an active subscription +// Suspend: Suspends an active subscription. func (r *SubscriptionsService) Suspend(customerId string, subscriptionId string) *SubscriptionsSuspendCall { c := &SubscriptionsSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.customerId = customerId @@ -2513,6 +3154,7 @@ func (c *SubscriptionsSuspendCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "customers/{customerId}/subscriptions/{subscriptionId}/suspend") @@ -2564,7 +3206,7 @@ func (c *SubscriptionsSuspendCall) Do(opts ...googleapi.CallOption) (*Subscripti } return ret, nil // { - // "description": "Suspends an active subscription", + // "description": "Suspends an active subscription.", // "httpMethod": "POST", // "id": "reseller.subscriptions.suspend", // "parameterOrder": [ @@ -2573,13 +3215,13 @@ func (c *SubscriptionsSuspendCall) Do(opts ...googleapi.CallOption) (*Subscripti // ], // "parameters": { // "customerId": { - // "description": "Id of the Customer", + // "description": "Either the customer's primary domain name or the customer's unique identifier. 
If using the domain name, we do not recommend using a customerId as a key for persistent data. If the domain name for a customerId is changed, the Google system automatically updates.", // "location": "path", // "required": true, // "type": "string" // }, // "subscriptionId": { - // "description": "Id of the subscription, which is unique for a customer", + // "description": "This is a required property. The subscriptionId is the subscription identifier and is unique for each customer. Since a subscriptionId changes when a subscription is updated, we recommend to not use this ID as a key for persistent data. And the subscriptionId can be found using the retrieve all reseller subscriptions method.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/resourceviews/v1beta1/resourceviews-gen.go b/vendor/google.golang.org/api/resourceviews/v1beta1/resourceviews-gen.go index 27ac84b8d..f56ca7bd5 100644 --- a/vendor/google.golang.org/api/resourceviews/v1beta1/resourceviews-gen.go +++ b/vendor/google.golang.org/api/resourceviews/v1beta1/resourceviews-gen.go @@ -79,9 +79,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only RegionViews *RegionViewsService @@ -95,6 +96,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewRegionViewsService(s *Service) *RegionViewsService { rs := &RegionViewsService{s: s} return rs @@ -582,6 +587,7 @@ func (c *RegionViewsAddresourcesCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionviewsaddresourcesrequest) if err != nil { @@ -707,6 +713,7 @@ func (c *RegionViewsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{projectName}/regions/{region}/resourceViews/{resourceViewName}") @@ -835,6 +842,7 @@ func (c *RegionViewsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -986,6 +994,7 @@ func (c *RegionViewsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourceview) if err != nil { @@ -1157,6 +1166,7 @@ func (c *RegionViewsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1352,6 +1362,7 @@ func (c *RegionViewsListresourcesCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{projectName}/regions/{region}/resourceViews/{resourceViewName}/resources") @@ -1537,6 +1548,7 @@ func (c *RegionViewsRemoveresourcesCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionviewsremoveresourcesrequest) if err != nil { @@ -1664,6 +1676,7 @@ func (c *ZoneViewsAddresourcesCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.zoneviewsaddresourcesrequest) if err != nil { @@ -1789,6 +1802,7 @@ func (c *ZoneViewsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{projectName}/zones/{zone}/resourceViews/{resourceViewName}") @@ -1917,6 +1931,7 @@ func (c *ZoneViewsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2068,6 +2083,7 @@ func (c *ZoneViewsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourceview) if err != nil { @@ -2239,6 +2255,7 @@ func (c *ZoneViewsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2434,6 +2451,7 @@ func (c *ZoneViewsListresourcesCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{projectName}/zones/{zone}/resourceViews/{resourceViewName}/resources") @@ -2619,6 +2637,7 @@ func (c *ZoneViewsRemoveresourcesCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.zoneviewsremoveresourcesrequest) if err != nil { diff --git a/vendor/google.golang.org/api/resourceviews/v1beta2/resourceviews-gen.go b/vendor/google.golang.org/api/resourceviews/v1beta2/resourceviews-gen.go index 79f061db0..32dede347 100644 --- a/vendor/google.golang.org/api/resourceviews/v1beta2/resourceviews-gen.go +++ 
b/vendor/google.golang.org/api/resourceviews/v1beta2/resourceviews-gen.go @@ -79,9 +79,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only ZoneOperations *ZoneOperationsService @@ -95,6 +96,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewZoneOperationsService(s *Service) *ZoneOperationsService { rs := &ZoneOperationsService{s: s} return rs @@ -840,6 +845,7 @@ func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1027,6 +1033,7 @@ func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1214,6 +1221,7 @@ func (c *ZoneViewsAddResourcesCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.zoneviewsaddresourcesrequest) if err != nil { @@ -1367,6 +1375,7 @@ func (c *ZoneViewsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/resourceViews/{resourceView}") @@ -1523,6 +1532,7 @@ func (c *ZoneViewsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1683,6 +1693,7 @@ func (c *ZoneViewsGetServiceCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/resourceViews/{resourceView}/getService") @@ -1833,6 +1844,7 @@ func (c *ZoneViewsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourceview) if err != nil { @@ -2004,6 +2016,7 @@ func (c *ZoneViewsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ 
-2243,6 +2256,7 @@ func (c *ZoneViewsListResourcesCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2465,6 +2479,7 @@ func (c *ZoneViewsRemoveResourcesCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.zoneviewsremoveresourcesrequest) if err != nil { @@ -2621,6 +2636,7 @@ func (c *ZoneViewsSetServiceCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.zoneviewssetservicerequest) if err != nil { diff --git a/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-api.json b/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-api.json index 1a081f480..52d19f3a3 100644 --- a/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-api.json +++ b/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-api.json @@ -1,45 +1,36 @@ { - "version": "v1", - "baseUrl": "https://runtimeconfig.googleapis.com/", - "servicePath": "", - "description": "Provides capabilities for dynamic configuration and coordination for applications running on Google Cloud Platform.\n", - "kind": "discovery#restDescription", - "basePath": "", - "id": "runtimeconfig:v1", - "documentationLink": "https://cloud.google.com/deployment-manager/runtime-configurator/", - "revision": "20170123", "discoveryVersion": "v1", "version_module": "True", "schemas": { "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", "type": "object", "properties": {}, - "id": "CancelOperationRequest", - "description": "The request message for Operations.CancelOperation." + "id": "CancelOperationRequest" }, "Status": { "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. 
For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", "type": "object", "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" - }, - "message": { - "type": "string", - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client." - }, "details": { "type": "array", "items": { - "type": "object", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" - } + }, + "type": "object" }, "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use." + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" } }, "id": "Status" @@ -66,18 +57,6 @@ "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", "type": "object", "properties": { - "metadata": { - "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any." - }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" - }, "response": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -91,22 +70,34 @@ "type": "string" }, "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure or cancellation." 
+ "description": "The error result of the operation in case of failure or cancellation.", + "$ref": "Status" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object" + }, + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" } }, "id": "Operation" }, "Empty": { - "type": "object", "properties": {}, "id": "Empty", - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`." + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object" } }, "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" }, "protocol": "rest", "canonicalName": "Cloud RuntimeConfig", @@ -131,49 +122,11 @@ "resources": { "operations": { "methods": { - "list": { - "response": { - "$ref": "ListOperationsResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "filter": { - "location": "query", - "description": "The standard list filter.", - "type": "string" - }, - "name": { - "location": "path", - "description": "The name of the operation collection.", - "required": true, - "type": "string", - "pattern": "^operations$" - }, - "pageToken": { - "type": "string", - "location": "query", - "description": "The standard list page token." - }, - "pageSize": { - "location": "query", - "description": "The standard list page size.", - "format": "int32", - "type": "integer" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1/operations", - "path": "v1/{+name}", - "id": "runtimeconfig.operations.list", - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`." - }, "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. 
Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "request": { + "$ref": "CancelOperationRequest" + }, "httpMethod": "POST", "parameterOrder": [ "name" @@ -187,26 +140,18 @@ ], "parameters": { "name": { - "location": "path", "description": "The name of the operation resource to be cancelled.", "required": true, "type": "string", - "pattern": "^operations/.+$" + "pattern": "^operations/.+$", + "location": "path" } }, "flatPath": "v1/operations/{operationsId}:cancel", "id": "runtimeconfig.operations.cancel", - "path": "v1/{+name}:cancel", - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", - "request": { - "$ref": "CancelOperationRequest" - } + "path": "v1/{+name}:cancel" }, "delete": { - "flatPath": "v1/operations/{operationsId}", - "path": "v1/{+name}", - "id": "runtimeconfig.operations.delete", - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", "response": { "$ref": "Empty" }, @@ -214,6 +159,33 @@ "name" ], "httpMethod": "DELETE", + "parameters": { + "name": { + "description": "The name of the operation resource to be deleted.", + "required": true, + "type": "string", + "pattern": "^operations/.+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "flatPath": "v1/operations/{operationsId}", + "path": "v1/{+name}", + "id": "runtimeconfig.operations.delete", + "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`." + }, + "list": { + "description": "Lists operations that match the specified filter in the request. 
If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListOperationsResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloudruntimeconfig" @@ -221,21 +193,40 @@ "parameters": { "name": { "location": "path", - "description": "The name of the operation resource to be deleted.", + "description": "The name of the operation collection.", "required": true, "type": "string", - "pattern": "^operations/.+$" + "pattern": "^operations$" + }, + "pageToken": { + "description": "The standard list page token.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + }, + "filter": { + "description": "The standard list filter.", + "type": "string", + "location": "query" } - } + }, + "flatPath": "v1/operations", + "id": "runtimeconfig.operations.list", + "path": "v1/{+name}" } } } }, "parameters": { "upload_protocol": { - "type": "string", "location": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\")." + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" }, "prettyPrint": { "location": "query", @@ -244,35 +235,34 @@ "default": "true" }, "uploadType": { - "type": "string", "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\")." + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" }, "fields": { - "type": "string", "location": "query", - "description": "Selector specifying which fields to include in a partial response." + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, "callback": { - "type": "string", "location": "query", - "description": "JSONP" + "description": "JSONP", + "type": "string" }, "$.xgafv": { - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], "location": "query", "enum": [ "1", "2" ], - "description": "V1 error format." + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ] }, "alt": { - "type": "string", "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", @@ -285,17 +275,18 @@ "json", "media", "proto" - ] + ], + "type": "string" }, "key": { - "location": "query", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string" + "type": "string", + "location": "query" }, "access_token": { - "type": "string", "location": "query", - "description": "OAuth access token." + "description": "OAuth access token.", + "type": "string" }, "quotaUser": { "location": "query", @@ -303,20 +294,29 @@ "type": "string" }, "pp": { - "type": "boolean", - "default": "true", "location": "query", - "description": "Pretty-print response." 
+ "description": "Pretty-print response.", + "type": "boolean", + "default": "true" }, - "bearer_token": { + "oauth_token": { "location": "query", - "description": "OAuth bearer token.", + "description": "OAuth 2.0 token for the current user.", "type": "string" }, - "oauth_token": { + "bearer_token": { "location": "query", - "description": "OAuth 2.0 token for the current user.", + "description": "OAuth bearer token.", "type": "string" } - } + }, + "version": "v1", + "baseUrl": "https://runtimeconfig.googleapis.com/", + "kind": "discovery#restDescription", + "description": "Provides capabilities for dynamic configuration and coordination for applications running on Google Cloud Platform.\n", + "servicePath": "", + "basePath": "", + "id": "runtimeconfig:v1", + "revision": "20170123", + "documentationLink": "https://cloud.google.com/deployment-manager/runtime-configurator/" } diff --git a/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-gen.go b/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-gen.go index a1dc854e0..e4afffc8b 100644 --- a/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-gen.go +++ b/vendor/google.golang.org/api/runtimeconfig/v1/runtimeconfig-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Operations *OperationsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -404,6 +409,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { @@ -544,6 +550,7 @@ func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") @@ -709,6 +716,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json index 0ee3d3143..ba767f0f6 100644 --- a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json +++ b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json @@ -1,819 +1,152 @@ { - "resources": { - "projects": { - "resources": { - "configs": { - "methods": { - "list": { - "description": "Lists all the RuntimeConfig resources within project.", - 
"parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "response": { - "$ref": "ListConfigsResponse" - }, - "parameters": { - "pageToken": { - "location": "query", - "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", - "format": "int32", - "type": "integer" - }, - "parent": { - "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs", - "path": "v1beta1/{+parent}/configs", - "id": "runtimeconfig.projects.configs.list" - }, - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "RuntimeConfig" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "parent": { - "location": "path", - "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$" - }, - "requestId": { - "description": "An optional but recommended unique \u003ccode\u003erequest_id\u003c/code\u003e. If the server\nreceives two \u003ccode\u003ecreate()\u003c/code\u003e requests with the same\n\u003ccode\u003erequest_id\u003c/code\u003e, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty \u003ccode\u003erequest_id\u003c/code\u003e fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n\u003ccode\u003erequest_id\u003c/code\u003e strings.\n\n\u003ccode\u003erequest_id\u003c/code\u003e strings are limited to 64 characters.", - "type": "string", - "location": "query" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs", - "id": "runtimeconfig.projects.configs.create", - "path": "v1beta1/{+parent}/configs", - "description": "Creates a new RuntimeConfig resource. The configuration name must be\nunique within project.", - "request": { - "$ref": "RuntimeConfig" - } - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "resource": { - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path", - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:setIamPolicy", - "id": "runtimeconfig.projects.configs.setIamPolicy", - "path": "v1beta1/{+resource}:setIamPolicy" - }, - "getIamPolicy": { - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:getIamPolicy", - "path": "v1beta1/{+resource}:getIamPolicy", - "id": "runtimeconfig.projects.configs.getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "httpMethod": "GET", - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ] - }, - "get": { - "httpMethod": "GET", - "response": { - "$ref": "RuntimeConfig" - }, - "parameterOrder": [ - "name" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "name": { - "location": "path", - "description": "The name of the RuntimeConfig resource to retrieve, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+$" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}", - "id": "runtimeconfig.projects.configs.get", - "path": "v1beta1/{+name}", - "description": "Gets information about a RuntimeConfig resource." - }, - "update": { - "description": "Updates a RuntimeConfig resource. 
The configuration must exist beforehand.", - "request": { - "$ref": "RuntimeConfig" - }, - "response": { - "$ref": "RuntimeConfig" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PUT", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "name": { - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path", - "description": "The name of the RuntimeConfig resource to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}", - "path": "v1beta1/{+name}", - "id": "runtimeconfig.projects.configs.update" - }, - "testIamPermissions": { - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:testIamPermissions", - "id": "runtimeconfig.projects.configs.testIamPermissions", - "path": "v1beta1/{+resource}:testIamPermissions", - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path" - } - } + "basePath": "", + "revision": "20170123", + "documentationLink": "https://cloud.google.com/deployment-manager/runtime-configurator/", + "id": "runtimeconfig:v1beta1", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "SetIamPolicyRequest": { + "type": "object", + "properties": { + "policy": { + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", + "$ref": "Policy" + } + }, + "id": "SetIamPolicyRequest", + "description": "Request message for `SetIamPolicy` method." + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. 
There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. 
Contains field @type with type URL.", + "type": "any" }, - "delete": { - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}", - "id": "runtimeconfig.projects.configs.delete", - "path": "v1beta1/{+name}", - "description": "Deletes a RuntimeConfig resource.", - "httpMethod": "DELETE", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "name": { - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path", - "description": "The RuntimeConfig resource to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`" - } - } - } + "type": "object" + } + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + } + }, + "id": "Status" + }, + "Binding": { + "id": "Binding", + "description": "Associates `members` with a `role`.", + "type": "object", + "properties": { + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + }, + "members": { + "type": "array", + "items": { + "type": "string" }, - "resources": { - "operations": { - "methods": { - "testIamPermissions": { - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "httpMethod": "GET", - "parameterOrder": [ - "resource" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/operations/.+$" - }, - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "string", - "repeated": true, - "location": "query" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}:testIamPermissions", - "path": "v1beta1/{+resource}:testIamPermissions", - "id": "runtimeconfig.projects.configs.operations.testIamPermissions", - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." - }, - "get": { - "parameters": { - "name": { - "location": "path", - "description": "The name of the operation resource.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/operations/.+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}", - "path": "v1beta1/{+name}", - "id": "runtimeconfig.projects.configs.operations.get", - "description": "Gets the latest state of a long-running operation. 
Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET" - } - } - }, - "waiters": { - "methods": { - "delete": { - "description": "Deletes the waiter with the specified name.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "name": { - "description": "The Waiter resource to delete, in the format:\n\n `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}", - "path": "v1beta1/{+name}", - "id": "runtimeconfig.projects.configs.waiters.delete" - }, - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListWaitersResponse" - }, - "parameters": { - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", - "format": "int32", - "type": "integer" - }, - "parent": { - "description": "The path to the configuration for which you want to get a list of waiters.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters", - "id": "runtimeconfig.projects.configs.waiters.list", - "path": "v1beta1/{+parent}/waiters", - "description": "List waiters within the given configuration." - }, - "get": { - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Waiter" - }, - "parameters": { - "name": { - "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", - "location": "path", - "description": "The fully-qualified name of the Waiter resource object to retrieve, in the\nformat:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}", - "id": "runtimeconfig.projects.configs.waiters.get", - "path": "v1beta1/{+name}", - "description": "Gets information about a single waiter." - }, - "create": { - "description": "Creates a Waiter resource. This operation returns a long-running Operation\nresource which can be polled for completion. However, a waiter with the\ngiven name will exist (and can be retrieved) prior to the operation\ncompleting. 
If the operation fails, the failed Waiter resource will\nstill exist and must be deleted prior to subsequent creation attempts.", - "request": { - "$ref": "Waiter" - }, - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "requestId": { - "location": "query", - "description": "An optional but recommended unique \u003ccode\u003erequest_id\u003c/code\u003e. If the server\nreceives two \u003ccode\u003ecreate()\u003c/code\u003e requests with the same\n\u003ccode\u003erequest_id\u003c/code\u003e, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty \u003ccode\u003erequest_id\u003c/code\u003e fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n\u003ccode\u003erequest_id\u003c/code\u003e strings.\n\n\u003ccode\u003erequest_id\u003c/code\u003e strings are limited to 64 characters.", - "type": "string" - }, - "parent": { - "description": "The path to the configuration that will own the waiter.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters", - "id": "runtimeconfig.projects.configs.waiters.create", - "path": "v1beta1/{+parent}/waiters" - }, - "testIamPermissions": { - "httpMethod": "GET", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$" - }, - "permissions": { - "repeated": true, - "location": "query", - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}:testIamPermissions", - "id": "runtimeconfig.projects.configs.waiters.testIamPermissions", - "path": "v1beta1/{+resource}:testIamPermissions", - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." - } - } - }, - "variables": { - "methods": { - "delete": { - "description": "Deletes a variable or multiple variables.\n\nIf you specify a variable name, then that variable is deleted. If you\nspecify a prefix and `recursive` is true, then all variables with that\nprefix are deleted. 
You must set a `recursive` to true if you delete\nvariables by prefix.", - "httpMethod": "DELETE", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "recursive": { - "description": "Set to `true` to recursively delete multiple variables with the same\nprefix.", - "type": "boolean", - "location": "query" - }, - "name": { - "description": "The name of the variable to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", - "id": "runtimeconfig.projects.configs.variables.delete", - "path": "v1beta1/{+name}" - }, - "list": { - "description": "Lists variables within given a configuration, matching any provided filters.\nThis only lists variable names, not the values.", - "response": { - "$ref": "ListVariablesResponse" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "pageSize": { - "type": "integer", - "location": "query", - "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", - "format": "int32" - }, - "parent": { - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path", - "description": "The path to the RuntimeConfig resource for which you want to list variables.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", - "required": true, - "type": "string" - }, - "filter": { - "description": "Filters variables by matching the specified filter. For example:\n\n`projects/example-project/config/[CONFIG_NAME]/variables/example-variable`.", - "type": "string", - "location": "query" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", - "type": "string", - "location": "query" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables", - "path": "v1beta1/{+parent}/variables", - "id": "runtimeconfig.projects.configs.variables.list" - }, - "create": { - "id": "runtimeconfig.projects.configs.variables.create", - "path": "v1beta1/{+parent}/variables", - "request": { - "$ref": "Variable" - }, - "description": "Creates a variable within the given configuration. You cannot create\na variable with a name that is a prefix of an existing variable name, or a\nname that has an existing variable name as a prefix.\n\nTo learn more about creating a variable, read the\n[Setting and Getting Data](/deployment-manager/runtime-configurator/set-and-get-variables)\ndocumentation.", - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "Variable" - }, - "parameters": { - "requestId": { - "description": "An optional but recommended unique \u003ccode\u003erequest_id\u003c/code\u003e. 
If the server\nreceives two \u003ccode\u003ecreate()\u003c/code\u003e requests with the same\n\u003ccode\u003erequest_id\u003c/code\u003e, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty \u003ccode\u003erequest_id\u003c/code\u003e fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n\u003ccode\u003erequest_id\u003c/code\u003e strings.\n\n\u003ccode\u003erequest_id\u003c/code\u003e strings are limited to 64 characters.", - "type": "string", - "location": "query" - }, - "parent": { - "pattern": "^projects/[^/]+/configs/[^/]+$", - "location": "path", - "description": "The path to the RutimeConfig resource that this variable should belong to.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables" - }, - "get": { - "description": "Gets information about a single variable.", - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "response": { - "$ref": "Variable" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "name": { - "description": "The name of the variable to return, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIBLE_NAME]`", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", - "location": "path" - } - }, - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", - "path": "v1beta1/{+name}", - "id": "runtimeconfig.projects.configs.variables.get" - }, - "watch": { - "parameters": { - "name": { - "location": "path", - "description": "The name of the variable to watch, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:watch", - "path": "v1beta1/{+name}:watch", - "id": "runtimeconfig.projects.configs.variables.watch", - "request": { - "$ref": "WatchVariableRequest" - }, - "description": "Watches a specific variable and waits for a change in the variable's value.\nWhen there is a change, this method returns the new value or times out.\n\nIf a variable is deleted while being watched, the `variableState` state is\nset to `DELETED` and the method returns the last known variable `value`.\n\nIf you set the deadline for watching to a larger value than internal timeout\n(60 seconds), the current variable value is returned and the `variableState`\nwill be `VARIABLE_STATE_UNSPECIFIED`.\n\nTo learn more about creating a watcher, read the\n[Watching a Variable for Changes](/deployment-manager/runtime-configurator/watching-a-variable)\ndocumentation.", - "response": { - "$ref": "Variable" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST" - }, - "update": { - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", - "path": "v1beta1/{+name}", - "id": 
"runtimeconfig.projects.configs.variables.update", - "description": "Updates an existing variable with a new value.", - "request": { - "$ref": "Variable" - }, - "response": { - "$ref": "Variable" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PUT", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "parameters": { - "name": { - "description": "The name of the variable to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", - "location": "path" - } - } - }, - "testIamPermissions": { - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "GET", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", - "location": "path" - }, - "permissions": { - "repeated": true, - "location": "query", - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudruntimeconfig" - ], - "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:testIamPermissions", - "path": "v1beta1/{+resource}:testIamPermissions", - "id": "runtimeconfig.projects.configs.variables.testIamPermissions", - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." - } - } - } - } + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n" } } - } - }, - "parameters": { - "$.xgafv": { - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string" }, - "callback": { - "type": "string", - "location": "query", - "description": "JSONP" + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. 
A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" }, - "alt": { - "description": "Data format for response.", - "default": "json", - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" + "Cardinality": { + "description": "A Cardinality condition for the Waiter resource. A cardinality condition is\nmet when the number of variables under a specified path prefix reaches a\npredefined number. For example, if you set a Cardinality condition where\nthe `path` is set to `/foo` and the number of paths is set to 2, the\nfollowing variables would meet the condition in a RuntimeConfig resource:\n\n+ `/foo/variable1 = \"value1\"`\n+ `/foo/variable2 = \"value2\"`\n+ `/bar/variable3 = \"value3\"`\n\nIt would not would not satisify the same condition with the `number` set to\n3, however, because there is only 2 paths that start with `/foo`.\nCardinality conditions are recursive; all subtrees under the specific\npath prefix are counted.", + "type": "object", + "properties": { + "path": { + "description": "The root of the variable subtree to monitor. For example, `/foo`.", + "type": "string" + }, + "number": { + "description": "The number variables under the `path` that must exist to meet this\ncondition. Defaults to 1 if not specified.", + "format": "int32", + "type": "integer" + } + }, + "id": "Cardinality" }, - "access_token": { - "location": "query", - "description": "OAuth access token.", - "type": "string" - }, - "key": { - "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string" - }, - "quotaUser": { - "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" - }, - "pp": { - "location": "query", - "description": "Pretty-print response.", - "type": "boolean", - "default": "true" - }, - "oauth_token": { - "type": "string", - "location": "query", - "description": "OAuth 2.0 token for the current user." - }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "ListConfigsResponse": { + "description": "`ListConfigs()` returns the following response. The order of returned\nobjects is arbitrary; that is, it is not ordered in any particular way.", + "type": "object", + "properties": { + "configs": { + "description": "A list of the configurations in the project. 
The order of returned\nobjects is arbitrary; that is, it is not ordered in any particular way.", + "type": "array", + "items": { + "$ref": "RuntimeConfig" + } + }, + "nextPageToken": { + "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `pageSize`, use the `nextPageToken`\nas a value for the query parameter `pageToken` in the next list request.\nSubsequent list requests will have their own `nextPageToken` to continue\npaging through the results", + "type": "string" + } + }, + "id": "ListConfigsResponse" }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true", - "location": "query" + "EndCondition": { + "description": "The condition that a Waiter resource is waiting for.", + "type": "object", + "properties": { + "cardinality": { + "description": "The cardinality of the `EndCondition`.", + "$ref": "Cardinality" + } + }, + "id": "EndCondition" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse" }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - } - }, - "version": "v1beta1", - "baseUrl": "https://runtimeconfig.googleapis.com/", - "description": "Provides capabilities for dynamic configuration and coordination for applications running on Google Cloud Platform.\n", - "servicePath": "", - "kind": "discovery#restDescription", - "basePath": "", - "id": "runtimeconfig:v1beta1", - "documentationLink": "https://cloud.google.com/deployment-manager/runtime-configurator/", - "revision": "20170123", - "discoveryVersion": "v1", - "version_module": "True", - "schemas": { "ListVariablesResponse": { + "id": "ListVariablesResponse", "description": "Response for the `ListVariables()` method.", "type": "object", "properties": { + "nextPageToken": { + "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `pageSize`, use the `nextPageToken`\nas a value for the query parameter `pageToken` in the next list request.\nSubsequent list requests will have their own `nextPageToken` to continue\npaging through the results", + "type": "string" + }, "variables": { - "description": "A list of variables and their values. The order of returned variable\nobjects is arbitrary.", "type": "array", "items": { "$ref": "Variable" - } - }, - "nextPageToken": { - "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `pageSize`, use the `nextPageToken`\nas a value for the query parameter `pageToken` in the next list request.\nSubsequent list requests will have their own `nextPageToken` to continue\npaging through the results", - "type": "string" + }, + "description": "A list of variables and their values. The order of returned variable\nobjects is arbitrary." 
} - }, - "id": "ListVariablesResponse" + } }, "RuntimeConfig": { + "id": "RuntimeConfig", "description": "A RuntimeConfig resource is the primary resource in the Cloud RuntimeConfig\nservice. A RuntimeConfig resource consists of metadata and a hierarchy of\nvariables.", "type": "object", "properties": { @@ -825,8 +158,7 @@ "description": "An optional description of the RuntimeConfig object.", "type": "string" } - }, - "id": "RuntimeConfig" + } }, "WatchVariableRequest": { "description": "Request for the `WatchVariable()` method.", @@ -858,31 +190,17 @@ }, "id": "ListWaitersResponse" }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions)." - } - }, - "id": "TestIamPermissionsRequest" - }, "Waiter": { "description": "A Waiter resource waits for some end condition within a RuntimeConfig resource\nto be met before it returns. For example, assume you have a distributed\nsystem where each node writes to a Variable resource indidicating the node's\nreadiness as part of the startup process.\n\nYou then configure a Waiter resource with the success condition set to wait\nuntil some number of nodes have checked in. Afterwards, your application\nruns some arbitrary code after the condition has been met and the waiter\nreturns successfully.\n\nOnce created, a Waiter resource is immutable.\n\nTo learn more about using waiters, read the\n[Creating a Waiter](/deployment-manager/runtime-configurator/creating-a-waiter)\ndocumentation.", "type": "object", "properties": { "error": { - "description": "[Output Only] If the waiter ended due to a failure or timeout, this value\nwill be set.", - "$ref": "Status" + "$ref": "Status", + "description": "[Output Only] If the waiter ended due to a failure or timeout, this value\nwill be set." }, "failure": { - "$ref": "EndCondition", - "description": "[Optional] The failure condition of this waiter. If this condition is met,\n`done` will be set to `true` and the `error` code will be set to `ABORTED`.\nThe failure condition takes precedence over the success condition. If both\nconditions are met, a failure will be indicated. This value is optional; if\nno failure condition is set, the only failure scenario will be a timeout." + "description": "[Optional] The failure condition of this waiter. If this condition is met,\n`done` will be set to `true` and the `error` code will be set to `ABORTED`.\nThe failure condition takes precedence over the success condition. If both\nconditions are met, a failure will be indicated. This value is optional; if\nno failure condition is set, the only failure scenario will be a timeout.", + "$ref": "EndCondition" }, "success": { "$ref": "EndCondition", @@ -909,11 +227,31 @@ }, "id": "Waiter" }, + "TestIamPermissionsRequest": { + "properties": { + "permissions": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions)." 
+ } + }, + "id": "TestIamPermissionsRequest", + "description": "Request message for `TestIamPermissions` method.", + "type": "object" + }, "Policy": { - "id": "Policy", "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", "type": "object", "properties": { + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } + }, "etag": { "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", "format": "byte", @@ -923,42 +261,19 @@ "description": "Version of the `Policy`. The default version is 0.", "format": "int32", "type": "integer" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", - "type": "array", - "items": { - "$ref": "Binding" - } } - } + }, + "id": "Policy" }, "Variable": { - "id": "Variable", - "description": "Describes a single variable within a RuntimeConfig resource.\nThe name denotes the hierarchical variable name. For example,\n`ports/serving_port` is a valid variable name. The variable value is an\nopaque string and only leaf variables can have values (that is, variables\nthat do not have any child variables).", "type": "object", "properties": { - "name": { - "type": "string", - "description": "The name of the variable resource, in the format:\n\n projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]\n\nThe `[PROJECT_ID]` must be a valid project ID, `[CONFIG_NAME]` must be a\nvalid RuntimeConfig reource and `[VARIABLE_NAME]` follows Unix file system\nfile path naming.\n\nThe `[VARIABLE_NAME]` can contain ASCII letters, numbers, slashes and\ndashes. Slashes are used as path element separators and are not part of the\n`[VARIABLE_NAME]` itself, so `[VARIABLE_NAME]` must contain at least one\nnon-slash character. Multiple slashes are coalesced into single slash\ncharacter. 
Each path segment should follow RFC 1035 segment specification.\nThe length of a `[VARIABLE_NAME]` must be less than 256 bytes.\n\nOnce you create a variable, you cannot change the variable name." - }, - "text": { - "type": "string", - "description": "The string value of the variable. The length of the value must be less\nthan 4096 bytes. Empty values are also accepted. For example,\n\u003ccode\u003etext: \"my text value\"\u003c/code\u003e." - }, - "value": { - "description": "The binary value of the variable. The length of the value must be less\nthan 4096 bytes. Empty values are also accepted. The value must be\nbase64 encoded. Only one of `value` or `text` can be set.", - "format": "byte", - "type": "string" - }, "updateTime": { "description": "[Output Only] The time of the last variable update.", "format": "google-datetime", "type": "string" }, "state": { - "description": "[Ouput only] The current state of the variable. The variable state indicates\nthe outcome of the `variables().watch` call and is visible through the\n`get` and `list` calls.", - "type": "string", "enumDescriptions": [ "Default variable state.", "The variable was updated, while `variables().watch` was executing.", @@ -968,189 +283,874 @@ "VARIABLE_STATE_UNSPECIFIED", "UPDATED", "DELETED" - ] + ], + "description": "[Ouput only] The current state of the variable. The variable state indicates\nthe outcome of the `variables().watch` call and is visible through the\n`get` and `list` calls.", + "type": "string" + }, + "name": { + "description": "The name of the variable resource, in the format:\n\n projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]\n\nThe `[PROJECT_ID]` must be a valid project ID, `[CONFIG_NAME]` must be a\nvalid RuntimeConfig reource and `[VARIABLE_NAME]` follows Unix file system\nfile path naming.\n\nThe `[VARIABLE_NAME]` can contain ASCII letters, numbers, slashes and\ndashes. Slashes are used as path element separators and are not part of the\n`[VARIABLE_NAME]` itself, so `[VARIABLE_NAME]` must contain at least one\nnon-slash character. Multiple slashes are coalesced into single slash\ncharacter. Each path segment should follow RFC 1035 segment specification.\nThe length of a `[VARIABLE_NAME]` must be less than 256 bytes.\n\nOnce you create a variable, you cannot change the variable name.", + "type": "string" + }, + "text": { + "description": "The string value of the variable. The length of the value must be less\nthan 4096 bytes. Empty values are also accepted. For example,\n\u003ccode\u003etext: \"my text value\"\u003c/code\u003e.", + "type": "string" + }, + "value": { + "type": "string", + "description": "The binary value of the variable. The length of the value must be less\nthan 4096 bytes. Empty values are also accepted. The value must be\nbase64 encoded. Only one of `value` or `text` can be set.", + "format": "byte" + } + }, + "id": "Variable", + "description": "Describes a single variable within a RuntimeConfig resource.\nThe name denotes the hierarchical variable name. For example,\n`ports/serving_port` is a valid variable name. The variable value is an\nopaque string and only leaf variables can have values (that is, variables\nthat do not have any child variables)." 
+ }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "type": "object", + "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "type": "string" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any." + } + }, + "id": "Operation" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "canonicalName": "Cloud RuntimeConfig", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloudruntimeconfig": { + "description": "Manage your Google Cloud Platform services' runtime configuration" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://runtimeconfig.googleapis.com/", + "ownerDomain": "google.com", + "name": "runtimeconfig", + "batchPath": "batch", + "title": "Google Cloud RuntimeConfig API", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "configs": { + "methods": { + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "resource": { + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path", + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:testIamPermissions", + "path": "v1beta1/{+resource}:testIamPermissions", + "id": "runtimeconfig.projects.configs.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "request": { + "$ref": "TestIamPermissionsRequest" + } + }, + "delete": { + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "location": "path", + "description": "The RuntimeConfig resource to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}", + "path": "v1beta1/{+name}", + "id": "runtimeconfig.projects.configs.delete", + "description": "Deletes a RuntimeConfig resource." + }, + "list": { + "response": { + "$ref": "ListConfigsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + "format": "int32", + "type": "integer" + }, + "parent": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`." + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs", + "path": "v1beta1/{+parent}/configs", + "id": "runtimeconfig.projects.configs.list", + "description": "Lists all the RuntimeConfig resources within project." + }, + "create": { + "flatPath": "v1beta1/projects/{projectsId}/configs", + "id": "runtimeconfig.projects.configs.create", + "path": "v1beta1/{+parent}/configs", + "request": { + "$ref": "RuntimeConfig" + }, + "description": "Creates a new RuntimeConfig resource. The configuration name must be\nunique within project.", + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "RuntimeConfig" + }, + "parameters": { + "requestId": { + "location": "query", + "description": "An optional but recommended unique \u003ccode\u003erequest_id\u003c/code\u003e. 
If the server\nreceives two \u003ccode\u003ecreate()\u003c/code\u003e requests with the same\n\u003ccode\u003erequest_id\u003c/code\u003e, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty \u003ccode\u003erequest_id\u003c/code\u003e fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n\u003ccode\u003erequest_id\u003c/code\u003e strings.\n\n\u003ccode\u003erequest_id\u003c/code\u003e strings are limited to 64 characters.", + "type": "string" + }, + "parent": { + "location": "path", + "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ] + }, + "setIamPolicy": { + "path": "v1beta1/{+resource}:setIamPolicy", + "id": "runtimeconfig.projects.configs.setIamPolicy", + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:setIamPolicy" + }, + "getIamPolicy": { + "response": { + "$ref": "Policy" + }, + "httpMethod": "GET", + "parameterOrder": [ + "resource" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "resource": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path", + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`." + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:getIamPolicy", + "path": "v1beta1/{+resource}:getIamPolicy", + "id": "runtimeconfig.projects.configs.getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." 
+ }, + "get": { + "parameters": { + "name": { + "description": "The name of the RuntimeConfig resource to retrieve, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}", + "id": "runtimeconfig.projects.configs.get", + "path": "v1beta1/{+name}", + "description": "Gets information about a RuntimeConfig resource.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "RuntimeConfig" + } + }, + "update": { + "request": { + "$ref": "RuntimeConfig" + }, + "description": "Updates a RuntimeConfig resource. The configuration must exist beforehand.", + "httpMethod": "PUT", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "RuntimeConfig" + }, + "parameters": { + "name": { + "description": "The name of the RuntimeConfig resource to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}", + "id": "runtimeconfig.projects.configs.update", + "path": "v1beta1/{+name}" + } + }, + "resources": { + "operations": { + "methods": { + "testIamPermissions": { + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}:testIamPermissions", + "id": "runtimeconfig.projects.configs.operations.testIamPermissions", + "path": "v1beta1/{+resource}:testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "httpMethod": "GET", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+/operations/.+$", + "location": "path" + }, + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "string", + "repeated": true, + "location": "query" + } + } + }, + "get": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/configs/[^/]+/operations/.+$", + "location": "path", + "description": "The name of the operation resource.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}", + "id": "runtimeconfig.projects.configs.operations.get", + "path": "v1beta1/{+name}", + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Operation" + } + } + } + }, + "waiters": { + "methods": { + "list": { + "response": { + "$ref": "ListWaitersResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "parent": { + "description": "The path to the configuration for which you want to get a list of waiters.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + "type": "string" + }, + "pageSize": { + "type": "integer", + "location": "query", + "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + "format": "int32" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters", + "path": "v1beta1/{+parent}/waiters", + "id": "runtimeconfig.projects.configs.waiters.list", + "description": "List waiters within the given configuration." + }, + "get": { + "response": { + "$ref": "Waiter" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", + "location": "path", + "description": "The fully-qualified name of the Waiter resource object to retrieve, in the\nformat:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}", + "path": "v1beta1/{+name}", + "id": "runtimeconfig.projects.configs.waiters.get", + "description": "Gets information about a single waiter." + }, + "create": { + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters", + "id": "runtimeconfig.projects.configs.waiters.create", + "path": "v1beta1/{+parent}/waiters", + "description": "Creates a Waiter resource. This operation returns a long-running Operation\nresource which can be polled for completion. 
However, a waiter with the\ngiven name will exist (and can be retrieved) prior to the operation\ncompleting. If the operation fails, the failed Waiter resource will\nstill exist and must be deleted prior to subsequent creation attempts.", + "request": { + "$ref": "Waiter" + }, + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "requestId": { + "description": "An optional but recommended unique \u003ccode\u003erequest_id\u003c/code\u003e. If the server\nreceives two \u003ccode\u003ecreate()\u003c/code\u003e requests with the same\n\u003ccode\u003erequest_id\u003c/code\u003e, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty \u003ccode\u003erequest_id\u003c/code\u003e fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n\u003ccode\u003erequest_id\u003c/code\u003e strings.\n\n\u003ccode\u003erequest_id\u003c/code\u003e strings are limited to 64 characters.", + "type": "string", + "location": "query" + }, + "parent": { + "description": "The path to the configuration that will own the waiter.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path" + } + } + }, + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "resource": { + "location": "path", + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$" + }, + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "string", + "repeated": true, + "location": "query" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}:testIamPermissions", + "path": "v1beta1/{+resource}:testIamPermissions", + "id": "runtimeconfig.projects.configs.waiters.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error." 
+ }, + "delete": { + "path": "v1beta1/{+name}", + "id": "runtimeconfig.projects.configs.waiters.delete", + "description": "Deletes the waiter with the specified name.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "name": { + "description": "The Waiter resource to delete, in the format:\n\n `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}" + } + } + }, + "variables": { + "methods": { + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListVariablesResponse" + }, + "parameters": { + "parent": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path", + "description": "The path to the RuntimeConfig resource for which you want to list variables.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`" + }, + "filter": { + "location": "query", + "description": "Filters variables by matching the specified filter. For example:\n\n`projects/example-project/config/[CONFIG_NAME]/variables/example-variable`.", + "type": "string" + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results." + }, + "pageSize": { + "location": "query", + "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + "format": "int32", + "type": "integer" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables", + "id": "runtimeconfig.projects.configs.variables.list", + "path": "v1beta1/{+parent}/variables", + "description": "Lists variables within given a configuration, matching any provided filters.\nThis only lists variable names, not the values." + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "Variable" + }, + "parameters": { + "requestId": { + "location": "query", + "description": "An optional but recommended unique \u003ccode\u003erequest_id\u003c/code\u003e. 
If the server\nreceives two \u003ccode\u003ecreate()\u003c/code\u003e requests with the same\n\u003ccode\u003erequest_id\u003c/code\u003e, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty \u003ccode\u003erequest_id\u003c/code\u003e fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n\u003ccode\u003erequest_id\u003c/code\u003e strings.\n\n\u003ccode\u003erequest_id\u003c/code\u003e strings are limited to 64 characters.", + "type": "string" + }, + "parent": { + "description": "The path to the RutimeConfig resource that this variable should belong to.\nThe configuration must exist beforehand; the path must by in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables", + "id": "runtimeconfig.projects.configs.variables.create", + "path": "v1beta1/{+parent}/variables", + "request": { + "$ref": "Variable" + }, + "description": "Creates a variable within the given configuration. You cannot create\na variable with a name that is a prefix of an existing variable name, or a\nname that has an existing variable name as a prefix.\n\nTo learn more about creating a variable, read the\n[Setting and Getting Data](/deployment-manager/runtime-configurator/set-and-get-variables)\ndocumentation." + }, + "get": { + "description": "Gets information about a single variable.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Variable" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", + "location": "path", + "description": "The name of the variable to return, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIBLE_NAME]`", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", + "id": "runtimeconfig.projects.configs.variables.get", + "path": "v1beta1/{+name}" + }, + "watch": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", + "location": "path", + "description": "The name of the variable to watch, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:watch", + "path": "v1beta1/{+name}:watch", + "id": "runtimeconfig.projects.configs.variables.watch", + "description": "Watches a specific variable and waits for a change in the variable's value.\nWhen there is a change, this method returns the new value or times out.\n\nIf a variable is deleted while being watched, the `variableState` state is\nset to `DELETED` and the method returns the last known variable `value`.\n\nIf you set the deadline for watching to a larger value than internal timeout\n(60 seconds), the current variable value is returned and the `variableState`\nwill be `VARIABLE_STATE_UNSPECIFIED`.\n\nTo learn more 
about creating a watcher, read the\n[Watching a Variable for Changes](/deployment-manager/runtime-configurator/watching-a-variable)\ndocumentation.", + "request": { + "$ref": "WatchVariableRequest" + }, + "response": { + "$ref": "Variable" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST" + }, + "update": { + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", + "path": "v1beta1/{+name}", + "id": "runtimeconfig.projects.configs.variables.update", + "description": "Updates an existing variable with a new value.", + "request": { + "$ref": "Variable" + }, + "response": { + "$ref": "Variable" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", + "location": "path", + "description": "The name of the variable to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", + "required": true, + "type": "string" + } + } + }, + "testIamPermissions": { + "id": "runtimeconfig.projects.configs.variables.testIamPermissions", + "path": "v1beta1/{+resource}:testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "httpMethod": "GET", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", + "location": "path" + }, + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "string", + "repeated": true, + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:testIamPermissions" + }, + "delete": { + "description": "Deletes a variable or multiple variables.\n\nIf you specify a variable name, then that variable is deleted. If you\nspecify a prefix and `recursive` is true, then all variables with that\nprefix are deleted. 
You must set a `recursive` to true if you delete\nvariables by prefix.", + "httpMethod": "DELETE", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudruntimeconfig" + ], + "parameters": { + "recursive": { + "description": "Set to `true` to recursively delete multiple variables with the same\nprefix.", + "type": "boolean", + "location": "query" + }, + "name": { + "pattern": "^projects/[^/]+/configs/[^/]+/variables/.+$", + "location": "path", + "description": "The name of the variable to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", + "required": true, + "type": "string" + } + }, + "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", + "id": "runtimeconfig.projects.configs.variables.delete", + "path": "v1beta1/{+name}" + } + } + } + } } } + } + }, + "parameters": { + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", - "type": "object", - "properties": { - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" - }, - "response": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", - "type": "object" - }, - "name": { - "type": "string", - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`." - }, - "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure or cancellation." - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", - "type": "object" - } - }, - "id": "Operation" + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" }, - "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "type": "object", - "properties": { - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. 
An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." - } - }, - "id": "SetIamPolicyRequest" + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" }, - "Status": { - "type": "object", - "properties": { - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" - }, - "details": { - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", - "type": "array", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - } - }, - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" - } - }, - "id": "Status", - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. 
If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons." + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" }, - "Binding": { - "description": "Associates `members` with a `role`.", - "type": "object", - "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", - "type": "array", - "items": { - "type": "string" - } - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" - } - }, - "id": "Binding" + "$.xgafv": { + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query" }, - "Cardinality": { - "description": "A Cardinality condition for the Waiter resource. A cardinality condition is\nmet when the number of variables under a specified path prefix reaches a\npredefined number. For example, if you set a Cardinality condition where\nthe `path` is set to `/foo` and the number of paths is set to 2, the\nfollowing variables would meet the condition in a RuntimeConfig resource:\n\n+ `/foo/variable1 = \"value1\"`\n+ `/foo/variable2 = \"value2\"`\n+ `/bar/variable3 = \"value3\"`\n\nIt would not would not satisify the same condition with the `number` set to\n3, however, because there is only 2 paths that start with `/foo`.\nCardinality conditions are recursive; all subtrees under the specific\npath prefix are counted.", - "type": "object", - "properties": { - "path": { - "description": "The root of the variable subtree to monitor. For example, `/foo`.", - "type": "string" - }, - "number": { - "type": "integer", - "description": "The number variables under the `path` that must exist to meet this\ncondition. Defaults to 1 if not specified.", - "format": "int32" - } - }, - "id": "Cardinality" + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" }, - "Empty": { - "id": "Empty", - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {} + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" }, - "ListConfigsResponse": { - "description": "`ListConfigs()` returns the following response. The order of returned\nobjects is arbitrary; that is, it is not ordered in any particular way.", - "type": "object", - "properties": { - "configs": { - "description": "A list of the configurations in the project. The order of returned\nobjects is arbitrary; that is, it is not ordered in any particular way.", - "type": "array", - "items": { - "$ref": "RuntimeConfig" - } - }, - "nextPageToken": { - "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `pageSize`, use the `nextPageToken`\nas a value for the query parameter `pageToken` in the next list request.\nSubsequent list requests will have their own `nextPageToken` to continue\npaging through the results", - "type": "string" - } - }, - "id": "ListConfigsResponse" + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" }, - "EndCondition": { - "description": "The condition that a Waiter resource is waiting for.", - "type": "object", - "properties": { - "cardinality": { - "description": "The cardinality of the `EndCondition`.", - "$ref": "Cardinality" - } - }, - "id": "EndCondition" + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" }, - "TestIamPermissionsResponse": { - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "TestIamPermissionsResponse", - "description": "Response message for `TestIamPermissions` method.", - "type": "object" - } - }, - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "protocol": "rest", - "canonicalName": "Cloud RuntimeConfig", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloudruntimeconfig": { - "description": "Manage your Google Cloud Platform services' runtime configuration" - }, - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string" } }, - "rootUrl": "https://runtimeconfig.googleapis.com/", - "ownerDomain": "google.com", - "name": "runtimeconfig", - "batchPath": "batch", - "title": "Google Cloud RuntimeConfig API", - "ownerName": "Google" + "version": "v1beta1", + "baseUrl": "https://runtimeconfig.googleapis.com/", + "servicePath": "", + "kind": "discovery#restDescription", + "description": "Provides capabilities for dynamic configuration and coordination for applications running on Google Cloud Platform.\n" } diff --git a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go index dc0667ea6..72e6e4594 100644 --- a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go +++ b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Configs = NewProjectsConfigsService(s) @@ -1204,6 +1209,7 @@ func (c *ProjectsConfigsCreateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runtimeconfig) if err != nil { @@ -1343,6 +1349,7 @@ func (c *ProjectsConfigsDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") @@ -1480,6 +1487,7 @@ func (c *ProjectsConfigsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1623,6 +1631,7 @@ func (c *ProjectsConfigsGetIamPolicyCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1779,6 +1788,7 @@ func (c *ProjectsConfigsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1944,6 +1954,7 @@ func (c *ProjectsConfigsSetIamPolicyCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, 
err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -2084,6 +2095,7 @@ func (c *ProjectsConfigsTestIamPermissionsCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -2221,6 +2233,7 @@ func (c *ProjectsConfigsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.runtimeconfig) if err != nil { @@ -2370,6 +2383,7 @@ func (c *ProjectsConfigsOperationsGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2526,6 +2540,7 @@ func (c *ProjectsConfigsOperationsTestIamPermissionsCall) doRequest(alt string) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2692,6 +2707,7 @@ func (c *ProjectsConfigsVariablesCreateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variable) if err != nil { @@ -2847,6 +2863,7 @@ func (c *ProjectsConfigsVariablesDeleteCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") @@ -2989,6 +3006,7 @@ func (c *ProjectsConfigsVariablesGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3158,6 +3176,7 @@ func (c *ProjectsConfigsVariablesListCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3351,6 +3370,7 @@ func (c *ProjectsConfigsVariablesTestIamPermissionsCall) doRequest(alt string) ( reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3488,6 +3508,7 @@ func (c *ProjectsConfigsVariablesUpdateCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variable) if err != nil { @@ -3644,6 +3665,7 @@ func (c *ProjectsConfigsVariablesWatchCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil 
body, err := googleapi.WithoutDataWrapper.JSONReader(c.watchvariablerequest) if err != nil { @@ -3808,6 +3830,7 @@ func (c *ProjectsConfigsWaitersCreateCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.waiter) if err != nil { @@ -3947,6 +3970,7 @@ func (c *ProjectsConfigsWaitersDeleteCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") @@ -4084,6 +4108,7 @@ func (c *ProjectsConfigsWaitersGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4240,6 +4265,7 @@ func (c *ProjectsConfigsWaitersListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4428,6 +4454,7 @@ func (c *ProjectsConfigsWaitersTestIamPermissionsCall) doRequest(alt string) (*h reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/safebrowsing/v4/safebrowsing-gen.go b/vendor/google.golang.org/api/safebrowsing/v4/safebrowsing-gen.go index 729772b61..1ce67a254 100644 --- a/vendor/google.golang.org/api/safebrowsing/v4/safebrowsing-gen.go +++ b/vendor/google.golang.org/api/safebrowsing/v4/safebrowsing-gen.go @@ -58,9 +58,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only FullHashes *FullHashesService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewFullHashesService(s *Service) *FullHashesService { rs := &FullHashesService{s: s} return rs @@ -1121,6 +1126,7 @@ func (c *FullHashesFindCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.findfullhashesrequest) if err != nil { @@ -1236,6 +1242,7 @@ func (c *ThreatListUpdatesFetchCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.fetchthreatlistupdatesrequest) if err != nil { @@ -1359,6 +1366,7 @@ func (c *ThreatListsListCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1468,6 +1476,7 @@ func (c *ThreatMatchesFindCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.findthreatmatchesrequest) if err != nil { diff --git a/vendor/google.golang.org/api/script/v1/script-api.json b/vendor/google.golang.org/api/script/v1/script-api.json index 48eb38cf5..7625840a6 100644 --- a/vendor/google.golang.org/api/script/v1/script-api.json +++ b/vendor/google.golang.org/api/script/v1/script-api.json @@ -1,330 +1,332 @@ { - "kind": "discovery#restDescription", - "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/FcNWZezE05ypidUunx03uCyJR70\"", - "discoveryVersion": "v1", - "id": "script:v1", - "name": "script", - "version": "v1", - "revision": "20160801", - "title": "Google Apps Script Execution API", - "description": "Executes Google Apps Script projects.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://developers.google.com/apps-script/execution/rest/v1/scripts/run", - "protocol": "rest", - "baseUrl": "https://script.googleapis.com/", - "basePath": "", - "rootUrl": "https://script.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" - }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", - "location": "query" - }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query" + "ownerDomain": "google.com", + "name": "script", + "batchPath": "batch", + "revision": "20170209", + "documentationLink": "https://developers.google.com/apps-script/execution/rest/v1/scripts/run", + "id": "script:v1", + "title": "Google Apps Script Execution API", + "discoveryVersion": "v1", + "ownerName": "Google", + "resources": { + "scripts": { + "methods": { + "run": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "scriptId" + ], + "httpMethod": "POST", + "parameters": { + "scriptId": { + "location": "path", + "description": "The project key of the script to be executed. To find the project key, open\nthe project in the script editor and select **File \u003e Project properties**.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://mail.google.com/", + "https://www.google.com/calendar/feeds", + "https://www.google.com/m8/feeds", + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/forms", + "https://www.googleapis.com/auth/forms.currentonly", + "https://www.googleapis.com/auth/groups", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/userinfo.email" + ], + "flatPath": "v1/scripts/{scriptId}:run", + "path": "v1/scripts/{scriptId}:run", + "id": "script.scripts.run", + "request": { + "$ref": "ExecutionRequest" + }, + "description": "Runs a function in an Apps Script project. The project must be deployed\nfor use with the Apps Script Execution API.\n\nThis method requires authorization with an OAuth 2.0 token that includes at\nleast one of the scopes listed in the [Authorization](#authorization)\nsection; script projects that do not require authorization cannot be\nexecuted through this API. To find the correct scopes to include in the\nauthentication token, open the project in the script editor, then select\n**File \u003e Project properties** and click the **Scopes** tab." + } + } + } }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://mail.google.com/": { - "description": "View and manage your mail" - }, - "https://www.google.com/calendar/feeds": { - "description": "Manage your calendars" + "parameters": { + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" }, - "https://www.google.com/m8/feeds": { - "description": "Manage your contacts" + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" }, - "https://www.googleapis.com/auth/admin.directory.group": { - "description": "View and manage the provisioning of groups on your domain" + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string", + "location": "query" }, - "https://www.googleapis.com/auth/admin.directory.user": { - "description": "View and manage the provisioning of users on your domain" + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "https://www.googleapis.com/auth/drive": { - "description": "View and manage the files in your Google Drive" + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" }, - "https://www.googleapis.com/auth/forms": { - "description": "View and manage your forms in Google Drive" + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" }, - "https://www.googleapis.com/auth/forms.currentonly": { - "description": "View and manage forms that this application has been installed in" + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" }, - "https://www.googleapis.com/auth/groups": { - "description": "View and manage your Google Groups" + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" }, - "https://www.googleapis.com/auth/spreadsheets": { - "description": "View and manage your spreadsheets in Google Drive" + "alt": { + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string" }, - "https://www.googleapis.com/auth/userinfo.email": { - "description": "View your email address" - } - } - } - }, - "schemas": { - "ExecutionRequest": { - "id": "ExecutionRequest", - "type": "object", - "description": "A request to run the function in a script. The script is identified by the specified `script_id`. Executing a function on a script will return results based on the implementation of the script.", - "properties": { - "function": { - "type": "string", - "description": "The name of the function to execute in the given script. The name does not include parentheses or parameters." + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" }, - "parameters": { - "type": "array", - "description": "The parameters to be passed to the function being executed. The type for each parameter should match the expected type in Apps Script. Parameters cannot be Apps Script-specific objects (such as a `Document` or `Calendar`); they can only be primitive types such as a `string`, `number`, `array`, `object`, or `boolean`. Optional.", - "items": { - "type": "any" - } + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" }, - "sessionState": { - "type": "string", - "description": "This field is not used." + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" }, - "devMode": { - "type": "boolean", - "description": "If `true` and the user is an owner of the script, the script runs at the most recently saved version rather than the version deployed for use with the Execution API. Optional; default is `false`." + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" } - } }, - "Operation": { - "id": "Operation", - "type": "object", - "description": "The response will not arrive until the function finishes executing. The maximum runtime is listed in the guide to [limitations in Apps Script](https://developers.google.com/apps-script/guides/services/quotas#current_limitations).\nIf the script function returns successfully, the `response` field will contain an `ExecutionResponse` object with the function's return value in the object's `result` field.\n\nIf the script function (or Apps Script itself) throws an exception, the `error` field will contain a `Status` object. The `Status` object's `details` field will contain an array with a single `ExecutionError` object that provides information about the nature of the error.\n\nIf the `run` call itself fails (for example, because of a malformed request or an authorization error), the method will return an HTTP response code in the 4XX range with a different format for the response body. Client libraries will automatically convert a 4XX response into an exception class.", - "properties": { - "name": { - "type": "string", - "description": "This field is not used." - }, - "metadata": { - "type": "object", - "description": "This field is not used.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - }, - "done": { - "type": "boolean", - "description": "This field is not used." - }, - "error": { - "$ref": "Status", - "description": "If a `run` call succeeds but the script function (or Apps Script itself) throws an exception, this field will contain a `Status` object. The `Status` object's `details` field will contain an array with a single `ExecutionError` object that provides information about the nature of the error." + "schemas": { + "ExecutionResponse": { + "description": "An object that provides the return value of a function executed through the\nApps Script Execution API. If a\n`run` call succeeds and the\nscript function returns successfully, the response body's\n`response` field contains this\n`ExecutionResponse` object.", + "type": "object", + "properties": { + "result": { + "description": "The return value of the script function. The type matches the object type\nreturned in Apps Script. Functions called through the Execution API cannot\nreturn Apps Script-specific objects (such as a `Document` or a `Calendar`);\nthey can only return primitive types such as a `string`, `number`, `array`,\n`object`, or `boolean`.", + "type": "any" + } + }, + "id": "ExecutionResponse" }, - "response": { - "type": "object", - "description": "If the script function returns successfully, this field will contain an `ExecutionResponse` object with the function's return value as the object's `result` field.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." 
- } - } - } - }, - "Status": { - "id": "Status", - "type": "object", - "description": "If a `run` call succeeds but the script function (or Apps Script itself) throws an exception, the response body's `error` field will contain this `Status` object.", - "properties": { - "code": { - "type": "integer", - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32" + "Operation": { + "description": "The response will not arrive until the function finishes executing. The maximum runtime is listed in the guide to [limitations in Apps Script](https://developers.google.com/apps-script/guides/services/quotas#current_limitations).\n\u003cp\u003eIf the script function returns successfully, the `response` field will contain an `ExecutionResponse` object with the function's return value in the object's `result` field.\u003c/p\u003e\n\u003cp\u003eIf the script function (or Apps Script itself) throws an exception, the `error` field will contain a `Status` object. The `Status` object's `details` field will contain an array with a single `ExecutionError` object that provides information about the nature of the error.\u003c/p\u003e\n\u003cp\u003eIf the `run` call itself fails (for example, because of a malformed request or an authorization error), the method will return an HTTP response code in the 4XX range with a different format for the response body. Client libraries will automatically convert a 4XX response into an exception class.\u003c/p\u003e", + "type": "object", + "properties": { + "done": { + "description": "This field is not used.", + "type": "boolean" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "If the script function returns successfully, this field will contain an `ExecutionResponse` object with the function's return value as the object's `result` field.", + "type": "object" + }, + "name": { + "description": "This field is not used.", + "type": "string" + }, + "error": { + "description": "If a `run` call succeeds but the script function (or Apps Script itself) throws an exception, this field will contain a `Status` object. The `Status` object's `details` field will contain an array with a single `ExecutionError` object that provides information about the nature of the error.", + "$ref": "Status" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "This field is not used.", + "type": "object" + } + }, + "id": "Operation" }, - "message": { - "type": "string", - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client." 
+ "ScriptStackTraceElement": { + "description": "A stack trace through the script that shows where the execution failed.", + "type": "object", + "properties": { + "function": { + "description": "The name of the function that failed.", + "type": "string" + }, + "lineNumber": { + "description": "The line number where the script failed.", + "format": "int32", + "type": "integer" + } + }, + "id": "ScriptStackTraceElement" }, - "details": { - "type": "array", - "description": "An array that contains a single `ExecutionError` object that provides information about the nature of the error.", - "items": { + "ExecutionError": { + "description": "An object that provides information about the nature of an error in the Apps\nScript Execution API. If an\n`run` call succeeds but the\nscript function (or Apps Script itself) throws an exception, the response\nbody's `error` field contains a\n`Status` object. The `Status` object's `details` field\ncontains an array with a single one of these `ExecutionError` objects.", "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - } - } - } - }, - "ExecutionError": { - "id": "ExecutionError", - "type": "object", - "description": "An object that provides information about the nature of an error in the Apps Script Execution API. If an `run` call succeeds but the script function (or Apps Script itself) throws an exception, the response body's `error` field will contain a `Status` object. The `Status` object's `details` field will contain an array with a single one of these `ExecutionError` objects.", - "properties": { - "scriptStackTraceElements": { - "type": "array", - "description": "An array of objects that provide a stack trace through the script to show where the execution failed, with the deepest call first.", - "items": { - "$ref": "ScriptStackTraceElement" - } + "properties": { + "scriptStackTraceElements": { + "description": "An array of objects that provide a stack trace through the script to show\nwhere the execution failed, with the deepest call first.", + "type": "array", + "items": { + "$ref": "ScriptStackTraceElement" + } + }, + "errorType": { + "description": "The error type, for example `TypeError` or `ReferenceError`. If the error\ntype is unavailable, this field is not included.", + "type": "string" + }, + "errorMessage": { + "description": "The error message thrown by Apps Script, usually localized into the user's\nlanguage.", + "type": "string" + } + }, + "id": "ExecutionError" }, - "errorMessage": { - "type": "string", - "description": "The error message thrown by Apps Script, usually localized into the user's language." + "Status": { + "properties": { + "details": { + "description": "An array that contains a single `ExecutionError` object that provides information about the nature of the error.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } + }, + "code": { + "description": "The status code. For this API, this value will always be 3, corresponding to an INVALID_ARGUMENT error.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which is in English. 
Any user-facing error message is localized and sent in the [`google.rpc.Status.details`](google.rpc.Status.details) field, or localized by the client.", + "type": "string" + } + }, + "id": "Status", + "description": "If a `run` call succeeds but the script function (or Apps Script itself) throws an exception, the response body's `error` field will contain this `Status` object.", + "type": "object" }, - "errorType": { - "type": "string", - "description": "The error type, for example `TypeError` or `ReferenceError`. If the error type is unavailable, this field is not included." + "ExecutionRequest": { + "description": "A request to run the function in a script. The script is identified by the\nspecified `script_id`. Executing a function on a script returns results\nbased on the implementation of the script.", + "type": "object", + "properties": { + "function": { + "description": "The name of the function to execute in the given script. The name does not\ninclude parentheses or parameters.", + "type": "string" + }, + "devMode": { + "description": "If `true` and the user is an owner of the script, the script runs at the\nmost recently saved version rather than the version deployed for use with\nthe Execution API. Optional; default is `false`.", + "type": "boolean" + }, + "parameters": { + "description": "The parameters to be passed to the function being executed. The object type\nfor each parameter should match the expected type in Apps Script.\nParameters cannot be Apps Script-specific object types (such as a\n`Document` or a `Calendar`); they can only be primitive types such as\n`string`, `number`, `array`, `object`, or `boolean`. Optional.", + "type": "array", + "items": { + "type": "any" + } + }, + "sessionState": { + "description": "For Android add-ons only. An ID that represents the user's current session\nin the Android app for Google Docs or Sheets, included as extra data in the\n[`Intent`](https://developer.android.com/guide/components/intents-filters.html)\nthat launches the add-on. When an Android add-on is run with a session\nstate, it gains the privileges of a\n[bound](https://developers.google.com/apps-script/guides/bound) script —\nthat is, it can access information like the user's current cursor position\n(in Docs) or selected cell (in Sheets). To retrieve the state, call\n`Intent.getStringExtra(\"com.google.android.apps.docs.addons.SessionState\")`.\nOptional.", + "type": "string" + } + }, + "id": "ExecutionRequest" } - } }, - "ScriptStackTraceElement": { - "id": "ScriptStackTraceElement", - "type": "object", - "description": "A stack trace through the script that shows where the execution failed.", - "properties": { - "function": { - "type": "string", - "description": "The name of the function that failed." - }, - "lineNumber": { - "type": "integer", - "description": "The line number where the script failed.", - "format": "int32" - } - } + "protocol": "rest", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, - "ExecutionResponse": { - "id": "ExecutionResponse", - "type": "object", - "description": "An object that provides the return value of a function executed through the Apps Script Execution API. If an `run` call succeeds and the script function returns successfully, the response body's `response` field will contain this `ExecutionResponse` object.", - "properties": { - "result": { - "type": "any", - "description": "The return value of the script function. 
The type will match the type returned in Apps Script. Functions called through the Execution API cannot return Apps Script-specific objects (such as a `Document` or `Calendar`); they can only return primitive types such as a `string`, `number`, `array`, `object`, or `boolean`." - }, - "status": { - "type": "string", - "enum": [ - "SUCCESS", - "CANCELED" - ] - } - } - } - }, - "resources": { - "scripts": { - "methods": { - "run": { - "id": "script.scripts.run", - "path": "v1/scripts/{scriptId}:run", - "httpMethod": "POST", - "description": "Runs a function in an Apps Script project that has been deployed for use with the Apps Script Execution API. This method requires authorization with an OAuth 2.0 token that includes at least one of the scopes listed in the [Authentication](#authentication) section; script projects that do not require authorization cannot be executed through this API. To find the correct scopes to include in the authentication token, open the project in the script editor, then select **File \u003e Project properties** and click the **Scopes** tab.", - "parameters": { - "scriptId": { - "type": "string", - "description": "The project key of the script to be executed. To find the project key, open the project in the script editor, then select **File \u003e Project properties**.", - "required": true, - "location": "path" + "version": "v1", + "baseUrl": "https://script.googleapis.com/", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/forms": { + "description": "View and manage your forms in Google Drive" + }, + "https://www.google.com/m8/feeds": { + "description": "Manage your contacts" + }, + "https://www.googleapis.com/auth/userinfo.email": { + "description": "View your email address" + }, + "https://www.google.com/calendar/feeds": { + "description": "Manage your calendars" + }, + "https://www.googleapis.com/auth/groups": { + "description": "View and manage your Google Groups" + }, + "https://www.googleapis.com/auth/forms.currentonly": { + "description": "View and manage forms that this application has been installed in" + }, + "https://www.googleapis.com/auth/drive": { + "description": "View and manage the files in your Google Drive" + }, + "https://www.googleapis.com/auth/spreadsheets": { + "description": "View and manage your spreadsheets in Google Drive" + }, + "https://mail.google.com/": { + "description": "View and manage your mail" + }, + "https://www.googleapis.com/auth/admin.directory.group": { + "description": "View and manage the provisioning of groups on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.user": { + "description": "View and manage the provisioning of users on your domain" + } } - }, - "parameterOrder": [ - "scriptId" - ], - "request": { - "$ref": "ExecutionRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://mail.google.com/", - "https://www.google.com/calendar/feeds", - "https://www.google.com/m8/feeds", - "https://www.googleapis.com/auth/admin.directory.group", - "https://www.googleapis.com/auth/admin.directory.user", - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/forms", - "https://www.googleapis.com/auth/forms.currentonly", - "https://www.googleapis.com/auth/groups", - "https://www.googleapis.com/auth/spreadsheets", - "https://www.googleapis.com/auth/userinfo.email" - ] } - } - } - } + }, + "servicePath": "", + "description": "Executes Google Apps Script projects.", + "kind": "discovery#restDescription", + "rootUrl": 
"https://script.googleapis.com/", + "basePath": "" } diff --git a/vendor/google.golang.org/api/script/v1/script-gen.go b/vendor/google.golang.org/api/script/v1/script-gen.go index b1baf1110..75840260d 100644 --- a/vendor/google.golang.org/api/script/v1/script-gen.go +++ b/vendor/google.golang.org/api/script/v1/script-gen.go @@ -91,9 +91,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Scripts *ScriptsService } @@ -105,6 +106,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewScriptsService(s *Service) *ScriptsService { rs := &ScriptsService{s: s} return rs @@ -115,24 +120,29 @@ type ScriptsService struct { } // ExecutionError: An object that provides information about the nature -// of an error in the Apps Script Execution API. If an `run` call -// succeeds but the script function (or Apps Script itself) throws an -// exception, the response body's `error` field will contain a `Status` -// object. The `Status` object's `details` field will contain an array -// with a single one of these `ExecutionError` objects. +// of an error in the Apps +// Script Execution API. If an +// `run` call succeeds but the +// script function (or Apps Script itself) throws an exception, the +// response +// body's `error` field contains a +// `Status` object. The `Status` object's `details` field +// contains an array with a single one of these `ExecutionError` +// objects. type ExecutionError struct { // ErrorMessage: The error message thrown by Apps Script, usually - // localized into the user's language. + // localized into the user's + // language. ErrorMessage string `json:"errorMessage,omitempty"` // ErrorType: The error type, for example `TypeError` or - // `ReferenceError`. If the error type is unavailable, this field is not - // included. + // `ReferenceError`. If the error + // type is unavailable, this field is not included. ErrorType string `json:"errorType,omitempty"` // ScriptStackTraceElements: An array of objects that provide a stack - // trace through the script to show where the execution failed, with the - // deepest call first. + // trace through the script to show + // where the execution failed, with the deepest call first. ScriptStackTraceElements []*ScriptStackTraceElement `json:"scriptStackTraceElements,omitempty"` // ForceSendFields is a list of field names (e.g. "ErrorMessage") to @@ -159,29 +169,54 @@ func (s *ExecutionError) MarshalJSON() ([]byte, error) { } // ExecutionRequest: A request to run the function in a script. The -// script is identified by the specified `script_id`. Executing a -// function on a script will return results based on the implementation -// of the script. +// script is identified by the +// specified `script_id`. Executing a function on a script returns +// results +// based on the implementation of the script. 
type ExecutionRequest struct { // DevMode: If `true` and the user is an owner of the script, the script - // runs at the most recently saved version rather than the version - // deployed for use with the Execution API. Optional; default is - // `false`. + // runs at the + // most recently saved version rather than the version deployed for use + // with + // the Execution API. Optional; default is `false`. DevMode bool `json:"devMode,omitempty"` // Function: The name of the function to execute in the given script. - // The name does not include parentheses or parameters. + // The name does not + // include parentheses or parameters. Function string `json:"function,omitempty"` // Parameters: The parameters to be passed to the function being - // executed. The type for each parameter should match the expected type - // in Apps Script. Parameters cannot be Apps Script-specific objects - // (such as a `Document` or `Calendar`); they can only be primitive - // types such as a `string`, `number`, `array`, `object`, or `boolean`. - // Optional. + // executed. The object type + // for each parameter should match the expected type in Apps + // Script. + // Parameters cannot be Apps Script-specific object types (such as + // a + // `Document` or a `Calendar`); they can only be primitive types such + // as + // `string`, `number`, `array`, `object`, or `boolean`. Optional. Parameters []interface{} `json:"parameters,omitempty"` - // SessionState: This field is not used. + // SessionState: For Android add-ons only. An ID that represents the + // user's current session + // in the Android app for Google Docs or Sheets, included as extra data + // in + // the + // [`Intent`](https://developer.android.com/guide/components/intents- + // filters.html) + // that launches the add-on. When an Android add-on is run with a + // session + // state, it gains the privileges of + // a + // [bound](https://developers.google.com/apps-script/guides/bound) + // script — + // that is, it can access information like the user's current cursor + // position + // (in Docs) or selected cell (in Sheets). To retrieve the state, + // call + // `Intent.getStringExtra("com.google.android.apps.docs.addons.Sessi + // onState")`. + // Optional. SessionState string `json:"sessionState,omitempty"` // ForceSendFields is a list of field names (e.g. "DevMode") to @@ -208,23 +243,24 @@ func (s *ExecutionRequest) MarshalJSON() ([]byte, error) { } // ExecutionResponse: An object that provides the return value of a -// function executed through the Apps Script Execution API. If an `run` -// call succeeds and the script function returns successfully, the -// response body's `response` field will contain this +// function executed through the +// Apps Script Execution API. If a +// `run` call succeeds and the +// script function returns successfully, the response body's +// `response` field contains this // `ExecutionResponse` object. type ExecutionResponse struct { - // Result: The return value of the script function. The type will match - // the type returned in Apps Script. Functions called through the - // Execution API cannot return Apps Script-specific objects (such as a - // `Document` or `Calendar`); they can only return primitive types such - // as a `string`, `number`, `array`, `object`, or `boolean`. + // Result: The return value of the script function. The type matches the + // object type + // returned in Apps Script. 
Functions called through the Execution API + // cannot + // return Apps Script-specific objects (such as a `Document` or a + // `Calendar`); + // they can only return primitive types such as a `string`, `number`, + // `array`, + // `object`, or `boolean`. Result interface{} `json:"result,omitempty"` - // Possible values: - // "SUCCESS" - // "CANCELED" - Status string `json:"status,omitempty"` - // ForceSendFields is a list of field names (e.g. "Result") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -253,21 +289,19 @@ func (s *ExecutionResponse) MarshalJSON() ([]byte, error) { // in Apps // Script](https://developers.google.com/apps-script/guides/services/quot // as#current_limitations). -// If the script function returns successfully, the `response` field +//
<p>If the script function returns successfully, the `response` field
 // will contain an `ExecutionResponse` object with the function's return
-// value in the object's `result` field.
-//
-// If the script function (or Apps Script itself) throws an exception,
-// the `error` field will contain a `Status` object. The `Status`
-// object's `details` field will contain an array with a single
+// value in the object's `result` field.</p>
+// <p>If the script function (or Apps Script itself) throws an
+// exception, the `error` field will contain a `Status` object. The
+// `Status` object's `details` field will contain an array with a single
 // `ExecutionError` object that provides information about the nature of
-// the error.
-//
-// If the `run` call itself fails (for example, because of a malformed
-// request or an authorization error), the method will return an HTTP
-// response code in the 4XX range with a different format for the
-// response body. Client libraries will automatically convert a 4XX
-// response into an exception class.
+// the error.</p>
+// <p>If the `run` call itself fails (for example, because of a
+// malformed request or an authorization error), the method will return
+// an HTTP response code in the 4XX range with a different format for
+// the response body. Client libraries will automatically convert a 4XX
+// response into an exception class.</p>
      type Operation struct { // Done: This field is not used. Done bool `json:"done,omitempty"` @@ -353,17 +387,18 @@ func (s *ScriptStackTraceElement) MarshalJSON() ([]byte, error) { // Script itself) throws an exception, the response body's `error` field // will contain this `Status` object. type Status struct { - // Code: The status code, which should be an enum value of - // google.rpc.Code. + // Code: The status code. For this API, this value will always be 3, + // corresponding to an INVALID_ARGUMENT error. Code int64 `json:"code,omitempty"` // Details: An array that contains a single `ExecutionError` object that // provides information about the nature of the error. Details []googleapi.RawMessage `json:"details,omitempty"` - // Message: A developer-facing error message, which should be in - // English. Any user-facing error message should be localized and sent - // in the google.rpc.Status.details field, or localized by the client. + // Message: A developer-facing error message, which is in English. Any + // user-facing error message is localized and sent in the + // [`google.rpc.Status.details`](google.rpc.Status.details) field, or + // localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -400,14 +435,21 @@ type ScriptsRunCall struct { header_ http.Header } -// Run: Runs a function in an Apps Script project that has been deployed -// for use with the Apps Script Execution API. This method requires -// authorization with an OAuth 2.0 token that includes at least one of -// the scopes listed in the [Authentication](#authentication) section; -// script projects that do not require authorization cannot be executed -// through this API. To find the correct scopes to include in the +// Run: Runs a function in an Apps Script project. The project must be +// deployed +// for use with the Apps Script Execution API. +// +// This method requires authorization with an OAuth 2.0 token that +// includes at +// least one of the scopes listed in the +// [Authorization](#authorization) +// section; script projects that do not require authorization cannot +// be +// executed through this API. To find the correct scopes to include in +// the // authentication token, open the project in the script editor, then -// select **File > Project properties** and click the **Scopes** tab. +// select +// **File > Project properties** and click the **Scopes** tab. func (r *ScriptsService) Run(scriptId string, executionrequest *ExecutionRequest) *ScriptsRunCall { c := &ScriptsRunCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.scriptId = scriptId @@ -446,6 +488,7 @@ func (c *ScriptsRunCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.executionrequest) if err != nil { @@ -501,7 +544,8 @@ func (c *ScriptsRunCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Runs a function in an Apps Script project that has been deployed for use with the Apps Script Execution API. This method requires authorization with an OAuth 2.0 token that includes at least one of the scopes listed in the [Authentication](#authentication) section; script projects that do not require authorization cannot be executed through this API. 
To find the correct scopes to include in the authentication token, open the project in the script editor, then select **File \u003e Project properties** and click the **Scopes** tab.", + // "description": "Runs a function in an Apps Script project. The project must be deployed\nfor use with the Apps Script Execution API.\n\nThis method requires authorization with an OAuth 2.0 token that includes at\nleast one of the scopes listed in the [Authorization](#authorization)\nsection; script projects that do not require authorization cannot be\nexecuted through this API. To find the correct scopes to include in the\nauthentication token, open the project in the script editor, then select\n**File \u003e Project properties** and click the **Scopes** tab.", + // "flatPath": "v1/scripts/{scriptId}:run", // "httpMethod": "POST", // "id": "script.scripts.run", // "parameterOrder": [ @@ -509,7 +553,7 @@ func (c *ScriptsRunCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // ], // "parameters": { // "scriptId": { - // "description": "The project key of the script to be executed. To find the project key, open the project in the script editor, then select **File \u003e Project properties**.", + // "description": "The project key of the script to be executed. To find the project key, open\nthe project in the script editor and select **File \u003e Project properties**.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/searchconsole/v1/searchconsole-api.json b/vendor/google.golang.org/api/searchconsole/v1/searchconsole-api.json new file mode 100644 index 000000000..167c986dc --- /dev/null +++ b/vendor/google.golang.org/api/searchconsole/v1/searchconsole-api.json @@ -0,0 +1,292 @@ +{ + "baseUrl": "https://searchconsole.googleapis.com/", + "canonicalName": "Search Console", + "kind": "discovery#restDescription", + "servicePath": "", + "description": "Provides tools for running validation tests against single URLs", + "rootUrl": "https://searchconsole.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "searchconsole", + "batchPath": "batch", + "revision": "20170208", + "documentationLink": "https://developers.google.com/webmaster-tools/search-console-api/", + "id": "searchconsole:v1", + "title": "Google Search Console URL Testing Tools API", + "ownerName": "Google", + "discoveryVersion": "v1", + "version_module": "True", + "resources": { + "urlTestingTools": { + "resources": { + "mobileFriendlyTest": { + "methods": { + "run": { + "response": { + "$ref": "RunMobileFriendlyTestResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "flatPath": "v1/urlTestingTools/mobileFriendlyTest:run", + "path": "v1/urlTestingTools/mobileFriendlyTest:run", + "id": "searchconsole.urlTestingTools.mobileFriendlyTest.run", + "description": "Runs Mobile-Friendly Test for a given URL.", + "request": { + "$ref": "RunMobileFriendlyTestRequest" + } + } + } + } + } + } + }, + "parameters": { + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" + }, + "bearer_token": { + "type": "string", + "location": "query", + "description": "OAuth bearer token." + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "$.xgafv": { + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format." + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "alt": { + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + } + }, + "schemas": { + "MobileFriendlyIssue": { + "type": "object", + "properties": { + "rule": { + "enum": [ + "MOBILE_FRIENDLY_RULE_UNSPECIFIED", + "USES_INCOMPATIBLE_PLUGINS", + "CONFIGURE_VIEWPORT", + "FIXED_WIDTH_VIEWPORT", + "SIZE_CONTENT_TO_VIEWPORT", + "USE_LEGIBLE_FONT_SIZES", + "TAP_TARGETS_TOO_CLOSE" + ], + "description": "Rule violated.", + "type": "string", + "enumDescriptions": [ + "Unknown rule. Sorry, we don't have any description for the rule that was\nbroken.", + "Plugins incompatible with mobile devices are being used. [Learn more]\n(https://support.google.com/webmasters/answer/6352293#flash_usage).", + "Viewsport is not specified using the meta viewport tag. [Learn more]\n(https://support.google.com/webmasters/answer/6352293#viewport_not_configured).", + "Viewport defined to a fixed width. [Learn more]\n(https://support.google.com/webmasters/answer/6352293#fixed-width_viewport).", + "Content not sized to viewport. [Learn more]\n(https://support.google.com/webmasters/answer/6352293#content_not_sized_to_viewport).", + "Font size is too small for easy reading on a small screen. [Learn More]\n(https://support.google.com/webmasters/answer/6352293#small_font_size).", + "Touch elements are too close to each other. [Learn more]\n(https://support.google.com/webmasters/answer/6352293#touch_elements_too_close)." + ] + } + }, + "id": "MobileFriendlyIssue", + "description": "Mobile-friendly issue." + }, + "RunMobileFriendlyTestResponse": { + "description": "Mobile-friendly test response, including mobile-friendly issues and resource\nissues.", + "type": "object", + "properties": { + "mobileFriendliness": { + "enumDescriptions": [ + "Internal error when running this test. 
Please try running the test again.", + "The page is mobile friendly.", + "The page is not mobile friendly." + ], + "enum": [ + "MOBILE_FRIENDLY_TEST_RESULT_UNSPECIFIED", + "MOBILE_FRIENDLY", + "NOT_MOBILE_FRIENDLY" + ], + "description": "Test verdict, whether the page is mobile friendly or not.", + "type": "string" + }, + "mobileFriendlyIssues": { + "description": "List of mobile-usability issues.", + "type": "array", + "items": { + "$ref": "MobileFriendlyIssue" + } + }, + "screenshot": { + "description": "Screenshot of the requested URL.", + "$ref": "Image" + }, + "testStatus": { + "description": "Final state of the test, can be either complete or an error.", + "$ref": "TestStatus" + }, + "resourceIssues": { + "description": "Information about embedded resources issues.", + "type": "array", + "items": { + "$ref": "ResourceIssue" + } + } + }, + "id": "RunMobileFriendlyTestResponse" + }, + "ResourceIssue": { + "properties": { + "blockedResource": { + "$ref": "BlockedResource", + "description": "Describes a blocked resource issue." + } + }, + "id": "ResourceIssue", + "description": "Information about a resource with issue.", + "type": "object" + }, + "BlockedResource": { + "description": "Blocked resource.", + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the blocked resource." + } + }, + "id": "BlockedResource" + }, + "TestStatus": { + "description": "Final state of the test, including error details if necessary.", + "type": "object", + "properties": { + "details": { + "description": "Error details if applicable.", + "type": "string" + }, + "status": { + "type": "string", + "enumDescriptions": [ + "Internal error when running this test. Please try running the test again.", + "Inspection has completed without errors.", + "Inspection terminated in an error state. This indicates a problem in\nGoogle's infrastructure, not a user error. Please try again later.", + "Google can not access the URL because of a user error such as a robots.txt\nblockage, a 403 or 500 code etc. Please make sure that the URL provided is\naccessible by Googlebot and is not password protected." + ], + "enum": [ + "TEST_STATUS_UNSPECIFIED", + "COMPLETE", + "INTERNAL_ERROR", + "PAGE_UNREACHABLE" + ], + "description": "Status of the test." + } + }, + "id": "TestStatus" + }, + "Image": { + "properties": { + "mimeType": { + "description": "The mime-type of the image data.", + "type": "string" + }, + "data": { + "type": "string", + "description": "Image data in format determined by the mime type. Currently, the format\nwill always be \"image/png\", but this might change in the future.", + "format": "byte" + } + }, + "id": "Image", + "description": "Describe image data.", + "type": "object" + }, + "RunMobileFriendlyTestRequest": { + "description": "Mobile-friendly test request.", + "type": "object", + "properties": { + "url": { + "description": "URL for inspection.", + "type": "string" + }, + "requestScreenshot": { + "description": "Whether or not screenshot is requested. 
Default is false.", + "type": "boolean" + } + }, + "id": "RunMobileFriendlyTestRequest" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "version": "v1" +} diff --git a/vendor/google.golang.org/api/searchconsole/v1/searchconsole-gen.go b/vendor/google.golang.org/api/searchconsole/v1/searchconsole-gen.go new file mode 100644 index 000000000..b88c90011 --- /dev/null +++ b/vendor/google.golang.org/api/searchconsole/v1/searchconsole-gen.go @@ -0,0 +1,498 @@ +// Package searchconsole provides access to the Google Search Console URL Testing Tools API. +// +// See https://developers.google.com/webmaster-tools/search-console-api/ +// +// Usage example: +// +// import "google.golang.org/api/searchconsole/v1" +// ... +// searchconsoleService, err := searchconsole.New(oauthHttpClient) +package searchconsole // import "google.golang.org/api/searchconsole/v1" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "searchconsole:v1" +const apiName = "searchconsole" +const apiVersion = "v1" +const basePath = "https://searchconsole.googleapis.com/" + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.UrlTestingTools = NewUrlTestingToolsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + UrlTestingTools *UrlTestingToolsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewUrlTestingToolsService(s *Service) *UrlTestingToolsService { + rs := &UrlTestingToolsService{s: s} + rs.MobileFriendlyTest = NewUrlTestingToolsMobileFriendlyTestService(s) + return rs +} + +type UrlTestingToolsService struct { + s *Service + + MobileFriendlyTest *UrlTestingToolsMobileFriendlyTestService +} + +func NewUrlTestingToolsMobileFriendlyTestService(s *Service) *UrlTestingToolsMobileFriendlyTestService { + rs := &UrlTestingToolsMobileFriendlyTestService{s: s} + return rs +} + +type UrlTestingToolsMobileFriendlyTestService struct { + s *Service +} + +// BlockedResource: Blocked resource. +type BlockedResource struct { + // Url: URL of the blocked resource. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Url") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Url") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BlockedResource) MarshalJSON() ([]byte, error) { + type noMethod BlockedResource + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Image: Describe image data. +type Image struct { + // Data: Image data in format determined by the mime type. Currently, + // the format + // will always be "image/png", but this might change in the future. + Data string `json:"data,omitempty"` + + // MimeType: The mime-type of the image data. + MimeType string `json:"mimeType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Data") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Data") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Image) MarshalJSON() ([]byte, error) { + type noMethod Image + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MobileFriendlyIssue: Mobile-friendly issue. +type MobileFriendlyIssue struct { + // Rule: Rule violated. + // + // Possible values: + // "MOBILE_FRIENDLY_RULE_UNSPECIFIED" - Unknown rule. Sorry, we don't + // have any description for the rule that was + // broken. + // "USES_INCOMPATIBLE_PLUGINS" - Plugins incompatible with mobile + // devices are being used. [Learn + // more] + // (https://support.google.com/webmasters/answer/6352293#flash_usag + // e). + // "CONFIGURE_VIEWPORT" - Viewsport is not specified using the meta + // viewport tag. [Learn + // more] + // (https://support.google.com/webmasters/answer/6352293#viewport_n + // ot_configured). + // "FIXED_WIDTH_VIEWPORT" - Viewport defined to a fixed width. [Learn + // more] + // (https://support.google.com/webmasters/answer/6352293#fixed-widt + // h_viewport). + // "SIZE_CONTENT_TO_VIEWPORT" - Content not sized to viewport. [Learn + // more] + // (https://support.google.com/webmasters/answer/6352293#content_no + // t_sized_to_viewport). + // "USE_LEGIBLE_FONT_SIZES" - Font size is too small for easy reading + // on a small screen. [Learn + // More] + // (https://support.google.com/webmasters/answer/6352293#small_font + // _size). 
+ // "TAP_TARGETS_TOO_CLOSE" - Touch elements are too close to each + // other. [Learn + // more] + // (https://support.google.com/webmasters/answer/6352293#touch_elem + // ents_too_close). + Rule string `json:"rule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rule") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MobileFriendlyIssue) MarshalJSON() ([]byte, error) { + type noMethod MobileFriendlyIssue + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResourceIssue: Information about a resource with issue. +type ResourceIssue struct { + // BlockedResource: Describes a blocked resource issue. + BlockedResource *BlockedResource `json:"blockedResource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BlockedResource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BlockedResource") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ResourceIssue) MarshalJSON() ([]byte, error) { + type noMethod ResourceIssue + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RunMobileFriendlyTestRequest: Mobile-friendly test request. +type RunMobileFriendlyTestRequest struct { + // RequestScreenshot: Whether or not screenshot is requested. Default is + // false. + RequestScreenshot bool `json:"requestScreenshot,omitempty"` + + // Url: URL for inspection. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RequestScreenshot") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"RequestScreenshot") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RunMobileFriendlyTestRequest) MarshalJSON() ([]byte, error) { + type noMethod RunMobileFriendlyTestRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RunMobileFriendlyTestResponse: Mobile-friendly test response, +// including mobile-friendly issues and resource +// issues. +type RunMobileFriendlyTestResponse struct { + // MobileFriendliness: Test verdict, whether the page is mobile friendly + // or not. + // + // Possible values: + // "MOBILE_FRIENDLY_TEST_RESULT_UNSPECIFIED" - Internal error when + // running this test. Please try running the test again. + // "MOBILE_FRIENDLY" - The page is mobile friendly. + // "NOT_MOBILE_FRIENDLY" - The page is not mobile friendly. + MobileFriendliness string `json:"mobileFriendliness,omitempty"` + + // MobileFriendlyIssues: List of mobile-usability issues. + MobileFriendlyIssues []*MobileFriendlyIssue `json:"mobileFriendlyIssues,omitempty"` + + // ResourceIssues: Information about embedded resources issues. + ResourceIssues []*ResourceIssue `json:"resourceIssues,omitempty"` + + // Screenshot: Screenshot of the requested URL. + Screenshot *Image `json:"screenshot,omitempty"` + + // TestStatus: Final state of the test, can be either complete or an + // error. + TestStatus *TestStatus `json:"testStatus,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "MobileFriendliness") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MobileFriendliness") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RunMobileFriendlyTestResponse) MarshalJSON() ([]byte, error) { + type noMethod RunMobileFriendlyTestResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestStatus: Final state of the test, including error details if +// necessary. +type TestStatus struct { + // Details: Error details if applicable. + Details string `json:"details,omitempty"` + + // Status: Status of the test. + // + // Possible values: + // "TEST_STATUS_UNSPECIFIED" - Internal error when running this test. + // Please try running the test again. + // "COMPLETE" - Inspection has completed without errors. + // "INTERNAL_ERROR" - Inspection terminated in an error state. 
This + // indicates a problem in + // Google's infrastructure, not a user error. Please try again later. + // "PAGE_UNREACHABLE" - Google can not access the URL because of a + // user error such as a robots.txt + // blockage, a 403 or 500 code etc. Please make sure that the URL + // provided is + // accessible by Googlebot and is not password protected. + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Details") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Details") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestStatus) MarshalJSON() ([]byte, error) { + type noMethod TestStatus + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "searchconsole.urlTestingTools.mobileFriendlyTest.run": + +type UrlTestingToolsMobileFriendlyTestRunCall struct { + s *Service + runmobilefriendlytestrequest *RunMobileFriendlyTestRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Run: Runs Mobile-Friendly Test for a given URL. +func (r *UrlTestingToolsMobileFriendlyTestService) Run(runmobilefriendlytestrequest *RunMobileFriendlyTestRequest) *UrlTestingToolsMobileFriendlyTestRunCall { + c := &UrlTestingToolsMobileFriendlyTestRunCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.runmobilefriendlytestrequest = runmobilefriendlytestrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlTestingToolsMobileFriendlyTestRunCall) Fields(s ...googleapi.Field) *UrlTestingToolsMobileFriendlyTestRunCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlTestingToolsMobileFriendlyTestRunCall) Context(ctx context.Context) *UrlTestingToolsMobileFriendlyTestRunCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *UrlTestingToolsMobileFriendlyTestRunCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UrlTestingToolsMobileFriendlyTestRunCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.runmobilefriendlytestrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/urlTestingTools/mobileFriendlyTest:run") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "searchconsole.urlTestingTools.mobileFriendlyTest.run" call. +// Exactly one of *RunMobileFriendlyTestResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RunMobileFriendlyTestResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UrlTestingToolsMobileFriendlyTestRunCall) Do(opts ...googleapi.CallOption) (*RunMobileFriendlyTestResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RunMobileFriendlyTestResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Runs Mobile-Friendly Test for a given URL.", + // "flatPath": "v1/urlTestingTools/mobileFriendlyTest:run", + // "httpMethod": "POST", + // "id": "searchconsole.urlTestingTools.mobileFriendlyTest.run", + // "parameterOrder": [], + // "parameters": {}, + // "path": "v1/urlTestingTools/mobileFriendlyTest:run", + // "request": { + // "$ref": "RunMobileFriendlyTestRequest" + // }, + // "response": { + // "$ref": "RunMobileFriendlyTestResponse" + // } + // } + +} diff --git a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json index 04160f5b6..e34efd52b 100644 --- a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json +++ b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json @@ -18,6 +18,62 @@ "resources": { "services": { "methods": { + "endReconciliation": { + "id": "servicecontrol.services.endReconciliation", + "response": { + "$ref": "EndReconciliationResponse" + }, + "parameterOrder": [ + "serviceName" + ], + "description": "Signals the quota controller that service ends the ongoing usage\nreconciliation.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. 
For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + "request": { + "$ref": "EndReconciliationRequest" + }, + "flatPath": "v1/services/{serviceName}:endReconciliation", + "httpMethod": "POST", + "parameters": { + "serviceName": { + "description": "Name of the service as specified in the service configuration. For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + "required": true, + "location": "path", + "type": "string" + } + }, + "path": "v1/services/{serviceName}:endReconciliation", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/servicecontrol" + ] + }, + "releaseQuota": { + "id": "servicecontrol.services.releaseQuota", + "response": { + "$ref": "ReleaseQuotaResponse" + }, + "parameterOrder": [ + "serviceName" + ], + "description": "Releases previously allocated quota done through AllocateQuota method.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + "request": { + "$ref": "ReleaseQuotaRequest" + }, + "flatPath": "v1/services/{serviceName}:releaseQuota", + "httpMethod": "POST", + "parameters": { + "serviceName": { + "description": "Name of the service as specified in the service configuration. For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + "required": true, + "location": "path", + "type": "string" + } + }, + "path": "v1/services/{serviceName}:releaseQuota", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/servicecontrol" + ] + }, "check": { "id": "servicecontrol.services.check", "response": { @@ -46,6 +102,34 @@ "https://www.googleapis.com/auth/servicecontrol" ] }, + "allocateQuota": { + "id": "servicecontrol.services.allocateQuota", + "response": { + "$ref": "AllocateQuotaResponse" + }, + "parameterOrder": [ + "serviceName" + ], + "description": "Attempts to allocate quota for the specified consumer. It should be called\nbefore the operation is executed.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + "request": { + "$ref": "AllocateQuotaRequest" + }, + "flatPath": "v1/services/{serviceName}:allocateQuota", + "httpMethod": "POST", + "parameters": { + "serviceName": { + "description": "Name of the service as specified in the service configuration. 
For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + "required": true, + "location": "path", + "type": "string" + } + }, + "path": "v1/services/{serviceName}:allocateQuota", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/servicecontrol" + ] + }, "report": { "id": "servicecontrol.services.report", "response": { @@ -73,11 +157,89 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/servicecontrol" ] + }, + "startReconciliation": { + "id": "servicecontrol.services.startReconciliation", + "response": { + "$ref": "StartReconciliationResponse" + }, + "parameterOrder": [ + "serviceName" + ], + "description": "Unlike rate quota, allocation quota does not get refilled periodically.\nSo, it is possible that the quota usage as seen by the service differs from\nwhat the One Platform considers the usage is. This is expected to happen\nonly rarely, but over time this can accumulate. Services can invoke\nStartReconciliation and EndReconciliation to correct this usage drift, as\ndescribed below:\n1. Service sends StartReconciliation with a timestamp in future for each\n metric that needs to be reconciled. The timestamp being in future allows\n to account for in-flight AllocateQuota and ReleaseQuota requests for the\n same metric.\n2. One Platform records this timestamp and starts tracking subsequent\n AllocateQuota and ReleaseQuota requests until EndReconciliation is\n called.\n3. At or after the time specified in the StartReconciliation, service\n sends EndReconciliation with the usage that needs to be reconciled to.\n4. One Platform adjusts its own record of usage for that metric to the\n value specified in EndReconciliation by taking in to account any\n allocation or release between StartReconciliation and EndReconciliation.\n\nSignals the quota controller that the service wants to perform a usage\nreconciliation as specified in the request.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + "request": { + "$ref": "StartReconciliationRequest" + }, + "flatPath": "v1/services/{serviceName}:startReconciliation", + "httpMethod": "POST", + "parameters": { + "serviceName": { + "description": "Name of the service as specified in the service configuration. 
For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + "required": true, + "location": "path", + "type": "string" + } + }, + "path": "v1/services/{serviceName}:startReconciliation", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/servicecontrol" + ] } } } }, "schemas": { + "Money": { + "description": "Represents an amount of money with its currency type.", + "type": "object", + "properties": { + "currencyCode": { + "description": "The 3-letter currency code defined in ISO 4217.", + "type": "string" + }, + "units": { + "description": "The whole units of the amount.\nFor example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", + "type": "string", + "format": "int64" + }, + "nanos": { + "description": "Number of nano (10^-9) units of the amount.\nThe value must be between -999,999,999 and +999,999,999 inclusive.\nIf `units` is positive, `nanos` must be positive or zero.\nIf `units` is zero, `nanos` can be positive, zero, or negative.\nIf `units` is negative, `nanos` must be negative or zero.\nFor example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", + "type": "integer", + "format": "int32" + } + }, + "id": "Money" + }, + "QuotaInfo": { + "description": "Contains the quota information for a quota check response.", + "type": "object", + "properties": { + "quotaConsumed": { + "description": "Map of quota group name to the actual number of tokens consumed. If the\nquota check was not successful, then this will not be populated due to no\nquota consumption.\nDeprecated: Use quota_metrics to get per quota group usage.", + "additionalProperties": { + "type": "integer", + "format": "int32" + }, + "type": "object" + }, + "limitExceeded": { + "description": "Quota Metrics that have exceeded quota limits.\nFor QuotaGroup-based quota, this is QuotaGroup.name\nFor QuotaLimit-based quota, this is QuotaLimit.name\nSee: google.api.Quota\nDeprecated: Use quota_metrics to get per quota group limit exceeded status.", + "type": "array", + "items": { + "type": "string" + } + }, + "quotaMetrics": { + "description": "Quota metrics to indicate the usage. Depending on the check request, one or\nmore of the following metrics will be included:\n\n1. For rate quota, per quota group or per quota metric incremental usage\nwill be specified using the following delta metric:\n \"serviceruntime.googleapis.com/api/consumer/quota_used_count\"\n\n2. For allocation quota, per quota metric total usage will be specified\nusing the following gauge metric:\n \"serviceruntime.googleapis.com/allocation/consumer/quota_used_count\"\n\n3. 
For both rate quota and allocation quota, the quota limit reached\ncondition will be specified using the following boolean metric:\n \"serviceruntime.googleapis.com/quota/exceeded\"", + "type": "array", + "items": { + "$ref": "MetricValueSet" + } + } + }, + "id": "QuotaInfo" + }, "CheckError": { "description": "Defines the errors to be returned in\ngoogle.api.servicecontrol.v1.CheckResponse.check_errors.", "type": "object", @@ -89,7 +251,12 @@ "NOT_FOUND", "PERMISSION_DENIED", "RESOURCE_EXHAUSTED", + "BUDGET_EXCEEDED", + "DENIAL_OF_SERVICE_DETECTED", + "LOAD_SHEDDING", + "ABUSER_DETECTED", "SERVICE_NOT_ACTIVATED", + "VISIBILITY_DENIED", "BILLING_DISABLED", "PROJECT_DELETED", "PROJECT_INVALID", @@ -100,16 +267,30 @@ "API_KEY_INVALID", "API_KEY_EXPIRED", "API_KEY_NOT_FOUND", + "SPATULA_HEADER_INVALID", + "LOAS_ROLE_INVALID", + "NO_LOAS_PROJECT", + "LOAS_PROJECT_DISABLED", + "SECURITY_POLICY_VIOLATED", "NAMESPACE_LOOKUP_UNAVAILABLE", "SERVICE_STATUS_UNAVAILABLE", - "BILLING_STATUS_UNAVAILABLE" + "BILLING_STATUS_UNAVAILABLE", + "QUOTA_CHECK_UNAVAILABLE", + "LOAS_PROJECT_LOOKUP_UNAVAILABLE", + "CLOUD_RESOURCE_MANAGER_BACKEND_UNAVAILABLE", + "SECURITY_POLICY_BACKEND_UNAVAILABLE" ], "enumDescriptions": [ "This is never used in `CheckResponse`.", "The consumer's project id was not found.\nSame as google.rpc.Code.NOT_FOUND.", "The consumer doesn't have access to the specified resource.\nSame as google.rpc.Code.PERMISSION_DENIED.", "Quota check failed. Same as google.rpc.Code.RESOURCE_EXHAUSTED.", + "Budget check failed.", + "The consumer's request has been flagged as a DoS attack.", + "The consumer's request should be rejected in order to protect the service\nfrom being overloaded.", + "The consumer has been flagged as an abuser.", "The consumer hasn't activated the service.", + "The consumer cannot access the service due to visibility configuration.", "The consumer cannot access the service because billing is disabled.", "The consumer's project has been marked as deleted (soft deletion).", "The consumer's project number or id does not represent a valid project.", @@ -120,9 +301,18 @@ "The consumer's API key is invalid.", "The consumer's API Key has expired.", "The consumer's API Key was not found in config record.", + "The consumer's spatula header is invalid.", + "The consumer's LOAS role is invalid.", + "The consumer's LOAS role has no associated project.", + "The consumer's LOAS project is not `ACTIVE` in LoquatV2.", + "Request is not allowed as per security policies defined in Org Policy.", "The backend server for looking up project id/number is unavailable.", "The backend server for checking service status is unavailable.", - "The backend server for checking billing status is unavailable." + "The backend server for checking billing status is unavailable.", + "The backend server for checking quota limits is unavailable.", + "The Spanner for looking up LOAS project is unavailable.", + "Cloud Resource Manager backend server is unavailable.", + "Backend server for evaluating security policy is unavailable." ], "type": "string" }, @@ -160,6 +350,20 @@ }, "id": "Status" }, + "StartReconciliationRequest": { + "type": "object", + "properties": { + "reconciliationOperation": { + "description": "Operation that describes the quota reconciliation.", + "$ref": "QuotaOperation" + }, + "serviceConfigId": { + "description": "Specifies which version of service configuration should be used to process\nthe request. 
If unspecified or no matching version can be found, the latest\none will be used.", + "type": "string" + } + }, + "id": "StartReconciliationRequest" + }, "ReportError": { "description": "Represents the processing error of one `Operation` in the request.", "type": "object", @@ -205,6 +409,13 @@ "description": "Fully qualified name of the operation. Reserved for future use.", "type": "string" }, + "userLabels": { + "description": "User defined labels for the resource that this operation is associated\nwith.", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, "operationId": { "description": "Identity of the operation. This must be unique within the scope of the\nservice that generated the operation. If the service calls\nCheck() and Report() on the same operation, the two calls should carry\nthe same id.\n\nUUID version 4 is recommended, though not required.\nIn scenarios where an operation is computed from existing information\nand an idempotent id is desirable for deduplication purpose, UUID version 5\nis recommended. See RFC 4122 for details.", "type": "string" @@ -225,11 +436,13 @@ "description": "DO NOT USE. This is an experimental field.", "enum": [ "LOW", - "HIGH" + "HIGH", + "DEBUG" ], "enumDescriptions": [ "The API implementation may cache and aggregate the data.\nThe data may be lost when rare and unexpected system failures occur.", - "The API implementation doesn't cache and aggregate the data.\nIf the method returns successfully, it's guaranteed that the data has\nbeen persisted in durable storage." + "The API implementation doesn't cache and aggregate the data.\nIf the method returns successfully, it's guaranteed that the data has\nbeen persisted in durable storage.", + "In addition to the behavior described in HIGH, DEBUG enables\nadditional validation logic that is only useful during the onboarding\nprocess. This is only available to Google internal services and\nthe service must be whitelisted by chemist-dev@google.com in order\nto use this level." ], "type": "string" }, @@ -237,6 +450,10 @@ "description": "Identity of the consumer who is using the service.\nThis field should be filled in for the operations initiated by a\nconsumer, but not for service-initiated operations that are\nnot related to a specific consumer.\n\nThis can be in one of the following formats:\n project:\u003cproject_id\u003e,\n project_number:\u003cproject_number\u003e,\n api_key:\u003capi_key\u003e.", "type": "string" }, + "quotaProperties": { + "description": "Represents the properties needed for quota check. Applicable only if this\noperation is for a quota check request.", + "$ref": "QuotaProperties" + }, "startTime": { "description": "Required. Start time of the operation.", "type": "string", @@ -255,18 +472,59 @@ "items": { "$ref": "MetricValueSet" } + }, + "resourceContainer": { + "description": "The resource name of the parent of a resource in the resource hierarchy.\n\nThis can be in one of the following formats:\n - “projects/\u003cproject-id or project-number\u003e”\n - “folders/\u003cfolder-id\u003e”\n - “organizations/\u003corganization-id\u003e”", + "type": "string" } }, "id": "Operation" }, + "AllocateQuotaResponse": { + "description": "Response message for the AllocateQuota method.", + "type": "object", + "properties": { + "allocateErrors": { + "description": "Indicates the decision of the allocate.", + "type": "array", + "items": { + "$ref": "QuotaError" + } + }, + "operationId": { + "description": "The same operation_id value used in the AllocateQuotaRequest. 
Used for\nlogging and diagnostics purposes.", + "type": "string" + }, + "quotaMetrics": { + "description": "Quota metrics to indicate the result of allocation. Depending on the\nrequest, one or more of the following metrics will be included:\n\n1. For rate quota, per quota group or per quota metric incremental usage\nwill be specified using the following delta metric:\n \"serviceruntime.googleapis.com/api/consumer/quota_used_count\"\n\n2. For allocation quota, per quota metric total usage will be specified\nusing the following gauge metric:\n \"serviceruntime.googleapis.com/allocation/consumer/quota_used_count\"\n\n3. For both rate quota and allocation quota, the quota limit reached\ncondition will be specified using the following boolean metric:\n \"serviceruntime.googleapis.com/quota/exceeded\"", + "type": "array", + "items": { + "$ref": "MetricValueSet" + } + }, + "serviceConfigId": { + "description": "ID of the actual config used to process the request.", + "type": "string" + } + }, + "id": "AllocateQuotaResponse" + }, "CheckRequest": { "description": "Request message for the Check method.", "type": "object", "properties": { + "requestProjectSettings": { + "description": "Requests the project settings to be returned as part of the check response.", + "type": "boolean" + }, "operation": { "description": "The operation to be checked.", "$ref": "Operation" }, + "skipActivationCheck": { + "description": "Indicates if service activation check should be skipped for this request.\nDefault behavior is to perform the check and apply relevant quota.", + "type": "boolean" + }, "serviceConfigId": { "description": "Specifies which version of service configuration should be used to process\nthe request.\n\nIf unspecified or no matching version can be found, the\nlatest one will be used.", "type": "string" @@ -274,6 +532,79 @@ }, "id": "CheckRequest" }, + "QuotaProperties": { + "description": "Represents the properties needed for quota operations.\n\nUse the metric_value_sets field in Operation message to provide cost\noverride with metric_name in \u003cservice_name\u003e/quota/\u003cquota_group_name\u003e/cost\nformat. Overrides for unmatched quota groups will be ignored.\nCosts are expected to be \u003e= 0. Cost 0 will cause no quota check,\nbut still traffic restrictions will be enforced.", + "type": "object", + "properties": { + "quotaMode": { + "description": "Quota mode for this operation.", + "enum": [ + "ACQUIRE", + "ACQUIRE_BEST_EFFORT", + "CHECK", + "RELEASE" + ], + "enumDescriptions": [ + "Decreases available quota by the cost specified for the operation.\nIf cost is higher than available quota, operation fails and returns\nerror.", + "Decreases available quota by the cost specified for the operation.\nIf cost is higher than available quota, operation does not fail and\navailable quota goes down to zero but it returns error.", + "Does not change any available quota. Only checks if there is enough\nquota.\nNo lock is placed on the checked tokens neither.", + "Increases available quota by the operation cost specified for the\noperation." + ], + "type": "string" + }, + "limitByIds": { + "description": "LimitType IDs that should be used for checking quota. Key in this map\nshould be a valid LimitType string, and the value is the ID to be used. For\nexample, an entry \u003cUSER, 123\u003e will cause all user quota limits to use 123\nas the user ID. 
See google/api/quota.proto for the definition of LimitType.\nCLIENT_PROJECT: Not supported.\nUSER: Value of this entry will be used for enforcing user-level quota\n limits. If none specified, caller IP passed in the\n servicecontrol.googleapis.com/caller_ip label will be used instead.\n If the server cannot resolve a value for this LimitType, an error\n will be thrown. No validation will be performed on this ID.\nDeprecated: use servicecontrol.googleapis.com/user label to send user ID.", + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "id": "QuotaProperties" + }, + "ReportInfo": { + "type": "object", + "properties": { + "operationId": { + "description": "The Operation.operation_id value from the request.", + "type": "string" + }, + "quotaInfo": { + "description": "Quota usage info when processing the `Operation`.", + "$ref": "QuotaInfo" + } + }, + "id": "ReportInfo" + }, + "ReleaseQuotaResponse": { + "description": "Response message for the ReleaseQuota method.", + "type": "object", + "properties": { + "releaseErrors": { + "description": "Indicates the decision of the release.", + "type": "array", + "items": { + "$ref": "QuotaError" + } + }, + "operationId": { + "description": "The same operation_id value used in the ReleaseQuotaRequest. Used for\nlogging and diagnostics purposes.", + "type": "string" + }, + "quotaMetrics": { + "description": "Quota metrics to indicate the result of release. Depending on the\nrequest, one or more of the following metrics will be included:\n\n1. For rate quota, per quota group or per quota metric released amount\nwill be specified using the following delta metric:\n \"serviceruntime.googleapis.com/api/consumer/quota_refund_count\"\n\n2. For allocation quota, per quota metric total usage will be specified\nusing the following gauge metric:\n \"serviceruntime.googleapis.com/allocation/consumer/quota_used_count\"", + "type": "array", + "items": { + "$ref": "MetricValueSet" + } + }, + "serviceConfigId": { + "description": "ID of the actual config used to process the request.", + "type": "string" + } + }, + "id": "ReleaseQuotaResponse" + }, "LogEntry": { "description": "An individual log entry.", "type": "object", @@ -409,6 +740,10 @@ "description": "Response message for the Check method.", "type": "object", "properties": { + "checkInfo": { + "description": "Feedback data returned from the server during processing a Check request.", + "$ref": "CheckInfo" + }, "checkErrors": { "description": "Indicate the decision of the check.\n\nIf no check errors are present, the service should process the operation.\nOtherwise the service should use the list of errors to determine the\nappropriate action.", "type": "array", @@ -420,6 +755,10 @@ "description": "The same operation_id value used in the CheckRequest.\nUsed for logging and diagnostics purposes.", "type": "string" }, + "quotaInfo": { + "description": "Quota information for the check request associated with this response.\n", + "$ref": "QuotaInfo" + }, "serviceConfigId": { "description": "The actual config id used to process the request.", "type": "string" @@ -427,6 +766,33 @@ }, "id": "CheckResponse" }, + "CheckInfo": { + "type": "object", + "properties": { + "unusedArguments": { + "description": "A list of fields and label keys that are ignored by the server.\nThe client doesn't need to send them for following requests to improve\nperformance and allow better aggregation.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "CheckInfo" + }, + 
"EndReconciliationRequest": { + "type": "object", + "properties": { + "reconciliationOperation": { + "description": "Operation that describes the quota reconciliation.", + "$ref": "QuotaOperation" + }, + "serviceConfigId": { + "description": "Specifies which version of service configuration should be used to process\nthe request. If unspecified or no matching version can be found, the latest\none will be used.", + "type": "string" + } + }, + "id": "EndReconciliationRequest" + }, "RequestMetadata": { "description": "Metadata about the request.", "type": "object", @@ -442,6 +808,55 @@ }, "id": "RequestMetadata" }, + "QuotaOperation": { + "description": "Represents information regarding a quota operation.", + "type": "object", + "properties": { + "methodName": { + "description": "Fully qualified name of the API method for which this quota operation is\nrequested. This name is used for matching quota rules or metric rules and\nbilling status rules defined in service configuration. This field is not\nrequired if the quota operation is performed on non-API resources.\n\nExample of an RPC method name:\n google.example.library.v1.LibraryService.CreateShelf", + "type": "string" + }, + "operationId": { + "description": "Identity of the operation. This must be unique within the scope of the\nservice that generated the operation. If the service calls AllocateQuota\nand ReleaseQuota on the same operation, the two calls should carry the\nsame ID.\n\nUUID version 4 is recommended, though not required. In scenarios where an\noperation is computed from existing information and an idempotent id is\ndesirable for deduplication purpose, UUID version 5 is recommended. See\nRFC 4122 for details.", + "type": "string" + }, + "labels": { + "description": "Labels describing the operation.", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "consumerId": { + "description": "Identity of the consumer for whom this quota operation is being performed.\n\nThis can be in one of the following formats:\n project:\u003cproject_id\u003e,\n project_number:\u003cproject_number\u003e,\n api_key:\u003capi_key\u003e.", + "type": "string" + }, + "quotaMetrics": { + "description": "Represents information about this operation. Each MetricValueSet\ncorresponds to a metric defined in the service configuration.\nThe data type used in the MetricValueSet must agree with\nthe data type specified in the metric definition.\n\nWithin a single operation, it is not allowed to have more than one\nMetricValue instances that have the same metric names and identical\nlabel value combinations. If a request has such duplicated MetricValue\ninstances, the entire request is rejected with\nan invalid argument error.", + "type": "array", + "items": { + "$ref": "MetricValueSet" + } + }, + "quotaMode": { + "description": "Quota mode for this operation.", + "enum": [ + "UNSPECIFIED", + "NORMAL", + "BEST_EFFORT", + "CHECK_ONLY" + ], + "enumDescriptions": [ + "", + "For AllocateQuota request, allocates quota for the amount specified in\nthe service configuration or specified using the quota metrics. If the\namount is higher than the available quota, allocation error will be\nreturned and no quota will be allocated.\nFor ReleaseQuota request, this mode is supported only for precise quota\nlimits. In this case, this operation releases quota for the amount\nspecified in the service configuration or specified using the quota\nmetrics. 
If the release can make available quota negative, release error\nwill be returned and no quota will be released.", + "For AllocateQuota request, this mode is supported only for imprecise\nquota limits. In this case, the operation allocates quota for the amount\nspecified in the service configuration or specified using the quota\nmetrics. If the amount is higher than the available quota, request does\nnot fail but all available quota will be allocated.\nFor ReleaseQuota request, this mode is supported for both precise quota\nlimits and imprecise quota limits. In this case, this operation releases\nquota for the amount specified in the service configuration or specified\nusing the quota metrics. If the release can make available quota\nnegative, request does not fail but only the available quota will be\nreleased. After the ReleaseQuota request completes, the available quota\nwill be 0, and never goes to negative.", + "For AllocateQuota request, only checks if there is enough quota\navailable and does not change the available quota. No lock is placed on\nthe available quota either. Not supported for ReleaseQuota request." + ], + "type": "string" + } + }, + "id": "QuotaOperation" + }, "ReportResponse": { "description": "Response message for the Report method.", "type": "object", @@ -453,6 +868,13 @@ "$ref": "ReportError" } }, + "reportInfos": { + "description": "Quota usage for each quota release `Operation` request.\n\nFully or partially failed quota release request may or may not be present\nin `report_quota_info`. For example, a failed quota release request will\nhave the current quota usage info when precise quota library returns the\ninfo. A deadline exceeded quota request will not have quota usage info.\n\nIf there is no quota release request, report_quota_info will be empty.\n", + "type": "array", + "items": { + "$ref": "ReportInfo" + } + }, "serviceConfigId": { "description": "The actual config id used to process the request.", "type": "string" @@ -512,6 +934,94 @@ }, "id": "Distribution" }, + "StartReconciliationResponse": { + "type": "object", + "properties": { + "operationId": { + "description": "The same operation_id value used in the StartReconciliationRequest. 
Used\nfor logging and diagnostics purposes.", + "type": "string" + }, + "reconciliationErrors": { + "description": "Indicates the decision of the reconciliation start.", + "type": "array", + "items": { + "$ref": "QuotaError" + } + }, + "quotaMetrics": { + "description": "Metric values as tracked by One Platform before the start of\nreconciliation.", + "type": "array", + "items": { + "$ref": "MetricValueSet" + } + }, + "serviceConfigId": { + "description": "ID of the actual config used to process the request.", + "type": "string" + } + }, + "id": "StartReconciliationResponse" + }, + "QuotaError": { + "type": "object", + "properties": { + "description": { + "description": "Free-form text that provides details on the cause of the error.", + "type": "string" + }, + "code": { + "description": "Error code.", + "enum": [ + "UNSPECIFIED", + "RESOURCE_EXHAUSTED", + "PROJECT_SUSPENDED", + "SERVICE_NOT_ENABLED", + "BILLING_NOT_ACTIVE", + "PROJECT_DELETED", + "PROJECT_INVALID", + "IP_ADDRESS_BLOCKED", + "REFERER_BLOCKED", + "CLIENT_APP_BLOCKED", + "API_KEY_INVALID", + "API_KEY_EXPIRED", + "SPATULA_HEADER_INVALID", + "LOAS_ROLE_INVALID", + "NO_LOAS_PROJECT", + "PROJECT_STATUS_UNVAILABLE", + "SERVICE_STATUS_UNAVAILABLE", + "BILLING_STATUS_UNAVAILABLE", + "QUOTA_SYSTEM_UNAVAILABLE" + ], + "enumDescriptions": [ + "This is never used.", + "Quota allocation failed.\nSame as google.rpc.Code.RESOURCE_EXHAUSTED.", + "Consumer project has been suspended.", + "Consumer has not enabled the service.", + "Consumer cannot access the service because billing is disabled.", + "Consumer's project has been marked as deleted (soft deletion).", + "Consumer's project number or ID does not represent a valid project.", + "IP address of the consumer is invalid for the specific consumer\nproject.", + "Referer address of the consumer request is invalid for the specific\nconsumer project.", + "Client application of the consumer request is invalid for the\nspecific consumer project.", + "Specified API key is invalid.", + "Specified API Key has expired.", + "Consumer's spatula header is invalid.", + "The consumer's LOAS role is invalid.", + "The consumer's LOAS role has no associated project.", + "The backend server for looking up project id/number is unavailable.", + "The backend server for checking service status is unavailable.", + "The backend server for checking billing status is unavailable.", + "The backend server for checking quota limits is unavailable." + ], + "type": "string" + }, + "subject": { + "description": "Subject to whom this error applies. See the specific enum for more details\non this field. For example, \"clientip:\u003cip address of client\u003e\" or\n\"project:\u003cGoogle developer project id\u003e\".", + "type": "string" + } + }, + "id": "QuotaError" + }, "AuditLog": { "description": "Common audit log format for Google Cloud Platform API operations.\n\n", "type": "object", @@ -628,6 +1138,65 @@ }, "id": "AuthorizationInfo" }, + "EndReconciliationResponse": { + "type": "object", + "properties": { + "operationId": { + "description": "The same operation_id value used in the EndReconciliationRequest. 
Used for\nlogging and diagnostics purposes.", + "type": "string" + }, + "reconciliationErrors": { + "description": "Indicates the decision of the reconciliation end.", + "type": "array", + "items": { + "$ref": "QuotaError" + } + }, + "quotaMetrics": { + "description": "Metric values as tracked by One Platform before the adjustment was made.", + "type": "array", + "items": { + "$ref": "MetricValueSet" + } + }, + "serviceConfigId": { + "description": "ID of the actual config used to process the request.", + "type": "string" + } + }, + "id": "EndReconciliationResponse" + }, + "AllocateQuotaRequest": { + "description": "Request message for the AllocateQuota method.", + "type": "object", + "properties": { + "allocationMode": { + "description": "Allocation mode for this operation.\nDeprecated: use QuotaMode inside the QuotaOperation.", + "enum": [ + "UNSPECIFIED", + "NORMAL", + "BEST_EFFORT", + "CHECK_ONLY" + ], + "enumDescriptions": [ + "", + "Allocates quota for the amount specified in the service configuration or\nspecified using the quota_metrics. If the amount is higher than the\navailable quota, allocation error will be returned and no quota will be\nallocated.", + "Allocates quota for the amount specified in the service configuration or\nspecified using the quota_metrics. If the amount is higher than the\navailable quota, request does not fail but all available quota will be\nallocated.", + "Only checks if there is enough quota available and does not change the\navailable quota. No lock is placed on the available quota either." + ], + "type": "string" + }, + "allocateOperation": { + "description": "Operation that describes the quota allocation.", + "$ref": "QuotaOperation" + }, + "serviceConfigId": { + "description": "Specifies which version of service configuration should be used to process\nthe request. If unspecified or no matching version can be found, the latest\none will be used.", + "type": "string" + } + }, + "id": "AllocateQuotaRequest" + }, "MetricValue": { "description": "Represents a single metric value.", "type": "object", @@ -670,12 +1239,31 @@ "description": "A signed 64-bit integer value.", "type": "string", "format": "int64" + }, + "moneyValue": { + "description": "A money value.", + "$ref": "Money" } }, "id": "MetricValue" + }, + "ReleaseQuotaRequest": { + "description": "Request message for the ReleaseQuota method.", + "type": "object", + "properties": { + "releaseOperation": { + "description": "Operation that describes the quota release.", + "$ref": "QuotaOperation" + }, + "serviceConfigId": { + "description": "Specifies which version of service configuration should be used to process\nthe request. 
If unspecified or no matching version can be found, the latest\none will be used.", + "type": "string" + } + }, + "id": "ReleaseQuotaRequest" } }, - "revision": "20170123", + "revision": "20170130", "basePath": "", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", diff --git a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go index 12eac55c7..06c16aa29 100644 --- a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go +++ b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Services *ServicesService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewServicesService(s *Service) *ServicesService { rs := &ServicesService{s: s} return rs @@ -87,6 +92,129 @@ type ServicesService struct { s *Service } +// AllocateQuotaRequest: Request message for the AllocateQuota method. +type AllocateQuotaRequest struct { + // AllocateOperation: Operation that describes the quota allocation. + AllocateOperation *QuotaOperation `json:"allocateOperation,omitempty"` + + // AllocationMode: Allocation mode for this operation. + // Deprecated: use QuotaMode inside the QuotaOperation. + // + // Possible values: + // "UNSPECIFIED" + // "NORMAL" - Allocates quota for the amount specified in the service + // configuration or + // specified using the quota_metrics. If the amount is higher than + // the + // available quota, allocation error will be returned and no quota will + // be + // allocated. + // "BEST_EFFORT" - Allocates quota for the amount specified in the + // service configuration or + // specified using the quota_metrics. If the amount is higher than + // the + // available quota, request does not fail but all available quota will + // be + // allocated. + // "CHECK_ONLY" - Only checks if there is enough quota available and + // does not change the + // available quota. No lock is placed on the available quota either. + AllocationMode string `json:"allocationMode,omitempty"` + + // ServiceConfigId: Specifies which version of service configuration + // should be used to process + // the request. If unspecified or no matching version can be found, the + // latest + // one will be used. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllocateOperation") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllocateOperation") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocateQuotaRequest) MarshalJSON() ([]byte, error) { + type noMethod AllocateQuotaRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AllocateQuotaResponse: Response message for the AllocateQuota method. +type AllocateQuotaResponse struct { + // AllocateErrors: Indicates the decision of the allocate. + AllocateErrors []*QuotaError `json:"allocateErrors,omitempty"` + + // OperationId: The same operation_id value used in the + // AllocateQuotaRequest. Used for + // logging and diagnostics purposes. + OperationId string `json:"operationId,omitempty"` + + // QuotaMetrics: Quota metrics to indicate the result of allocation. + // Depending on the + // request, one or more of the following metrics will be included: + // + // 1. For rate quota, per quota group or per quota metric incremental + // usage + // will be specified using the following delta metric: + // "serviceruntime.googleapis.com/api/consumer/quota_used_count" + // + // 2. For allocation quota, per quota metric total usage will be + // specified + // using the following gauge metric: + // + // "serviceruntime.googleapis.com/allocation/consumer/quota_used_count" + // + // + // 3. For both rate quota and allocation quota, the quota limit + // reached + // condition will be specified using the following boolean metric: + // "serviceruntime.googleapis.com/quota/exceeded" + QuotaMetrics []*MetricValueSet `json:"quotaMetrics,omitempty"` + + // ServiceConfigId: ID of the actual config used to process the request. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AllocateErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllocateErrors") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocateQuotaResponse) MarshalJSON() ([]byte, error) { + type noMethod AllocateQuotaResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AuditLog: Common audit log format for Google Cloud Platform API // operations. // @@ -278,8 +406,17 @@ type CheckError struct { // Same as google.rpc.Code.PERMISSION_DENIED. // "RESOURCE_EXHAUSTED" - Quota check failed. Same as // google.rpc.Code.RESOURCE_EXHAUSTED. + // "BUDGET_EXCEEDED" - Budget check failed. 
+ // "DENIAL_OF_SERVICE_DETECTED" - The consumer's request has been + // flagged as a DoS attack. + // "LOAD_SHEDDING" - The consumer's request should be rejected in + // order to protect the service + // from being overloaded. + // "ABUSER_DETECTED" - The consumer has been flagged as an abuser. // "SERVICE_NOT_ACTIVATED" - The consumer hasn't activated the // service. + // "VISIBILITY_DENIED" - The consumer cannot access the service due to + // visibility configuration. // "BILLING_DISABLED" - The consumer cannot access the service because // billing is disabled. // "PROJECT_DELETED" - The consumer's project has been marked as @@ -302,12 +439,29 @@ type CheckError struct { // "API_KEY_EXPIRED" - The consumer's API Key has expired. // "API_KEY_NOT_FOUND" - The consumer's API Key was not found in // config record. + // "SPATULA_HEADER_INVALID" - The consumer's spatula header is + // invalid. + // "LOAS_ROLE_INVALID" - The consumer's LOAS role is invalid. + // "NO_LOAS_PROJECT" - The consumer's LOAS role has no associated + // project. + // "LOAS_PROJECT_DISABLED" - The consumer's LOAS project is not + // `ACTIVE` in LoquatV2. + // "SECURITY_POLICY_VIOLATED" - Request is not allowed as per security + // policies defined in Org Policy. // "NAMESPACE_LOOKUP_UNAVAILABLE" - The backend server for looking up // project id/number is unavailable. // "SERVICE_STATUS_UNAVAILABLE" - The backend server for checking // service status is unavailable. // "BILLING_STATUS_UNAVAILABLE" - The backend server for checking // billing status is unavailable. + // "QUOTA_CHECK_UNAVAILABLE" - The backend server for checking quota + // limits is unavailable. + // "LOAS_PROJECT_LOOKUP_UNAVAILABLE" - The Spanner for looking up LOAS + // project is unavailable. + // "CLOUD_RESOURCE_MANAGER_BACKEND_UNAVAILABLE" - Cloud Resource + // Manager backend server is unavailable. + // "SECURITY_POLICY_BACKEND_UNAVAILABLE" - Backend server for + // evaluating security policy is unavailable. Code string `json:"code,omitempty"` // Detail: Free-form text providing details on the error cause of the @@ -337,11 +491,47 @@ func (s *CheckError) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type CheckInfo struct { + // UnusedArguments: A list of fields and label keys that are ignored by + // the server. + // The client doesn't need to send them for following requests to + // improve + // performance and allow better aggregation. + UnusedArguments []string `json:"unusedArguments,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UnusedArguments") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UnusedArguments") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *CheckInfo) MarshalJSON() ([]byte, error) { + type noMethod CheckInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CheckRequest: Request message for the Check method. type CheckRequest struct { // Operation: The operation to be checked. Operation *Operation `json:"operation,omitempty"` + // RequestProjectSettings: Requests the project settings to be returned + // as part of the check response. + RequestProjectSettings bool `json:"requestProjectSettings,omitempty"` + // ServiceConfigId: Specifies which version of service configuration // should be used to process // the request. @@ -350,6 +540,11 @@ type CheckRequest struct { // latest one will be used. ServiceConfigId string `json:"serviceConfigId,omitempty"` + // SkipActivationCheck: Indicates if service activation check should be + // skipped for this request. + // Default behavior is to perform the check and apply relevant quota. + SkipActivationCheck bool `json:"skipActivationCheck,omitempty"` + // ForceSendFields is a list of field names (e.g. "Operation") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -384,11 +579,20 @@ type CheckResponse struct { // appropriate action. CheckErrors []*CheckError `json:"checkErrors,omitempty"` + // CheckInfo: Feedback data returned from the server during processing a + // Check request. + CheckInfo *CheckInfo `json:"checkInfo,omitempty"` + // OperationId: The same operation_id value used in the // CheckRequest. // Used for logging and diagnostics purposes. OperationId string `json:"operationId,omitempty"` + // QuotaInfo: Quota information for the check request associated with + // this response. + // + QuotaInfo *QuotaInfo `json:"quotaInfo,omitempty"` + // ServiceConfigId: The actual config id used to process the request. ServiceConfigId string `json:"serviceConfigId,omitempty"` @@ -524,6 +728,87 @@ func (s *Distribution) UnmarshalJSON(data []byte) error { return nil } +type EndReconciliationRequest struct { + // ReconciliationOperation: Operation that describes the quota + // reconciliation. + ReconciliationOperation *QuotaOperation `json:"reconciliationOperation,omitempty"` + + // ServiceConfigId: Specifies which version of service configuration + // should be used to process + // the request. If unspecified or no matching version can be found, the + // latest + // one will be used. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ReconciliationOperation") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ReconciliationOperation") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *EndReconciliationRequest) MarshalJSON() ([]byte, error) { + type noMethod EndReconciliationRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type EndReconciliationResponse struct { + // OperationId: The same operation_id value used in the + // EndReconciliationRequest. Used for + // logging and diagnostics purposes. + OperationId string `json:"operationId,omitempty"` + + // QuotaMetrics: Metric values as tracked by One Platform before the + // adjustment was made. + QuotaMetrics []*MetricValueSet `json:"quotaMetrics,omitempty"` + + // ReconciliationErrors: Indicates the decision of the reconciliation + // end. + ReconciliationErrors []*QuotaError `json:"reconciliationErrors,omitempty"` + + // ServiceConfigId: ID of the actual config used to process the request. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "OperationId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OperationId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *EndReconciliationResponse) MarshalJSON() ([]byte, error) { + type noMethod EndReconciliationResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ExplicitBuckets: Describing buckets with arbitrary user-provided // width. type ExplicitBuckets struct { @@ -794,6 +1079,9 @@ type MetricValue struct { // the overriding relationship. Labels map[string]string `json:"labels,omitempty"` + // MoneyValue: A money value. + MoneyValue *Money `json:"moneyValue,omitempty"` + // StartTime: The start of the time period over which this metric // value's measurement // applies. The time period has different semantics for different @@ -880,6 +1168,48 @@ func (s *MetricValueSet) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Money: Represents an amount of money with its currency type. +type Money struct { + // CurrencyCode: The 3-letter currency code defined in ISO 4217. + CurrencyCode string `json:"currencyCode,omitempty"` + + // Nanos: Number of nano (10^-9) units of the amount. + // The value must be between -999,999,999 and +999,999,999 inclusive. + // If `units` is positive, `nanos` must be positive or zero. + // If `units` is zero, `nanos` can be positive, zero, or negative. + // If `units` is negative, `nanos` must be negative or zero. + // For example $-1.75 is represented as `units`=-1 and + // `nanos`=-750,000,000. + Nanos int64 `json:"nanos,omitempty"` + + // Units: The whole units of the amount. 
+ // For example if `currencyCode` is "USD", then 1 unit is one US + // dollar. + Units int64 `json:"units,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "CurrencyCode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrencyCode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Money) MarshalJSON() ([]byte, error) { + type noMethod Money + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Operation: Represents information regarding an operation. type Operation struct { // ConsumerId: Identity of the consumer who is using the service. @@ -910,6 +1240,13 @@ type Operation struct { // If the method returns successfully, it's guaranteed that the data // has // been persisted in durable storage. + // "DEBUG" - In addition to the behavior described in HIGH, DEBUG + // enables + // additional validation logic that is only useful during the + // onboarding + // process. This is only available to Google internal services and + // the service must be whitelisted by chemist-dev@google.com in order + // to use this level. Importance string `json:"importance,omitempty"` // Labels: Labels describing the operation. Only the following labels @@ -973,9 +1310,28 @@ type Operation struct { // future use. OperationName string `json:"operationName,omitempty"` + // QuotaProperties: Represents the properties needed for quota check. + // Applicable only if this + // operation is for a quota check request. + QuotaProperties *QuotaProperties `json:"quotaProperties,omitempty"` + + // ResourceContainer: The resource name of the parent of a resource in + // the resource hierarchy. + // + // This can be in one of the following formats: + // - “projects/” + // - “folders/” + // - “organizations/” + ResourceContainer string `json:"resourceContainer,omitempty"` + // StartTime: Required. Start time of the operation. StartTime string `json:"startTime,omitempty"` + // UserLabels: User defined labels for the resource that this operation + // is associated + // with. + UserLabels map[string]string `json:"userLabels,omitempty"` + // ForceSendFields is a list of field names (e.g. "ConsumerId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -999,16 +1355,58 @@ func (s *Operation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReportError: Represents the processing error of one `Operation` in -// the request. -type ReportError struct { - // OperationId: The Operation.operation_id value from the request. - OperationId string `json:"operationId,omitempty"` +type QuotaError struct { + // Code: Error code. 
+ // + // Possible values: + // "UNSPECIFIED" - This is never used. + // "RESOURCE_EXHAUSTED" - Quota allocation failed. + // Same as google.rpc.Code.RESOURCE_EXHAUSTED. + // "PROJECT_SUSPENDED" - Consumer project has been suspended. + // "SERVICE_NOT_ENABLED" - Consumer has not enabled the service. + // "BILLING_NOT_ACTIVE" - Consumer cannot access the service because + // billing is disabled. + // "PROJECT_DELETED" - Consumer's project has been marked as deleted + // (soft deletion). + // "PROJECT_INVALID" - Consumer's project number or ID does not + // represent a valid project. + // "IP_ADDRESS_BLOCKED" - IP address of the consumer is invalid for + // the specific consumer + // project. + // "REFERER_BLOCKED" - Referer address of the consumer request is + // invalid for the specific + // consumer project. + // "CLIENT_APP_BLOCKED" - Client application of the consumer request + // is invalid for the + // specific consumer project. + // "API_KEY_INVALID" - Specified API key is invalid. + // "API_KEY_EXPIRED" - Specified API Key has expired. + // "SPATULA_HEADER_INVALID" - Consumer's spatula header is invalid. + // "LOAS_ROLE_INVALID" - The consumer's LOAS role is invalid. + // "NO_LOAS_PROJECT" - The consumer's LOAS role has no associated + // project. + // "PROJECT_STATUS_UNVAILABLE" - The backend server for looking up + // project id/number is unavailable. + // "SERVICE_STATUS_UNAVAILABLE" - The backend server for checking + // service status is unavailable. + // "BILLING_STATUS_UNAVAILABLE" - The backend server for checking + // billing status is unavailable. + // "QUOTA_SYSTEM_UNAVAILABLE" - The backend server for checking quota + // limits is unavailable. + Code string `json:"code,omitempty"` - // Status: Details of the error when processing the `Operation`. - Status *Status `json:"status,omitempty"` + // Description: Free-form text that provides details on the cause of the + // error. + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "OperationId") to + // Subject: Subject to whom this error applies. See the specific enum + // for more details + // on this field. For example, "clientip:" + // or + // "project:". + Subject string `json:"subject,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1016,49 +1414,62 @@ type ReportError struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "OperationId") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *ReportError) MarshalJSON() ([]byte, error) { - type noMethod ReportError +func (s *QuotaError) MarshalJSON() ([]byte, error) { + type noMethod QuotaError raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReportRequest: Request message for the Report method. -type ReportRequest struct { - // Operations: Operations to be reported. +// QuotaInfo: Contains the quota information for a quota check response. +type QuotaInfo struct { + // LimitExceeded: Quota Metrics that have exceeded quota limits. + // For QuotaGroup-based quota, this is QuotaGroup.name + // For QuotaLimit-based quota, this is QuotaLimit.name + // See: google.api.Quota + // Deprecated: Use quota_metrics to get per quota group limit exceeded + // status. + LimitExceeded []string `json:"limitExceeded,omitempty"` + + // QuotaConsumed: Map of quota group name to the actual number of tokens + // consumed. If the + // quota check was not successful, then this will not be populated due + // to no + // quota consumption. + // Deprecated: Use quota_metrics to get per quota group usage. + QuotaConsumed map[string]int64 `json:"quotaConsumed,omitempty"` + + // QuotaMetrics: Quota metrics to indicate the usage. Depending on the + // check request, one or + // more of the following metrics will be included: // - // Typically the service should report one operation per - // request. - // Putting multiple operations into a single request is allowed, but - // should - // be used only when multiple operations are natually available at the - // time - // of the report. + // 1. For rate quota, per quota group or per quota metric incremental + // usage + // will be specified using the following delta metric: + // "serviceruntime.googleapis.com/api/consumer/quota_used_count" // - // If multiple operations are in a single request, the total request - // size - // should be no larger than 1MB. See ReportResponse.report_errors - // for - // partial failure behavior. - Operations []*Operation `json:"operations,omitempty"` - - // ServiceConfigId: Specifies which version of service config should be - // used to process the - // request. + // 2. For allocation quota, per quota metric total usage will be + // specified + // using the following gauge metric: // - // If unspecified or no matching version can be found, the - // latest one will be used. - ServiceConfigId string `json:"serviceConfigId,omitempty"` + // "serviceruntime.googleapis.com/allocation/consumer/quota_used_count" + // + // + // 3. For both rate quota and allocation quota, the quota limit + // reached + // condition will be specified using the following boolean metric: + // "serviceruntime.googleapis.com/quota/exceeded" + QuotaMetrics []*MetricValueSet `json:"quotaMetrics,omitempty"` - // ForceSendFields is a list of field names (e.g. "Operations") to + // ForceSendFields is a list of field names (e.g. "LimitExceeded") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1066,51 +1477,301 @@ type ReportRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Operations") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "LimitExceeded") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ReportRequest) MarshalJSON() ([]byte, error) { - type noMethod ReportRequest +func (s *QuotaInfo) MarshalJSON() ([]byte, error) { + type noMethod QuotaInfo raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReportResponse: Response message for the Report method. -type ReportResponse struct { - // ReportErrors: Partial failures, one for each `Operation` in the - // request that failed - // processing. There are three possible combinations of the RPC - // status: +// QuotaOperation: Represents information regarding a quota operation. +type QuotaOperation struct { + // ConsumerId: Identity of the consumer for whom this quota operation is + // being performed. // - // 1. The combination of a successful RPC status and an empty - // `report_errors` - // list indicates a complete success where all `Operations` in the - // request are processed successfully. - // 2. The combination of a successful RPC status and a non-empty - // `report_errors` list indicates a partial success where some - // `Operations` in the request succeeded. Each - // `Operation` that failed processing has a corresponding item - // in this list. - // 3. A failed RPC status indicates a general non-deterministic - // failure. - // When this happens, it's impossible to know which of the - // 'Operations' in the request succeeded or failed. - ReportErrors []*ReportError `json:"reportErrors,omitempty"` + // This can be in one of the following formats: + // project:, + // project_number:, + // api_key:. + ConsumerId string `json:"consumerId,omitempty"` - // ServiceConfigId: The actual config id used to process the request. + // Labels: Labels describing the operation. + Labels map[string]string `json:"labels,omitempty"` + + // MethodName: Fully qualified name of the API method for which this + // quota operation is + // requested. This name is used for matching quota rules or metric rules + // and + // billing status rules defined in service configuration. This field is + // not + // required if the quota operation is performed on non-API + // resources. + // + // Example of an RPC method name: + // google.example.library.v1.LibraryService.CreateShelf + MethodName string `json:"methodName,omitempty"` + + // OperationId: Identity of the operation. This must be unique within + // the scope of the + // service that generated the operation. If the service calls + // AllocateQuota + // and ReleaseQuota on the same operation, the two calls should carry + // the + // same ID. + // + // UUID version 4 is recommended, though not required. In scenarios + // where an + // operation is computed from existing information and an idempotent id + // is + // desirable for deduplication purpose, UUID version 5 is recommended. + // See + // RFC 4122 for details. + OperationId string `json:"operationId,omitempty"` + + // QuotaMetrics: Represents information about this operation. 
Each + // MetricValueSet + // corresponds to a metric defined in the service configuration. + // The data type used in the MetricValueSet must agree with + // the data type specified in the metric definition. + // + // Within a single operation, it is not allowed to have more than + // one + // MetricValue instances that have the same metric names and + // identical + // label value combinations. If a request has such duplicated + // MetricValue + // instances, the entire request is rejected with + // an invalid argument error. + QuotaMetrics []*MetricValueSet `json:"quotaMetrics,omitempty"` + + // QuotaMode: Quota mode for this operation. + // + // Possible values: + // "UNSPECIFIED" + // "NORMAL" - For AllocateQuota request, allocates quota for the + // amount specified in + // the service configuration or specified using the quota metrics. If + // the + // amount is higher than the available quota, allocation error will + // be + // returned and no quota will be allocated. + // For ReleaseQuota request, this mode is supported only for precise + // quota + // limits. In this case, this operation releases quota for the + // amount + // specified in the service configuration or specified using the + // quota + // metrics. If the release can make available quota negative, release + // error + // will be returned and no quota will be released. + // "BEST_EFFORT" - For AllocateQuota request, this mode is supported + // only for imprecise + // quota limits. In this case, the operation allocates quota for the + // amount + // specified in the service configuration or specified using the + // quota + // metrics. If the amount is higher than the available quota, request + // does + // not fail but all available quota will be allocated. + // For ReleaseQuota request, this mode is supported for both precise + // quota + // limits and imprecise quota limits. In this case, this operation + // releases + // quota for the amount specified in the service configuration or + // specified + // using the quota metrics. If the release can make available + // quota + // negative, request does not fail but only the available quota will + // be + // released. After the ReleaseQuota request completes, the available + // quota + // will be 0, and never goes to negative. + // "CHECK_ONLY" - For AllocateQuota request, only checks if there is + // enough quota + // available and does not change the available quota. No lock is placed + // on + // the available quota either. Not supported for ReleaseQuota request. + QuotaMode string `json:"quotaMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *QuotaOperation) MarshalJSON() ([]byte, error) { + type noMethod QuotaOperation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// QuotaProperties: Represents the properties needed for quota +// operations. +// +// Use the metric_value_sets field in Operation message to provide +// cost +// override with metric_name in +// /quota//cost +// format. Overrides for unmatched quota groups will be ignored. +// Costs are expected to be >= 0. Cost 0 will cause no quota check, +// but still traffic restrictions will be enforced. +type QuotaProperties struct { + // LimitByIds: LimitType IDs that should be used for checking quota. Key + // in this map + // should be a valid LimitType string, and the value is the ID to be + // used. For + // example, an entry will cause all user quota limits to use + // 123 + // as the user ID. See google/api/quota.proto for the definition of + // LimitType. + // CLIENT_PROJECT: Not supported. + // USER: Value of this entry will be used for enforcing user-level + // quota + // limits. If none specified, caller IP passed in the + // servicecontrol.googleapis.com/caller_ip label will be used + // instead. + // If the server cannot resolve a value for this LimitType, an + // error + // will be thrown. No validation will be performed on this + // ID. + // Deprecated: use servicecontrol.googleapis.com/user label to send user + // ID. + LimitByIds map[string]string `json:"limitByIds,omitempty"` + + // QuotaMode: Quota mode for this operation. + // + // Possible values: + // "ACQUIRE" - Decreases available quota by the cost specified for the + // operation. + // If cost is higher than available quota, operation fails and + // returns + // error. + // "ACQUIRE_BEST_EFFORT" - Decreases available quota by the cost + // specified for the operation. + // If cost is higher than available quota, operation does not fail + // and + // available quota goes down to zero but it returns error. + // "CHECK" - Does not change any available quota. Only checks if there + // is enough + // quota. + // No lock is placed on the checked tokens neither. + // "RELEASE" - Increases available quota by the operation cost + // specified for the + // operation. + QuotaMode string `json:"quotaMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LimitByIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LimitByIds") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QuotaProperties) MarshalJSON() ([]byte, error) { + type noMethod QuotaProperties + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReleaseQuotaRequest: Request message for the ReleaseQuota method. 
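By way of illustration: the comments above describe how a QuotaOperation is identified (a project:/project_number:/api_key: ConsumerId, a unique OperationId with UUID v4 recommended, an optional MethodName) and which QuotaMode values it accepts. A minimal sketch of constructing one follows; the package name quotasketch, the parameters, and the example method name are illustrative assumptions, not part of the generated client.

package quotasketch

import "google.golang.org/api/servicecontrol/v1"

// newCheckOnlyOperation builds a QuotaOperation that only verifies availability
// ("CHECK_ONLY") without consuming quota, per the QuotaMode enum documented above.
func newCheckOnlyOperation(projectID, operationID string) *servicecontrol.QuotaOperation {
	return &servicecontrol.QuotaOperation{
		ConsumerId:  "project:" + projectID, // project:, project_number: and api_key: forms are accepted.
		OperationId: operationID,            // UUID v4 recommended; reuse the same ID for a paired release.
		MethodName:  "google.example.library.v1.LibraryService.CreateShelf", // example name from the comments above.
		QuotaMode:   "CHECK_ONLY",
	}
}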
+type ReleaseQuotaRequest struct { + // ReleaseOperation: Operation that describes the quota release. + ReleaseOperation *QuotaOperation `json:"releaseOperation,omitempty"` + + // ServiceConfigId: Specifies which version of service configuration + // should be used to process + // the request. If unspecified or no matching version can be found, the + // latest + // one will be used. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ReleaseOperation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ReleaseOperation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ReleaseQuotaRequest) MarshalJSON() ([]byte, error) { + type noMethod ReleaseQuotaRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReleaseQuotaResponse: Response message for the ReleaseQuota method. +type ReleaseQuotaResponse struct { + // OperationId: The same operation_id value used in the + // ReleaseQuotaRequest. Used for + // logging and diagnostics purposes. + OperationId string `json:"operationId,omitempty"` + + // QuotaMetrics: Quota metrics to indicate the result of release. + // Depending on the + // request, one or more of the following metrics will be included: + // + // 1. For rate quota, per quota group or per quota metric released + // amount + // will be specified using the following delta metric: + // "serviceruntime.googleapis.com/api/consumer/quota_refund_count" + // + // 2. For allocation quota, per quota metric total usage will be + // specified + // using the following gauge metric: + // + // "serviceruntime.googleapis.com/allocation/consumer/quota_used_count" + QuotaMetrics []*MetricValueSet `json:"quotaMetrics,omitempty"` + + // ReleaseErrors: Indicates the decision of the release. + ReleaseErrors []*QuotaError `json:"releaseErrors,omitempty"` + + // ServiceConfigId: ID of the actual config used to process the request. ServiceConfigId string `json:"serviceConfigId,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ReportErrors") to + // ForceSendFields is a list of field names (e.g. "OperationId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1118,7 +1779,7 @@ type ReportResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ReportErrors") to include + // NullFields is a list of field names (e.g. 
"OperationId") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -1127,32 +1788,102 @@ type ReportResponse struct { NullFields []string `json:"-"` } -func (s *ReportResponse) MarshalJSON() ([]byte, error) { - type noMethod ReportResponse +func (s *ReleaseQuotaResponse) MarshalJSON() ([]byte, error) { + type noMethod ReleaseQuotaResponse raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RequestMetadata: Metadata about the request. -type RequestMetadata struct { - // CallerIp: The IP address of the caller. - CallerIp string `json:"callerIp,omitempty"` +// ReportError: Represents the processing error of one `Operation` in +// the request. +type ReportError struct { + // OperationId: The Operation.operation_id value from the request. + OperationId string `json:"operationId,omitempty"` - // CallerSuppliedUserAgent: The user agent of the caller. - // This information is not authenticated and should be treated - // accordingly. - // For example: + // Status: Details of the error when processing the `Operation`. + Status *Status `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OperationId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OperationId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReportError) MarshalJSON() ([]byte, error) { + type noMethod ReportError + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ReportInfo struct { + // OperationId: The Operation.operation_id value from the request. + OperationId string `json:"operationId,omitempty"` + + // QuotaInfo: Quota usage info when processing the `Operation`. + QuotaInfo *QuotaInfo `json:"quotaInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OperationId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OperationId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ReportInfo) MarshalJSON() ([]byte, error) { + type noMethod ReportInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReportRequest: Request message for the Report method. +type ReportRequest struct { + // Operations: Operations to be reported. // - // + `google-api-python-client/1.4.0`: - // The request was made by the Google API client for Python. - // + `Cloud SDK Command Line Tool apitools-client/1.0 gcloud/0.9.62`: - // The request was made by the Google Cloud SDK CLI (gcloud). - // + `AppEngine-Google; (+http://code.google.com/appengine; appid: - // s~my-project`: - // The request was made from the `my-project` App Engine app. - CallerSuppliedUserAgent string `json:"callerSuppliedUserAgent,omitempty"` + // Typically the service should report one operation per + // request. + // Putting multiple operations into a single request is allowed, but + // should + // be used only when multiple operations are natually available at the + // time + // of the report. + // + // If multiple operations are in a single request, the total request + // size + // should be no larger than 1MB. See ReportResponse.report_errors + // for + // partial failure behavior. + Operations []*Operation `json:"operations,omitempty"` - // ForceSendFields is a list of field names (e.g. "CallerIp") to + // ServiceConfigId: Specifies which version of service config should be + // used to process the + // request. + // + // If unspecified or no matching version can be found, the + // latest one will be used. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Operations") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1160,7 +1891,7 @@ type RequestMetadata struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CallerIp") to include in + // NullFields is a list of field names (e.g. "Operations") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -1169,35 +1900,228 @@ type RequestMetadata struct { NullFields []string `json:"-"` } -func (s *RequestMetadata) MarshalJSON() ([]byte, error) { - type noMethod RequestMetadata +func (s *ReportRequest) MarshalJSON() ([]byte, error) { + type noMethod ReportRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Status: The `Status` type defines a logical error model that is -// suitable for different -// programming environments, including REST APIs and RPC APIs. It is -// used by -// [gRPC](https://github.com/grpc). The error model is designed to -// be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error -// message, -// and error details. The error code should be an enum value -// of -// google.rpc.Code, but it may accept additional error codes if needed. -// The -// error message should be a developer-facing English message that -// helps -// developers *understand* and *resolve* the error. 
If a localized -// user-facing -// error message is needed, put the localized message in the error +// ReportResponse: Response message for the Report method. +type ReportResponse struct { + // ReportErrors: Partial failures, one for each `Operation` in the + // request that failed + // processing. There are three possible combinations of the RPC + // status: + // + // 1. The combination of a successful RPC status and an empty + // `report_errors` + // list indicates a complete success where all `Operations` in the + // request are processed successfully. + // 2. The combination of a successful RPC status and a non-empty + // `report_errors` list indicates a partial success where some + // `Operations` in the request succeeded. Each + // `Operation` that failed processing has a corresponding item + // in this list. + // 3. A failed RPC status indicates a general non-deterministic + // failure. + // When this happens, it's impossible to know which of the + // 'Operations' in the request succeeded or failed. + ReportErrors []*ReportError `json:"reportErrors,omitempty"` + + // ReportInfos: Quota usage for each quota release `Operation` + // request. + // + // Fully or partially failed quota release request may or may not be + // present + // in `report_quota_info`. For example, a failed quota release request + // will + // have the current quota usage info when precise quota library returns + // the + // info. A deadline exceeded quota request will not have quota usage + // info. + // + // If there is no quota release request, report_quota_info will be + // empty. + // + ReportInfos []*ReportInfo `json:"reportInfos,omitempty"` + + // ServiceConfigId: The actual config id used to process the request. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ReportErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ReportErrors") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReportResponse) MarshalJSON() ([]byte, error) { + type noMethod ReportResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RequestMetadata: Metadata about the request. +type RequestMetadata struct { + // CallerIp: The IP address of the caller. + CallerIp string `json:"callerIp,omitempty"` + + // CallerSuppliedUserAgent: The user agent of the caller. + // This information is not authenticated and should be treated + // accordingly. + // For example: + // + // + `google-api-python-client/1.4.0`: + // The request was made by the Google API client for Python. 
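Per the ReportRequest comments above, a service normally reports one Operation per request, may batch several when they are naturally available together, and should keep the total request under 1MB; ReportResponse.ReportErrors then carries per-Operation partial failures. A sketch of that pattern, with svc and the pre-built ops slice assumed:

package quotasketch

import (
	"context"
	"log"

	"google.golang.org/api/servicecontrol/v1"
)

// reportOperations sends a small batch of operations and logs any partial failures,
// matching the three RPC-status combinations described for ReportResponse above.
func reportOperations(ctx context.Context, svc *servicecontrol.Service, serviceName string, ops []*servicecontrol.Operation) error {
	resp, err := svc.Services.Report(serviceName, &servicecontrol.ReportRequest{Operations: ops}).Context(ctx).Do()
	if err != nil {
		// Failed RPC status: it is unknown which operations, if any, were processed.
		return err
	}
	for _, re := range resp.ReportErrors {
		// Partial success: each failed Operation appears here with its operation_id and status.
		log.Printf("operation %s failed: %+v", re.OperationId, re.Status)
	}
	return nil
}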
+ // + `Cloud SDK Command Line Tool apitools-client/1.0 gcloud/0.9.62`: + // The request was made by the Google Cloud SDK CLI (gcloud). + // + `AppEngine-Google; (+http://code.google.com/appengine; appid: + // s~my-project`: + // The request was made from the `my-project` App Engine app. + CallerSuppliedUserAgent string `json:"callerSuppliedUserAgent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CallerIp") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CallerIp") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RequestMetadata) MarshalJSON() ([]byte, error) { + type noMethod RequestMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StartReconciliationRequest struct { + // ReconciliationOperation: Operation that describes the quota + // reconciliation. + ReconciliationOperation *QuotaOperation `json:"reconciliationOperation,omitempty"` + + // ServiceConfigId: Specifies which version of service configuration + // should be used to process + // the request. If unspecified or no matching version can be found, the + // latest + // one will be used. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ReconciliationOperation") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ReconciliationOperation") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *StartReconciliationRequest) MarshalJSON() ([]byte, error) { + type noMethod StartReconciliationRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StartReconciliationResponse struct { + // OperationId: The same operation_id value used in the + // StartReconciliationRequest. Used + // for logging and diagnostics purposes. + OperationId string `json:"operationId,omitempty"` + + // QuotaMetrics: Metric values as tracked by One Platform before the + // start of + // reconciliation. 
+ QuotaMetrics []*MetricValueSet `json:"quotaMetrics,omitempty"` + + // ReconciliationErrors: Indicates the decision of the reconciliation + // start. + ReconciliationErrors []*QuotaError `json:"reconciliationErrors,omitempty"` + + // ServiceConfigId: ID of the actual config used to process the request. + ServiceConfigId string `json:"serviceConfigId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "OperationId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OperationId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StartReconciliationResponse) MarshalJSON() ([]byte, error) { + type noMethod StartReconciliationResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is +// suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of +// google.rpc.Code, but it may accept additional error codes if needed. +// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error // details or // localize it in the client. The optional error details may contain // arbitrary @@ -1271,69 +2195,500 @@ type Status struct { // google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type noMethod Status + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "servicecontrol.services.allocateQuota": + +type ServicesAllocateQuotaCall struct { + s *Service + serviceName string + allocatequotarequest *AllocateQuotaRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AllocateQuota: Attempts to allocate quota for the specified consumer. +// It should be called +// before the operation is executed. +// +// This method requires the +// `servicemanagement.services.quota` +// permission on the specified service. For more information, +// see +// [Google Cloud IAM](https://cloud.google.com/iam). +func (r *ServicesService) AllocateQuota(serviceName string, allocatequotarequest *AllocateQuotaRequest) *ServicesAllocateQuotaCall { + c := &ServicesAllocateQuotaCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.serviceName = serviceName + c.allocatequotarequest = allocatequotarequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesAllocateQuotaCall) Fields(s ...googleapi.Field) *ServicesAllocateQuotaCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesAllocateQuotaCall) Context(ctx context.Context) *ServicesAllocateQuotaCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesAllocateQuotaCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesAllocateQuotaCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.allocatequotarequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:allocateQuota") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "serviceName": c.serviceName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicecontrol.services.allocateQuota" call. +// Exactly one of *AllocateQuotaResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *AllocateQuotaResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ServicesAllocateQuotaCall) Do(opts ...googleapi.CallOption) (*AllocateQuotaResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AllocateQuotaResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Attempts to allocate quota for the specified consumer. It should be called\nbefore the operation is executed.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + // "flatPath": "v1/services/{serviceName}:allocateQuota", + // "httpMethod": "POST", + // "id": "servicecontrol.services.allocateQuota", + // "parameterOrder": [ + // "serviceName" + // ], + // "parameters": { + // "serviceName": { + // "description": "Name of the service as specified in the service configuration. For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/services/{serviceName}:allocateQuota", + // "request": { + // "$ref": "AllocateQuotaRequest" + // }, + // "response": { + // "$ref": "AllocateQuotaResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/servicecontrol" + // ] + // } + +} + +// method id "servicecontrol.services.check": + +type ServicesCheckCall struct { + s *Service + serviceName string + checkrequest *CheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Check: Checks an operation with Google Service Control to decide +// whether +// the given operation should proceed. It should be called before +// the +// operation is executed. +// +// If feasible, the client should cache the check results and reuse them +// for +// 60 seconds. In case of server errors, the client can rely on the +// cached +// results for longer time. +// +// NOTE: the `CheckRequest` has the size limit of 64KB. +// +// This method requires the `servicemanagement.services.check` +// permission +// on the specified service. For more information, see +// [Google Cloud IAM](https://cloud.google.com/iam). +func (r *ServicesService) Check(serviceName string, checkrequest *CheckRequest) *ServicesCheckCall { + c := &ServicesCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.serviceName = serviceName + c.checkrequest = checkrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
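The AllocateQuota call above follows the usual generated-client shape (builder, Context, Do). The end-to-end sketch below is hedged: servicecontrol.New, CloudPlatformScope, Application Default Credentials via golang.org/x/oauth2/google, and the AllocateOperation field on AllocateQuotaRequest are assumptions drawn from the wider package, not from the lines shown here.

package quotasketch

import (
	"context"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/servicecontrol/v1"
)

// allocateQuota attempts a quota allocation before executing an operation,
// as the AllocateQuota documentation above recommends.
func allocateQuota(ctx context.Context, serviceName string, op *servicecontrol.QuotaOperation) (*servicecontrol.AllocateQuotaResponse, error) {
	httpClient, err := google.DefaultClient(ctx, servicecontrol.CloudPlatformScope)
	if err != nil {
		return nil, err
	}
	svc, err := servicecontrol.New(httpClient)
	if err != nil {
		return nil, err
	}
	req := &servicecontrol.AllocateQuotaRequest{AllocateOperation: op} // field name assumed; defined earlier in this file.
	return svc.Services.AllocateQuota(serviceName, req).Context(ctx).Do()
}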
+func (c *ServicesCheckCall) Fields(s ...googleapi.Field) *ServicesCheckCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesCheckCall) Context(ctx context.Context) *ServicesCheckCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesCheckCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesCheckCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.checkrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:check") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "serviceName": c.serviceName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicecontrol.services.check" call. +// Exactly one of *CheckResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CheckResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ServicesCheckCall) Do(opts ...googleapi.CallOption) (*CheckResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &CheckResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Checks an operation with Google Service Control to decide whether\nthe given operation should proceed. It should be called before the\noperation is executed.\n\nIf feasible, the client should cache the check results and reuse them for\n60 seconds. In case of server errors, the client can rely on the cached\nresults for longer time.\n\nNOTE: the `CheckRequest` has the size limit of 64KB.\n\nThis method requires the `servicemanagement.services.check` permission\non the specified service. 
For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + // "flatPath": "v1/services/{serviceName}:check", + // "httpMethod": "POST", + // "id": "servicecontrol.services.check", + // "parameterOrder": [ + // "serviceName" + // ], + // "parameters": { + // "serviceName": { + // "description": "The service name as specified in its service configuration. For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/services/{serviceName}:check", + // "request": { + // "$ref": "CheckRequest" + // }, + // "response": { + // "$ref": "CheckResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/servicecontrol" + // ] + // } + +} + +// method id "servicecontrol.services.endReconciliation": + +type ServicesEndReconciliationCall struct { + s *Service + serviceName string + endreconciliationrequest *EndReconciliationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// EndReconciliation: Signals the quota controller that service ends the +// ongoing usage +// reconciliation. +// +// This method requires the +// `servicemanagement.services.quota` +// permission on the specified service. For more information, +// see +// [Google Cloud IAM](https://cloud.google.com/iam). +func (r *ServicesService) EndReconciliation(serviceName string, endreconciliationrequest *EndReconciliationRequest) *ServicesEndReconciliationCall { + c := &ServicesEndReconciliationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.serviceName = serviceName + c.endreconciliationrequest = endreconciliationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesEndReconciliationCall) Fields(s ...googleapi.Field) *ServicesEndReconciliationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesEndReconciliationCall) Context(ctx context.Context) *ServicesEndReconciliationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesEndReconciliationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
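The Check documentation above asks callers to cache check results for roughly 60 seconds (and to lean on cached results longer when the server errors), with CheckRequest capped at 64KB. One possible in-memory cache around the generated call is sketched below; the consumer-ID cache key and the map/mutex layout are illustrative choices, not anything the library prescribes.

package quotasketch

import (
	"context"
	"sync"
	"time"

	"google.golang.org/api/servicecontrol/v1"
)

// checkCache memoizes CheckResponse values per consumer for about 60 seconds,
// as suggested by the Check documentation above.
type checkCache struct {
	mu      sync.Mutex
	entries map[string]cachedCheck
}

type cachedCheck struct {
	resp    *servicecontrol.CheckResponse
	expires time.Time
}

func (c *checkCache) Check(ctx context.Context, svc *servicecontrol.Service, serviceName, consumerID string, req *servicecontrol.CheckRequest) (*servicecontrol.CheckResponse, error) {
	c.mu.Lock()
	if e, ok := c.entries[consumerID]; ok && time.Now().Before(e.expires) {
		c.mu.Unlock()
		return e.resp, nil // fresh cached result; no RPC issued.
	}
	c.mu.Unlock()

	resp, err := svc.Services.Check(serviceName, req).Context(ctx).Do()
	if err != nil {
		return nil, err // on server errors a caller may instead keep serving a stale entry.
	}

	c.mu.Lock()
	if c.entries == nil {
		c.entries = make(map[string]cachedCheck)
	}
	c.entries[consumerID] = cachedCheck{resp: resp, expires: time.Now().Add(60 * time.Second)}
	c.mu.Unlock()
	return resp, nil
}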
- NullFields []string `json:"-"` +func (c *ServicesEndReconciliationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.endreconciliationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:endReconciliation") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "serviceName": c.serviceName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -func (s *Status) MarshalJSON() ([]byte, error) { - type noMethod Status - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +// Do executes the "servicecontrol.services.endReconciliation" call. +// Exactly one of *EndReconciliationResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *EndReconciliationResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ServicesEndReconciliationCall) Do(opts ...googleapi.CallOption) (*EndReconciliationResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &EndReconciliationResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Signals the quota controller that service ends the ongoing usage\nreconciliation.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + // "flatPath": "v1/services/{serviceName}:endReconciliation", + // "httpMethod": "POST", + // "id": "servicecontrol.services.endReconciliation", + // "parameterOrder": [ + // "serviceName" + // ], + // "parameters": { + // "serviceName": { + // "description": "Name of the service as specified in the service configuration. 
For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/services/{serviceName}:endReconciliation", + // "request": { + // "$ref": "EndReconciliationRequest" + // }, + // "response": { + // "$ref": "EndReconciliationResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/servicecontrol" + // ] + // } + } -// method id "servicecontrol.services.check": +// method id "servicecontrol.services.releaseQuota": -type ServicesCheckCall struct { - s *Service - serviceName string - checkrequest *CheckRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ServicesReleaseQuotaCall struct { + s *Service + serviceName string + releasequotarequest *ReleaseQuotaRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Check: Checks an operation with Google Service Control to decide -// whether -// the given operation should proceed. It should be called before -// the -// operation is executed. -// -// If feasible, the client should cache the check results and reuse them -// for -// 60 seconds. In case of server errors, the client can rely on the -// cached -// results for longer time. +// ReleaseQuota: Releases previously allocated quota done through +// AllocateQuota method. // -// NOTE: the `CheckRequest` has the size limit of 64KB. -// -// This method requires the `servicemanagement.services.check` -// permission -// on the specified service. For more information, see +// This method requires the +// `servicemanagement.services.quota` +// permission on the specified service. For more information, +// see // [Google Cloud IAM](https://cloud.google.com/iam). -func (r *ServicesService) Check(serviceName string, checkrequest *CheckRequest) *ServicesCheckCall { - c := &ServicesCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ServicesService) ReleaseQuota(serviceName string, releasequotarequest *ReleaseQuotaRequest) *ServicesReleaseQuotaCall { + c := &ServicesReleaseQuotaCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName - c.checkrequest = checkrequest + c.releasequotarequest = releasequotarequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ServicesCheckCall) Fields(s ...googleapi.Field) *ServicesCheckCall { +func (c *ServicesReleaseQuotaCall) Fields(s ...googleapi.Field) *ServicesReleaseQuotaCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -1341,34 +2696,35 @@ func (c *ServicesCheckCall) Fields(s ...googleapi.Field) *ServicesCheckCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ServicesCheckCall) Context(ctx context.Context) *ServicesCheckCall { +func (c *ServicesReleaseQuotaCall) Context(ctx context.Context) *ServicesReleaseQuotaCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ServicesCheckCall) Header() http.Header { +func (c *ServicesReleaseQuotaCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ServicesCheckCall) doRequest(alt string) (*http.Response, error) { +func (c *ServicesReleaseQuotaCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.checkrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.releasequotarequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:check") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:releaseQuota") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -1378,14 +2734,14 @@ func (c *ServicesCheckCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "servicecontrol.services.check" call. -// Exactly one of *CheckResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CheckResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "servicecontrol.services.releaseQuota" call. +// Exactly one of *ReleaseQuotaResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ReleaseQuotaResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ServicesCheckCall) Do(opts ...googleapi.CallOption) (*CheckResponse, error) { +func (c *ServicesReleaseQuotaCall) Do(opts ...googleapi.CallOption) (*ReleaseQuotaResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1404,7 +2760,7 @@ func (c *ServicesCheckCall) Do(opts ...googleapi.CallOption) (*CheckResponse, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CheckResponse{ + ret := &ReleaseQuotaResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1416,27 +2772,27 @@ func (c *ServicesCheckCall) Do(opts ...googleapi.CallOption) (*CheckResponse, er } return ret, nil // { - // "description": "Checks an operation with Google Service Control to decide whether\nthe given operation should proceed. It should be called before the\noperation is executed.\n\nIf feasible, the client should cache the check results and reuse them for\n60 seconds. In case of server errors, the client can rely on the cached\nresults for longer time.\n\nNOTE: the `CheckRequest` has the size limit of 64KB.\n\nThis method requires the `servicemanagement.services.check` permission\non the specified service. 
For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", - // "flatPath": "v1/services/{serviceName}:check", + // "description": "Releases previously allocated quota done through AllocateQuota method.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + // "flatPath": "v1/services/{serviceName}:releaseQuota", // "httpMethod": "POST", - // "id": "servicecontrol.services.check", + // "id": "servicecontrol.services.releaseQuota", // "parameterOrder": [ // "serviceName" // ], // "parameters": { // "serviceName": { - // "description": "The service name as specified in its service configuration. For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + // "description": "Name of the service as specified in the service configuration. For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "v1/services/{serviceName}:check", + // "path": "v1/services/{serviceName}:releaseQuota", // "request": { - // "$ref": "CheckRequest" + // "$ref": "ReleaseQuotaRequest" // }, // "response": { - // "$ref": "CheckResponse" + // "$ref": "ReleaseQuotaResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -1515,6 +2871,7 @@ func (c *ServicesReportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reportrequest) if err != nil { @@ -1599,3 +2956,180 @@ func (c *ServicesReportCall) Do(opts ...googleapi.CallOption) (*ReportResponse, // } } + +// method id "servicecontrol.services.startReconciliation": + +type ServicesStartReconciliationCall struct { + s *Service + serviceName string + startreconciliationrequest *StartReconciliationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// StartReconciliation: Unlike rate quota, allocation quota does not get +// refilled periodically. +// So, it is possible that the quota usage as seen by the service +// differs from +// what the One Platform considers the usage is. This is expected to +// happen +// only rarely, but over time this can accumulate. Services can +// invoke +// StartReconciliation and EndReconciliation to correct this usage +// drift, as +// described below: +// 1. Service sends StartReconciliation with a timestamp in future for +// each +// metric that needs to be reconciled. The timestamp being in future +// allows +// to account for in-flight AllocateQuota and ReleaseQuota requests +// for the +// same metric. +// 2. One Platform records this timestamp and starts tracking +// subsequent +// AllocateQuota and ReleaseQuota requests until EndReconciliation +// is +// called. +// 3. At or after the time specified in the StartReconciliation, +// service +// sends EndReconciliation with the usage that needs to be reconciled +// to. +// 4. One Platform adjusts its own record of usage for that metric to +// the +// value specified in EndReconciliation by taking in to account any +// allocation or release between StartReconciliation and +// EndReconciliation. 
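The four-step drift-correction protocol described above pairs StartReconciliation with a later EndReconciliation that carries the usage to converge to. A high-level sketch follows; the ReconciliationOperation field on EndReconciliationRequest (assumed to mirror StartReconciliationRequest above) and the caller-supplied operations are assumptions for illustration.

package quotasketch

import (
	"context"

	"google.golang.org/api/servicecontrol/v1"
)

// reconcileUsage starts a reconciliation window and, once the agreed timestamp has
// passed, reports the usage One Platform should converge to (steps 1 and 3-4 above).
func reconcileUsage(ctx context.Context, svc *servicecontrol.Service, serviceName string, startOp, endOp *servicecontrol.QuotaOperation) error {
	// Step 1: announce the reconciliation; the future timestamp travels in startOp's quota metrics.
	start := &servicecontrol.StartReconciliationRequest{ReconciliationOperation: startOp}
	if _, err := svc.Services.StartReconciliation(serviceName, start).Context(ctx).Do(); err != nil {
		return err
	}

	// ... the service keeps allocating/releasing quota until the chosen timestamp ...

	// Step 3: report the usage to reconcile to; One Platform then adjusts its record (step 4).
	end := &servicecontrol.EndReconciliationRequest{ReconciliationOperation: endOp} // field name assumed.
	_, err := svc.Services.EndReconciliation(serviceName, end).Context(ctx).Do()
	return err
}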
+// +// Signals the quota controller that the service wants to perform a +// usage +// reconciliation as specified in the request. +// +// This method requires the +// `servicemanagement.services.quota` +// permission on the specified service. For more information, +// see +// [Google Cloud IAM](https://cloud.google.com/iam). +func (r *ServicesService) StartReconciliation(serviceName string, startreconciliationrequest *StartReconciliationRequest) *ServicesStartReconciliationCall { + c := &ServicesStartReconciliationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.serviceName = serviceName + c.startreconciliationrequest = startreconciliationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesStartReconciliationCall) Fields(s ...googleapi.Field) *ServicesStartReconciliationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesStartReconciliationCall) Context(ctx context.Context) *ServicesStartReconciliationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesStartReconciliationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesStartReconciliationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.startreconciliationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:startReconciliation") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "serviceName": c.serviceName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicecontrol.services.startReconciliation" call. +// Exactly one of *StartReconciliationResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *StartReconciliationResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ServicesStartReconciliationCall) Do(opts ...googleapi.CallOption) (*StartReconciliationResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &StartReconciliationResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Unlike rate quota, allocation quota does not get refilled periodically.\nSo, it is possible that the quota usage as seen by the service differs from\nwhat the One Platform considers the usage is. This is expected to happen\nonly rarely, but over time this can accumulate. Services can invoke\nStartReconciliation and EndReconciliation to correct this usage drift, as\ndescribed below:\n1. Service sends StartReconciliation with a timestamp in future for each\n metric that needs to be reconciled. The timestamp being in future allows\n to account for in-flight AllocateQuota and ReleaseQuota requests for the\n same metric.\n2. One Platform records this timestamp and starts tracking subsequent\n AllocateQuota and ReleaseQuota requests until EndReconciliation is\n called.\n3. At or after the time specified in the StartReconciliation, service\n sends EndReconciliation with the usage that needs to be reconciled to.\n4. One Platform adjusts its own record of usage for that metric to the\n value specified in EndReconciliation by taking in to account any\n allocation or release between StartReconciliation and EndReconciliation.\n\nSignals the quota controller that the service wants to perform a usage\nreconciliation as specified in the request.\n\nThis method requires the `servicemanagement.services.quota`\npermission on the specified service. For more information, see\n[Google Cloud IAM](https://cloud.google.com/iam).", + // "flatPath": "v1/services/{serviceName}:startReconciliation", + // "httpMethod": "POST", + // "id": "servicecontrol.services.startReconciliation", + // "parameterOrder": [ + // "serviceName" + // ], + // "parameters": { + // "serviceName": { + // "description": "Name of the service as specified in the service configuration. 
For example,\n`\"pubsub.googleapis.com\"`.\n\nSee google.api.Service for the definition of a service name.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/services/{serviceName}:startReconciliation", + // "request": { + // "$ref": "StartReconciliationRequest" + // }, + // "response": { + // "$ref": "StartReconciliationResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/servicecontrol" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json index 5d3778ce7..6ce704dfc 100644 --- a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json +++ b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json @@ -1,872 +1,309 @@ { - "id": "servicemanagement:v1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "schemas": { + "UndeleteServiceResponse": { + "description": "Response message for UndeleteService method.", + "type": "object", + "properties": { + "service": { + "$ref": "ManagedService", + "description": "Revived service resource." + } + }, + "id": "UndeleteServiceResponse" + }, + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "type": "object", + "properties": { + "iamOwned": { + "type": "boolean" }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" + "rules": { + "description": "If more than one rule is specified, the rules are applied in the following\nmanner:\n- All matching LOG rules are always applied.\n- If any DENY/DENY_WITH_LOG rule matches, permission is denied.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is\n granted.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if no rule applies, permission is denied.", + "type": "array", + "items": { + "$ref": "Rule" + } }, - "https://www.googleapis.com/auth/service.management": { - "description": "Manage your Google API service configuration" + "version": { + "description": "Version of the `Policy`. 
The default version is 0.", + "format": "int32", + "type": "integer" }, - "https://www.googleapis.com/auth/service.management.readonly": { - "description": "View your Google API service configuration" + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", + "type": "array", + "items": { + "$ref": "AuditConfig" + } + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } + }, + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" } - } - } - }, - "description": "Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.", - "protocol": "rest", - "title": "Google Service Management API", - "resources": { - "operations": { - "methods": { - "get": { - "id": "servicemanagement.operations.get", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "name" + }, + "id": "Policy" + }, + "Api": { + "description": "Api is a light-weight descriptor for a protocol buffer service.", + "type": "object", + "properties": { + "mixins": { + "description": "Included APIs. See Mixin.", + "type": "array", + "items": { + "$ref": "Mixin" + } + }, + "options": { + "description": "Any metadata attached to the API.", + "type": "array", + "items": { + "$ref": "Option" + } + }, + "methods": { + "description": "The methods of this api, in unspecified order.", + "type": "array", + "items": { + "$ref": "Method" + } + }, + "name": { + "description": "The fully qualified name of this api, including package name\nfollowed by the api's simple name.", + "type": "string" + }, + "syntax": { + "enumDescriptions": [ + "Syntax `proto2`.", + "Syntax `proto3`." ], - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "flatPath": "v1/operations/{operationsId}", - "httpMethod": "GET", - "parameters": { - "name": { - "description": "The name of the operation resource.", - "required": true, - "pattern": "^operations/.+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax of the service.", + "type": "string" + }, + "sourceContext": { + "$ref": "SourceContext", + "description": "Source context for the protocol buffer service represented by this\nmessage." + }, + "version": { + "description": "A version string for this api. If specified, must have the form\n`major-version.minor-version`, as in `1.10`. 
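The `etag`-based read-modify-write cycle described above is the pattern the getIamPolicy/setIamPolicy methods elsewhere in this file expect. A compile-only sketch, assuming an authenticated *http.Client (for example from golang.org/x/oauth2/google.DefaultClient as in the servicecontrol sketch earlier); the package name `sketches`, the helper name, and the Go field names on the request/response types follow the usual generated naming and are not shown in this diff, and `example.googleapis.com` is a placeholder:

package sketches

import (
	"net/http"

	"golang.org/x/net/context"
	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

// addViewerBinding reads the current IAM policy for a managed service,
// appends a binding locally, and writes the policy back. Because the policy
// returned by getIamPolicy carries an etag, passing the same struct to
// setIamPolicy lets the server detect a concurrent update instead of
// overwriting it blindly.
func addViewerBinding(ctx context.Context, httpClient *http.Client, member string) error {
	svc, err := servicemanagement.New(httpClient)
	if err != nil {
		return err
	}
	const resource = "services/example.googleapis.com" // matches the ^services/[^/]+$ pattern

	policy, err := svc.Services.GetIamPolicy(resource, &servicemanagement.GetIamPolicyRequest{}).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &servicemanagement.Binding{
		Role:    "roles/viewer",
		Members: []string{member}, // e.g. "user:sean@example.com", as in the Policy example above
	})
	_, err = svc.Services.SetIamPolicy(resource, &servicemanagement.SetIamPolicyRequest{Policy: policy}).
		Context(ctx).Do()
	return err
}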
If the minor version\nis omitted, it defaults to zero. If the entire version field is\nempty, the major version is derived from the package name, as\noutlined below. If the field is not empty, the version in the\npackage name will be verified to be consistent with what is\nprovided here.\n\nThe versioning schema uses [semantic\nversioning](http://semver.org) where the major version number\nindicates a breaking change and the minor version an additive,\nnon-breaking change. Both version numbers are signals to users\nwhat to expect from different versions, and should be carefully\nchosen based on the product plan.\n\nThe major version is also reflected in the package name of the\nAPI, which must end in `v\u003cmajor-version\u003e`, as in\n`google.feature.v1`. For major versions 0 and 1, the suffix can\nbe omitted. Zero major versions must only be used for\nexperimental, none-GA apis.\n", + "type": "string" } - } + }, + "id": "Api" }, - "services": { - "resources": { - "rollouts": { - "methods": { - "get": { - "id": "servicemanagement.services.rollouts.get", - "response": { - "$ref": "Rollout" - }, - "parameterOrder": [ - "serviceName", - "rolloutId" - ], - "description": "Gets a service configuration rollout.", - "flatPath": "v1/services/{serviceName}/rollouts/{rolloutId}", - "httpMethod": "GET", - "parameters": { - "rolloutId": { - "description": "The id of the rollout resource.", - "required": true, - "location": "path", - "type": "string" - }, - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/rollouts/{rolloutId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/service.management", - "https://www.googleapis.com/auth/service.management.readonly" - ] - }, - "create": { - "id": "servicemanagement.services.rollouts.create", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOperation\u003cresponse: Rollout\u003e", - "request": { - "$ref": "Rollout" - }, - "flatPath": "v1/services/{serviceName}/rollouts", - "httpMethod": "POST", - "parameters": { - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/rollouts", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "list": { - "id": "servicemanagement.services.rollouts.list", - "response": { - "$ref": "ListServiceRolloutsResponse" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest.", - "flatPath": "v1/services/{serviceName}/rollouts", - "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "The max number of items to include in the response list.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - }, - "pageToken": { - "description": "The token of the page to retrieve.", - "location": "query", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/rollouts", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/service.management", - "https://www.googleapis.com/auth/service.management.readonly" - ] - } + "DataAccessOptions": { + "description": "Write a Data Access (Gin) log", + "type": "object", + "properties": {}, + "id": "DataAccessOptions" + }, + "Authentication": { + "description": "`Authentication` defines the authentication configuration for an API.\n\nExample for an API targeted for external use:\n\n name: calendar.googleapis.com\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "type": "object", + "properties": { + "providers": { + "description": "Defines a set of authentication providers that a service supports.", + "type": "array", + "items": { + "$ref": "AuthProvider" } }, - "configs": { - "methods": { - "submit": { - "id": "servicemanagement.services.configs.submit", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e", - "request": { - "$ref": "SubmitConfigSourceRequest" - }, - "flatPath": "v1/services/{serviceName}/configs:submit", - "httpMethod": "POST", - "parameters": { - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
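The rollouts collection above keeps the configuration rollout history for a managed service, newest first, and pages through pageSize/pageToken like the other list methods. A compile-only sketch of reading that history, under the same assumptions as the previous sketch (generated Go package, authenticated *http.Client passed in); the RolloutId field name is assumed from the usual generated naming since the Rollout schema is not shown here:

package sketches

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

// printRolloutHistory lists the most recent configuration rollouts for a
// managed service, following nextPageToken until the history is exhausted.
func printRolloutHistory(ctx context.Context, httpClient *http.Client, serviceName string) error {
	svc, err := servicemanagement.New(httpClient)
	if err != nil {
		return err
	}
	pageToken := ""
	for {
		resp, err := svc.Services.Rollouts.List(serviceName).
			PageSize(10).
			PageToken(pageToken).
			Context(ctx).
			Do()
		if err != nil {
			return err
		}
		for _, r := range resp.Rollouts {
			fmt.Println(r.RolloutId) // field name assumed; not part of this diff
		}
		if resp.NextPageToken == "" {
			return nil
		}
		pageToken = resp.NextPageToken
	}
}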
For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/configs:submit", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "get": { - "id": "servicemanagement.services.configs.get", - "response": { - "$ref": "Service" - }, - "parameterOrder": [ - "serviceName", - "configId" - ], - "description": "Gets a service configuration (version) for a managed service.", - "flatPath": "v1/services/{serviceName}/configs/{configId}", - "httpMethod": "GET", - "parameters": { - "configId": { - "description": "The id of the service configuration resource.", - "required": true, - "location": "path", - "type": "string" - }, - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/configs/{configId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/service.management", - "https://www.googleapis.com/auth/service.management.readonly" - ] - }, - "create": { - "id": "servicemanagement.services.configs.create", - "response": { - "$ref": "Service" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.", - "request": { - "$ref": "Service" - }, - "flatPath": "v1/services/{serviceName}/configs", - "httpMethod": "POST", - "parameters": { - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/configs", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "list": { - "id": "servicemanagement.services.configs.list", - "response": { - "$ref": "ListServiceConfigsResponse" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest.", - "flatPath": "v1/services/{serviceName}/configs", - "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "The max number of items to include in the response list.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - }, - "pageToken": { - "description": "The token of the page to retrieve.", - "location": "query", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/configs", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/service.management", - "https://www.googleapis.com/auth/service.management.readonly" - ] - } + "rules": { + "description": "A list of authentication rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "AuthenticationRule" } } }, - "methods": { - "getIamPolicy": { - "id": "servicemanagement.services.getIamPolicy", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "request": { - "$ref": "GetIamPolicyRequest" - }, - "flatPath": "v1/services/{servicesId}:getIamPolicy", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^services/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] + "id": "Authentication" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "type": "object", + "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" }, - "disable": { - "id": "servicemanagement.services.disable", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Disable a managed service for a project.\n\nOperation\u003cresponse: DisableServiceResponse\u003e", - "request": { - "$ref": "DisableServiceRequest" - }, - "flatPath": "v1/services/{serviceName}:disable", - "httpMethod": "POST", - "parameters": { - "serviceName": { - "description": "Name of the service to disable. Specifying an unknown service name\nwill cause the request to fail.", - "required": true, - "location": "path", - "type": "string" - } + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" }, - "path": "v1/services/{serviceName}:disable", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. 
For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object" }, - "generateConfigReport": { - "id": "servicemanagement.services.generateConfigReport", - "response": { - "$ref": "GenerateConfigReportResponse" - }, - "parameterOrder": [], - "description": "Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration.", - "request": { - "$ref": "GenerateConfigReportRequest" - }, - "flatPath": "v1/services:generateConfigReport", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/services:generateConfigReport", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "type": "string" }, - "getConfig": { - "id": "servicemanagement.services.getConfig", - "response": { - "$ref": "Service" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Gets a service configuration (version) for a managed service.", - "flatPath": "v1/services/{serviceName}/config", - "httpMethod": "GET", - "parameters": { - "configId": { - "description": "The id of the service configuration resource.", - "location": "query", - "type": "string" - }, - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}/config", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/service.management", - "https://www.googleapis.com/auth/service.management.readonly" - ] + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." }, - "undelete": { - "id": "servicemanagement.services.undelete", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation\u003cresponse: UndeleteServiceResponse\u003e", - "flatPath": "v1/services/{serviceName}:undelete", - "httpMethod": "POST", - "parameters": { - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
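The Operation resource above is what the asynchronous servicemanagement methods (enable, disable, undelete, configs.submit, rollouts.create, and so on) return; callers poll servicemanagement.operations.get until `done` is true and then inspect `error` or `response`. A compile-only sketch under the same assumptions as the earlier sketches; the ConsumerId field on EnableServiceRequest is assumed from the usual generated naming (that schema is not shown here) and the project id is a placeholder:

package sketches

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/context"
	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

// enableAndWait enables a managed service for a consumer project and polls
// the returned long-running Operation until it completes.
func enableAndWait(ctx context.Context, httpClient *http.Client, serviceName, project string) error {
	svc, err := servicemanagement.New(httpClient)
	if err != nil {
		return err
	}
	op, err := svc.Services.Enable(serviceName, &servicemanagement.EnableServiceRequest{
		ConsumerId: "project:" + project, // field name assumed; format from the consumer_id docs
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Poll operations.get at intervals, as its description recommends.
	for !op.Done {
		time.Sleep(2 * time.Second)
		if op, err = svc.Operations.Get(op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("enable failed: %s (code %d)", op.Error.Message, op.Error.Code)
	}
	return nil
}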
For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}:undelete", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "get": { - "id": "servicemanagement.services.get", - "response": { - "$ref": "ManagedService" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Gets a managed service. Authentication is required unless the service is\npublic.", - "flatPath": "v1/services/{serviceName}", - "httpMethod": "GET", - "parameters": { - "serviceName": { - "description": "The name of the service. See the `ServiceManager` overview for naming\nrequirements. For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/service.management", - "https://www.googleapis.com/auth/service.management.readonly" - ] - }, - "list": { - "id": "servicemanagement.services.list", - "response": { - "$ref": "ListServicesResponse" - }, - "parameterOrder": [], - "description": "Lists managed services.\n\nIf called without any authentication, it returns only the public services.\nIf called with authentication, it returns all services that the caller has\n\"servicemanagement.services.get\" permission for.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\".", - "flatPath": "v1/services", - "httpMethod": "GET", - "parameters": { - "pageSize": { - "description": "Requested size of the next page of data.", - "location": "query", - "type": "integer", - "format": "int32" - }, - "producerProjectId": { - "description": "Include services produced by the specified project.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", - "location": "query", - "type": "string" - }, - "consumerId": { - "description": "Include services consumed by the specified consumer.\n\nThe Google Service Management implementation accepts the following\nforms:\n- project:\u003cproject_id\u003e", - "location": "query", - "type": "string" - } - }, - "path": "v1/services", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/service.management", - "https://www.googleapis.com/auth/service.management.readonly" - ] - }, - "create": { - "id": "servicemanagement.services.create", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [], - "description": "Creates a new managed service.\nPlease note one producer project can own no more than 20 services.\n\nOperation\u003cresponse: ManagedService\u003e", - "request": { - "$ref": "ManagedService" - }, - "flatPath": "v1/services", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/services", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "enable": { - "id": "servicemanagement.services.enable", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Enable a managed service for a project with 
default setting.\n\nOperation\u003cresponse: EnableServiceResponse\u003e\n\ngoogle.rpc.Status errors may contain a\ngoogle.rpc.PreconditionFailure error detail.", - "request": { - "$ref": "EnableServiceRequest" - }, - "flatPath": "v1/services/{serviceName}:enable", - "httpMethod": "POST", - "parameters": { - "serviceName": { - "description": "Name of the service to enable. Specifying an unknown service name will\ncause the request to fail.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}:enable", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "setIamPolicy": { - "id": "servicemanagement.services.setIamPolicy", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "flatPath": "v1/services/{servicesId}:setIamPolicy", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^services/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+resource}:setIamPolicy", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "delete": { - "id": "servicemanagement.services.delete", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "serviceName" - ], - "description": "Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", - "flatPath": "v1/services/{serviceName}", - "httpMethod": "DELETE", - "parameters": { - "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/services/{serviceName}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, - "testIamPermissions": { - "id": "servicemanagement.services.testIamPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "flatPath": "v1/services/{servicesId}:testIamPermissions", - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
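Per the servicemanagement.services.list description above, an unauthenticated call returns only public services, and passing a `consumer_id` of the form `project:{PROJECT-ID}` restricts the result to services enabled on that consumer. A compile-only sketch under the same assumptions as the earlier sketches; the response field names follow the usual generated naming (the ManagedService schema is not shown here) and `project` is a placeholder value supplied by the caller:

package sketches

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

// listEnabledServices prints the first page of services enabled on the given
// consumer project.
func listEnabledServices(ctx context.Context, httpClient *http.Client, project string) error {
	svc, err := servicemanagement.New(httpClient)
	if err != nil {
		return err
	}
	resp, err := svc.Services.List().
		ConsumerId("project:" + project).
		PageSize(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, s := range resp.Services {
		fmt.Println(s.ServiceName) // field name assumed; not part of this diff
	}
	return nil
}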
For example, a Project\nresource is specified as `projects/{project}`.", - "required": true, - "pattern": "^services/[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/{+resource}:testIamPermissions", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] + "metadata": { + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } } - } - } - }, - "schemas": { - "Api": { - "description": "Api is a light-weight descriptor for a protocol buffer service.", + }, + "id": "Operation" + }, + "Page": { + "description": "Represents a documentation page. A page can contain subpages to represent\nnested documentation set structure.", "type": "object", "properties": { - "methods": { - "description": "The methods of this api, in unspecified order.", - "type": "array", - "items": { - "$ref": "Method" - } - }, - "options": { - "description": "Any metadata attached to the API.", + "subpages": { + "description": "Subpages of this page. The order of subpages specified here will be\nhonored in the generated docset.", "type": "array", "items": { - "$ref": "Option" + "$ref": "Page" } }, - "sourceContext": { - "description": "Source context for the protocol buffer service represented by this\nmessage.", - "$ref": "SourceContext" - }, "name": { - "description": "The fully qualified name of this api, including package name\nfollowed by the api's simple name.", - "type": "string" - }, - "syntax": { - "description": "The source syntax of the service.", - "enum": [ - "SYNTAX_PROTO2", - "SYNTAX_PROTO3" - ], - "enumDescriptions": [ - "Syntax `proto2`.", - "Syntax `proto3`." - ], + "description": "The name of the page. It will be used as an identity of the page to\ngenerate URI of the page, text of the link to this page in navigation,\netc. The full page name (start from the root page name to this page\nconcatenated with `.`) can be used as reference to the page in your\ndocumentation. For example:\n\u003cpre\u003e\u003ccode\u003epages:\n- name: Tutorial\n content: (== include tutorial.md ==)\n subpages:\n - name: Java\n content: (== include tutorial_java.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nYou can reference `Java` page using Markdown reference link syntax:\n`Java`.", "type": "string" }, - "version": { - "description": "A version string for this api. If specified, must have the form\n`major-version.minor-version`, as in `1.10`. If the minor version\nis omitted, it defaults to zero. If the entire version field is\nempty, the major version is derived from the package name, as\noutlined below. If the field is not empty, the version in the\npackage name will be verified to be consistent with what is\nprovided here.\n\nThe versioning schema uses [semantic\nversioning](http://semver.org) where the major version number\nindicates a breaking change and the minor version an additive,\nnon-breaking change. 
Both version numbers are signals to users\nwhat to expect from different versions, and should be carefully\nchosen based on the product plan.\n\nThe major version is also reflected in the package name of the\nAPI, which must end in `v\u003cmajor-version\u003e`, as in\n`google.feature.v1`. For major versions 0 and 1, the suffix can\nbe omitted. Zero major versions must only be used for\nexperimental, none-GA apis.\n", - "type": "string" - }, - "mixins": { - "description": "Included APIs. See Mixin.", - "type": "array", - "items": { - "$ref": "Mixin" - } - } - }, - "id": "Api" - }, - "SystemParameterRule": { - "description": "Define a system parameter rule mapping system parameter definitions to\nmethods.", - "type": "object", - "properties": { - "parameters": { - "description": "Define parameters. Multiple names may be defined for a parameter.\nFor a given method call, only one of them should be used. If multiple\nnames are used the behavior is implementation-dependent.\nIf none of the specified names are present the behavior is\nparameter-dependent.", - "type": "array", - "items": { - "$ref": "SystemParameter" - } - }, - "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "content": { + "description": "The Markdown content of the page. You can use \u003ccode\u003e(== include {path} ==)\u003c/code\u003e\nto include content from a Markdown file.", "type": "string" } }, - "id": "SystemParameterRule" + "id": "Page" }, - "Diagnostic": { - "description": "Represents a diagnostic message (error or warning)", + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. 
Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", "type": "object", "properties": { - "location": { - "description": "File name and line number of the error or warning.", - "type": "string" - }, - "kind": { - "description": "The kind of diagnostic information provided.", - "enum": [ - "WARNING", - "ERROR" - ], - "enumDescriptions": [ - "Warnings and errors", - "Only errors" - ], - "type": "string" + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" }, "message": { - "description": "Message describing the error or warning.", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", "type": "string" - } - }, - "id": "Diagnostic" - }, - "ChangeReport": { - "description": "Change report associated with a particular service configuration.\n\nIt contains a list of ConfigChanges based on the comparison between\ntwo service configurations.", - "type": "object", - "properties": { - "configChanges": { - "description": "List of changes between two service configurations.\nThe changes will be alphabetically sorted based on the identifier\nof each change.\nA ConfigChange identifier is a dot separated path to the configuration.\nExample: visibility.rules[selector='LibraryService.CreateBook'].restriction", + }, + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", "type": "array", "items": { - "$ref": "ConfigChange" + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" } } }, - "id": "ChangeReport" + "id": "Status" }, - "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a\ntype name and a set of labels. For example, the monitored resource\ndescriptor for Google Compute Engine VM instances has a type of\n`\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and\n`\"zone\"` to identify particular VM instances.\n\nDifferent APIs can support different monitored resource types. APIs generally\nprovide a `list` method that returns the monitored resource descriptors used\nby the API.", + "Binding": { + "description": "Associates `members` with a `role`.", "type": "object", "properties": { - "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be\ndisplayed in user interfaces. It should be a Title Cased Noun Phrase,\nwithout any article or other determiners. For example,\n`\"Google Cloud SQL Database\"`.", - "type": "string" - }, - "description": { - "description": "Optional. 
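On the wire, errors from these methods come back as the Status payload described above; the generated Go client surfaces any non-2xx response as a *googleapi.Error whose Code and Message mirror that payload, which is the contract stated in the Do() documentation of the generated calls. A compile-only sketch of inspecting it, under the same assumptions as the earlier sketches (package name and helper name are illustrative):

package sketches

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

// describeService fetches a managed service and distinguishes API-level
// errors (carried as *googleapi.Error) from transport problems.
func describeService(ctx context.Context, httpClient *http.Client, serviceName string) error {
	svc, err := servicemanagement.New(httpClient)
	if err != nil {
		return err
	}
	managed, err := svc.Services.Get(serviceName).Context(ctx).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok {
			// gerr.Code is the HTTP status; gerr.Message carries the
			// developer-facing message from the Status payload.
			return fmt.Errorf("servicemanagement.services.get failed: %d %s", gerr.Code, gerr.Message)
		}
		return err
	}
	fmt.Printf("%+v\n", managed)
	return nil
}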
A detailed description of the monitored resource type that might\nbe used in documentation.", - "type": "string" - }, - "labels": { - "description": "Required. A set of labels used to describe instances of this monitored\nresource type. For example, an individual Google Cloud SQL database is\nidentified by values for the labels `\"database_id\"` and `\"zone\"`.", + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", "type": "array", "items": { - "$ref": "LabelDescriptor" + "type": "string" } }, - "type": { - "description": "Required. The monitored resource type. For example, the type\n`\"cloudsql_database\"` represents databases in Google Cloud SQL.\nThe maximum length of this value is 256 characters.", - "type": "string" - }, - "name": { - "description": "Optional. The resource name of the monitored resource descriptor:\n`\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where\n{type} is the value of the `type` field in this object and\n{project_id} is a project ID that provides API-specific context for\naccessing the type. APIs that do not use project information can use the\nresource name format `\"monitoredResourceDescriptors/{type}\"`.", + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", "type": "string" } }, - "id": "MonitoredResourceDescriptor" + "id": "Binding" }, - "LogConfig": { - "description": "Specifies what kind of log the caller must write\nIncrement a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only,\nand end in \"_count\". 
Field names should not contain an initial slash.\nThe actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are\ntheir respective values.\n\nAt present the only supported field names are\n - \"iam_principal\", corresponding to IAMContext.principal;\n - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples:\n counter { metric: \"/debug_access_count\" field: \"iam_principal\" }\n ==\u003e increment counter /iam/policy/backend_debug_access_count\n {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support:\n* multiple field names (though this may be supported in the future)\n* decrementing the counter\n* incrementing it by anything other than 1", + "AuthProvider": { + "description": "Configuration for an anthentication provider, including support for\n[JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "type": "object", "properties": { - "dataAccess": { - "description": "Data access options.", - "$ref": "DataAccessOptions" + "jwksUri": { + "description": "URL of the provider's public key set to validate signature of the JWT. See\n[OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).\nOptional if the key set document:\n - can be retrieved from\n [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html\n of the issuer.\n - can be inferred from the email domain of the issuer (e.g. a Google service account).\n\nExample: https://www.googleapis.com/oauth2/v1/certs", + "type": "string" }, - "counter": { - "description": "Counter options.", - "$ref": "CounterOptions" + "audiences": { + "description": "The list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "type": "string" }, - "cloudAudit": { - "description": "Cloud audit options.", - "$ref": "CloudAuditOptions" - } - }, - "id": "LogConfig" - }, - "Mixin": { - "description": "Declares an API to be included in this API. The including API must\nredeclare all the methods from the included API, but documentation\nand options are inherited as follows:\n\n- If after comment and whitespace stripping, the documentation\n string of the redeclared method is empty, it will be inherited\n from the original method.\n\n- Each annotation belonging to the service config (http,\n visibility) which is not set in the redeclared method will be\n inherited.\n\n- If an http annotation is inherited, the path pattern will be\n modified as follows. 
Any version prefix will be replaced by the\n version of the including API plus the root path if specified.\n\nExample of a simple mixin:\n\n package google.acl.v1;\n service AccessControl {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n }\n }\n\n package google.storage.v2;\n service Storage {\n // rpc GetAcl(GetAclRequest) returns (Acl);\n\n // Get a data record.\n rpc GetData(GetDataRequest) returns (Data) {\n option (google.api.http).get = \"/v2/{resource=**}\";\n }\n }\n\nExample of a mixin configuration:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n\nThe mixin construct implies that all methods in `AccessControl` are\nalso declared with same name and request/response types in\n`Storage`. A documentation generator or annotation processor will\nsee the effective `Storage.GetAcl` method after inherting\ndocumentation and annotations as follows:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n }\n ...\n }\n\nNote how the version in the path pattern changed from `v1` to `v2`.\n\nIf the `root` field in the mixin is specified, it should be a\nrelative path under which inherited HTTP paths are placed. Example:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n root: acls\n\nThis implies the following inherited HTTP annotation:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n }\n ...\n }", - "type": "object", - "properties": { - "root": { - "description": "If non-empty specifies a path under which inherited HTTP paths\nare rooted.", + "id": { + "description": "The unique identifier of the auth provider. It will be referred to by\n`AuthRequirement.provider_id`.\n\nExample: \"bookstore_auth\".", "type": "string" }, - "name": { - "description": "The fully qualified name of the API which is included.", + "issuer": { + "description": "Identifies the principal that issued the JWT. See\nhttps://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1\nUsually a URL or an email address.\n\nExample: https://securetoken.google.com\nExample: 1234567-compute@developer.gserviceaccount.com", "type": "string" } }, - "id": "Mixin" + "id": "AuthProvider" }, - "Service": { - "description": "`Service` is the root object of Google service configuration schema. It\ndescribes basic information about a service, such as the name and the\ntitle, and delegates other aspects to sub-sections. Each sub-section is\neither a proto message or a repeated proto message that configures a\nspecific aspect, such as auth. See each proto message definition for details.\n\nExample:\n\n type: google.api.Service\n config_version: 3\n name: calendar.googleapis.com\n title: Google Calendar API\n apis:\n - name: google.calendar.v3.Calendar\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "EnumValue": { + "description": "Enum value definition.", "type": "object", "properties": { - "id": { - "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. 
If empty, the server may choose to\ngenerate one instead.", + "name": { + "description": "Enum value name.", "type": "string" }, - "enums": { - "description": "A list of all enum types included in this API service. Enums\nreferenced directly or indirectly by the `apis` are automatically\nincluded. Enums which are not referenced but shall be included\nshould be listed here by name. Example:\n\n enums:\n - name: google.someapi.v1.SomeEnum", + "options": { + "description": "Protocol buffer options.", "type": "array", "items": { - "$ref": "Enum" + "$ref": "Option" } }, - "usage": { - "description": "Configuration controlling usage of this service.", - "$ref": "Usage" - }, - "control": { - "description": "Configuration for the service control plane.", - "$ref": "Control" + "number": { + "description": "Enum value number.", + "format": "int32", + "type": "integer" + } + }, + "id": "EnumValue" + }, + "Service": { + "description": "`Service` is the root object of Google service configuration schema. It\ndescribes basic information about a service, such as the name and the\ntitle, and delegates other aspects to sub-sections. Each sub-section is\neither a proto message or a repeated proto message that configures a\nspecific aspect, such as auth. See each proto message definition for details.\n\nExample:\n\n type: google.api.Service\n config_version: 3\n name: calendar.googleapis.com\n title: Google Calendar API\n apis:\n - name: google.calendar.v3.Calendar\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "type": "object", + "properties": { + "customError": { + "description": "Custom error configuration.", + "$ref": "CustomError" }, "title": { "description": "The product title associated with this service.", "type": "string" }, - "http": { - "description": "HTTP configuration.", - "$ref": "Http" - }, - "systemTypes": { - "description": "A list of all proto message types included in this API service.\nIt serves similar purpose as [google.api.Service.types], except that\nthese types are not needed by user-defined APIs. Therefore, they will not\nshow up in the generated discovery doc. This field should only be used\nto define system APIs in ESF.", + "endpoints": { + "description": "Configuration for network endpoints. If this is empty, then an endpoint\nwith the same name as the service is automatically generated to service all\ndefined APIs.", "type": "array", "items": { - "$ref": "Type" + "$ref": "Endpoint" } }, - "configVersion": { - "description": "The version of the service configuration. The config version may\ninfluence interpretation of the configuration, for example, to\ndetermine defaults. This is documented together with applicable\noptions. 
The current default for the config version itself is `3`.", - "type": "integer", - "format": "uint32" - }, - "backend": { - "description": "API backend configuration.", - "$ref": "Backend" - }, - "monitoring": { - "description": "Monitoring configuration.", - "$ref": "Monitoring" - }, - "visibility": { - "description": "API visibility configuration.", - "$ref": "Visibility" - }, - "logging": { - "description": "Logging configuration.", - "$ref": "Logging" - }, - "customError": { - "description": "Custom error configuration.", - "$ref": "CustomError" - }, - "context": { - "description": "Context configuration.", - "$ref": "Context" + "logs": { + "description": "Defines the logs used by this service.", + "type": "array", + "items": { + "$ref": "LogDescriptor" + } }, "apis": { "description": "A list of API interfaces exported by this service. Only the `name` field\nof the google.protobuf.Api needs to be provided by the configuration\nauthor, as the remaining fields will be derived from the IDL during the\nnormalization process. It is an error to specify an API interface here\nwhich cannot be resolved against the associated IDL files.", @@ -875,31 +312,24 @@ "$ref": "Api" } }, - "metrics": { - "description": "Defines the metrics used by this service.", + "types": { + "description": "A list of all proto message types included in this API service.\nTypes referenced directly or indirectly by the `apis` are\nautomatically included. Messages which are not referenced but\nshall be included, such as types used by the `google.protobuf.Any` type,\nshould be listed here by name. Example:\n\n types:\n - name: google.protobuf.Int32", "type": "array", "items": { - "$ref": "MetricDescriptor" + "$ref": "Type" } }, + "http": { + "$ref": "Http", + "description": "HTTP configuration." + }, "systemParameters": { "description": "System parameter configuration.", "$ref": "SystemParameters" }, - "endpoints": { - "description": "Configuration for network endpoints. If this is empty, then an endpoint\nwith the same name as the service is automatically generated to service all\ndefined APIs.", - "type": "array", - "items": { - "$ref": "Endpoint" - } - }, - "name": { - "description": "The DNS address at which this service is available,\ne.g. `calendar.googleapis.com`.", - "type": "string" - }, - "producerProjectId": { - "description": "The id of the Google developer project that owns the service.\nMembers of this project can manage the service configuration,\nmanage consumption of the service, etc.", - "type": "string" + "backend": { + "description": "API backend configuration.", + "$ref": "Backend" }, "documentation": { "description": "Additional API documentation.", @@ -912,750 +342,897 @@ "$ref": "MonitoredResourceDescriptor" } }, - "types": { - "description": "A list of all proto message types included in this API service.\nTypes referenced directly or indirectly by the `apis` are\nautomatically included. Messages which are not referenced but\nshall be included, such as types used by the `google.protobuf.Any` type,\nshould be listed here by name. Example:\n\n types:\n - name: google.protobuf.Int32", + "logging": { + "$ref": "Logging", + "description": "Logging configuration." + }, + "context": { + "description": "Context configuration.", + "$ref": "Context" + }, + "enums": { + "description": "A list of all enum types included in this API service. Enums\nreferenced directly or indirectly by the `apis` are automatically\nincluded. Enums which are not referenced but shall be included\nshould be listed here by name. 
Example:\n\n enums:\n - name: google.someapi.v1.SomeEnum", "type": "array", "items": { - "$ref": "Type" + "$ref": "Enum" } }, - "logs": { - "description": "Defines the logs used by this service.", + "id": { + "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. If empty, the server may choose to\ngenerate one instead.", + "type": "string" + }, + "usage": { + "description": "Configuration controlling usage of this service.", + "$ref": "Usage" + }, + "metrics": { + "description": "Defines the metrics used by this service.", "type": "array", "items": { - "$ref": "LogDescriptor" + "$ref": "MetricDescriptor" } }, "authentication": { "description": "Auth configuration.", "$ref": "Authentication" - } - }, - "id": "Service" - }, - "SubmitConfigSourceResponse": { - "description": "Response message for SubmitConfigSource method.", - "type": "object", - "properties": { - "serviceConfig": { - "description": "The generated service configuration.", - "$ref": "Service" - } - }, - "id": "SubmitConfigSourceResponse" - }, - "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: (== include google/foo/overview.md ==)\n - name: Tutorial\n content: (== include google/foo/tutorial.md ==)\n subpages;\n - name: Java\n content: (== include google/foo/tutorial_java.md ==)\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e[fully.qualified.proto.name][]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e[display text][fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e(-- internal comment --)\u003c/code\u003e\u003c/pre\u003e\nComments can be made conditional using a visibility label. The below\ntext will be only rendered if the `BETA` label is available:\n\u003cpre\u003e\u003ccode\u003e(--BETA: comment for BETA users --)\u003c/code\u003e\u003c/pre\u003e\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e(== include path/to/file ==)\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. 
If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e(== resource_for v1.shelves.books ==)\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", - "type": "object", - "properties": { - "overview": { - "description": "Declares a single overview page. For example:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n overview: (== include overview.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nThis is a shortcut for the following declaration (using pages style):\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n pages:\n - name: Overview\n content: (== include overview.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nNote: you cannot specify both `overview` field and `pages` field.", - "type": "string" }, - "documentationRootUrl": { - "description": "The URL to the root of documentation.", - "type": "string" + "experimental": { + "description": "Experimental configuration.", + "$ref": "Experimental" }, - "pages": { - "description": "The top level pages for the documentation set.", + "control": { + "description": "Configuration for the service control plane.", + "$ref": "Control" + }, + "configVersion": { + "description": "The version of the service configuration. The config version may\ninfluence interpretation of the configuration, for example, to\ndetermine defaults. This is documented together with applicable\noptions. The current default for the config version itself is `3`.", + "format": "uint32", + "type": "integer" + }, + "monitoring": { + "$ref": "Monitoring", + "description": "Monitoring configuration." + }, + "systemTypes": { + "description": "A list of all proto message types included in this API service.\nIt serves similar purpose as [google.api.Service.types], except that\nthese types are not needed by user-defined APIs. Therefore, they will not\nshow up in the generated discovery doc. This field should only be used\nto define system APIs in ESF.", "type": "array", "items": { - "$ref": "Page" + "$ref": "Type" } }, - "summary": { - "description": "A short summary of what the service does. Can only be provided by\nplain text.", + "producerProjectId": { + "description": "The id of the Google developer project that owns the service.\nMembers of this project can manage the service configuration,\nmanage consumption of the service, etc.", "type": "string" }, - "rules": { - "description": "A list of documentation rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", - "type": "array", - "items": { - "$ref": "DocumentationRule" - } + "visibility": { + "description": "API visibility configuration.", + "$ref": "Visibility" + }, + "name": { + "description": "The DNS address at which this service is available,\ne.g. `calendar.googleapis.com`.", + "type": "string" } }, - "id": "Documentation" + "id": "Service" }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "OperationMetadata": { + "description": "The metadata associated with a long running operation resource.", "type": "object", "properties": { - "auditConfigs": { - "description": "Specifies cloud audit logging configuration for this policy.", - "type": "array", - "items": { - "$ref": "AuditConfig" - } + "startTime": { + "description": "The start time of the operation.", + "format": "google-datetime", + "type": "string" }, - "rules": { - "description": "If more than one rule is specified, the rules are applied in the following\nmanner:\n- All matching LOG rules are always applied.\n- If any DENY/DENY_WITH_LOG rule matches, permission is denied.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is\n granted.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if no rule applies, permission is denied.", + "resourceNames": { + "description": "The full name of the resources that this operation is directly\nassociated with.", "type": "array", "items": { - "$ref": "Rule" + "type": "string" } }, - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "steps": { + "description": "Detailed status information for each step. The order is undetermined.", "type": "array", "items": { - "$ref": "Binding" + "$ref": "Step" } }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "type": "string", - "format": "byte" - }, - "iamOwned": { - "type": "boolean" - }, - "version": { - "description": "Version of the `Policy`. The default version is 0.", - "type": "integer", - "format": "int32" + "progressPercentage": { + "description": "Percentage of completion of this operation, ranging from 0 to 100.", + "format": "int32", + "type": "integer" } }, - "id": "Policy" + "id": "OperationMetadata" }, - "OAuthRequirements": { - "description": "OAuth scopes are a way to define data and permissions on data. For example,\nthere are scopes defined for \"Read-only access to Google Calendar\" and\n\"Access to Cloud Platform\". 
Users can consent to a scope for an application,\ngiving it permission to access that data on their behalf.\n\nOAuth scope specifications should be fairly coarse grained; a user will need\nto see and understand the text description of what your scope means.\n\nIn most cases: use one or at most two OAuth scopes for an entire family of\nproducts. If your product has multiple APIs, you should probably be sharing\nthe OAuth scope across all of those APIs.\n\nWhen you need finer grained OAuth consent screens: talk with your product\nmanagement about how developers will use them in practice.\n\nPlease note that even though each of the canonical scopes is enough for a\nrequest to be accepted and passed to the backend, a request can still fail\ndue to the backend requiring additional scopes or permissions.", + "CustomHttpPattern": { + "description": "A custom pattern is used for defining custom HTTP verb.", "type": "object", "properties": { - "canonicalScopes": { - "description": "The list of publicly documented OAuth scopes that are allowed access. An\nOAuth token containing any of these scopes will be accepted.\n\nExample:\n\n canonical_scopes: https://www.googleapis.com/auth/calendar,\n https://www.googleapis.com/auth/calendar.read", + "kind": { + "description": "The name of this custom HTTP verb.", + "type": "string" + }, + "path": { + "description": "The path matched by this custom verb.", "type": "string" } }, - "id": "OAuthRequirements" + "id": "CustomHttpPattern" }, - "ListServicesResponse": { - "description": "Response message for `ListServices` method.", + "SystemParameterRule": { + "description": "Define a system parameter rule mapping system parameter definitions to\nmethods.", "type": "object", "properties": { - "nextPageToken": { - "description": "Token that can be passed to `ListServices` to resume a paginated query.", + "selector": { + "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", "type": "string" }, - "services": { - "description": "The returned services will only have the name field set.", + "parameters": { + "description": "Define parameters. Multiple names may be defined for a parameter.\nFor a given method call, only one of them should be used. If multiple\nnames are used the behavior is implementation-dependent.\nIf none of the specified names are present the behavior is\nparameter-dependent.", "type": "array", "items": { - "$ref": "ManagedService" + "$ref": "SystemParameter" } } }, - "id": "ListServicesResponse" + "id": "SystemParameterRule" }, - "Step": { - "description": "Represents the status of one operation step.", + "HttpRule": { + "description": "`HttpRule` defines the mapping of an RPC method to one or more HTTP\nREST APIs. The mapping determines what portions of the request\nmessage are populated from the path, query parameters, or body of\nthe HTTP request. The mapping is typically specified as an\n`google.api.http` annotation, see \"google/api/annotations.proto\"\nfor details.\n\nThe mapping consists of a field specifying the path template and\nmethod kind. 
The path template can refer to fields in the request\nmessage, as in the example below which describes a REST GET\noperation on a resource collection of messages:\n\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http).get = \"/v1/messages/{message_id}/{sub.subfield}\";\n }\n }\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // mapped to the URL\n SubMessage sub = 2; // `sub.subfield` is url-mapped\n }\n message Message {\n string text = 1; // content of the resource\n }\n\nThe same http annotation can alternatively be expressed inside the\n`GRPC API Configuration` YAML file.\n\n http:\n rules:\n - selector: \u003cproto_package_name\u003e.Messaging.GetMessage\n get: /v1/messages/{message_id}/{sub.subfield}\n\nThis definition enables an automatic, bidrectional mapping of HTTP\nJSON to RPC. Example:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456/foo` | `GetMessage(message_id: \"123456\" sub: SubMessage(subfield: \"foo\"))`\n\nIn general, not only fields but also field paths can be referenced\nfrom a path pattern. Fields mapped to the path pattern cannot be\nrepeated and must have a primitive (non-message) type.\n\nAny fields in the request message which are not bound by the path\npattern automatically become (optional) HTTP query\nparameters. Assume the following definition of the request message:\n\n\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // mapped to the URL\n int64 revision = 2; // becomes a parameter\n SubMessage sub = 3; // `sub.subfield` becomes a parameter\n }\n\n\nThis enables a HTTP JSON to RPC mapping as below:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))`\n\nNote that fields which are mapped to HTTP parameters must have a\nprimitive type or a repeated primitive type. Message types are not\nallowed. In the case of a repeated type, the parameter can be\nrepeated in the URL, as in `...?param=A¶m=B`.\n\nFor HTTP method kinds which allow a request body, the `body` field\nspecifies the mapping. Consider a REST update method on the\nmessage resource collection:\n\n\n service Messaging {\n rpc UpdateMessage(UpdateMessageRequest) returns (Message) {\n option (google.api.http) = {\n put: \"/v1/messages/{message_id}\"\n body: \"message\"\n };\n }\n }\n message UpdateMessageRequest {\n string message_id = 1; // mapped to the URL\n Message message = 2; // mapped to the body\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled, where the\nrepresentation of the JSON in the request body is determined by\nprotos JSON encoding:\n\nHTTP | RPC\n-----|-----\n`PUT /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })`\n\nThe special name `*` can be used in the body mapping to define that\nevery field not bound by the path template should be mapped to the\nrequest body. 
This enables the following alternative definition of\nthe update method:\n\n service Messaging {\n rpc UpdateMessage(Message) returns (Message) {\n option (google.api.http) = {\n put: \"/v1/messages/{message_id}\"\n body: \"*\"\n };\n }\n }\n message Message {\n string message_id = 1;\n string text = 2;\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled:\n\nHTTP | RPC\n-----|-----\n`PUT /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" text: \"Hi!\")`\n\nNote that when using `*` in the body mapping, it is not possible to\nhave HTTP parameters, as all fields not bound by the path end in\nthe body. This makes this option more rarely used in practice of\ndefining REST APIs. The common usage of `*` is in custom methods\nwhich don't use the URL at all for transferring data.\n\nIt is possible to define multiple HTTP methods for one RPC by using\nthe `additional_bindings` option. Example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/messages/{message_id}\"\n additional_bindings {\n get: \"/v1/users/{user_id}/messages/{message_id}\"\n }\n };\n }\n }\n message GetMessageRequest {\n string message_id = 1;\n string user_id = 2;\n }\n\n\nThis enables the following two alternative HTTP JSON to RPC\nmappings:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")`\n`GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id: \"123456\")`\n\n# Rules for HTTP mapping\n\nThe rules for mapping HTTP path, query parameters, and body fields\nto the request message are as follows:\n\n1. The `body` field specifies either `*` or a field path, or is\n omitted. If omitted, it assumes there is no HTTP body.\n2. Leaf fields (recursive expansion of nested messages in the\n request) can be classified into three types:\n (a) Matched in the URL template.\n (b) Covered by body (if body is `*`, everything except (a) fields;\n else everything under the body field)\n (c) All other fields.\n3. URL query parameters found in the HTTP request are mapped to (c) fields.\n4. Any body sent with an HTTP request can contain only (b) fields.\n\nThe syntax of the path template is as follows:\n\n Template = \"/\" Segments [ Verb ] ;\n Segments = Segment { \"/\" Segment } ;\n Segment = \"*\" | \"**\" | LITERAL | Variable ;\n Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ;\n FieldPath = IDENT { \".\" IDENT } ;\n Verb = \":\" LITERAL ;\n\nThe syntax `*` matches a single path segment. It follows the semantics of\n[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String\nExpansion.\n\nThe syntax `**` matches zero or more path segments. It follows the semantics\nof [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved\nExpansion. NOTE: it must be the last segment in the path except the Verb.\n\nThe syntax `LITERAL` matches literal text in the URL path.\n\nThe syntax `Variable` matches the entire path as specified by its template;\nthis nested template must not contain further variables. If a variable\nmatches a single path segment, its template may be omitted, e.g. `{var}`\nis equivalent to `{var=*}`.\n\nNOTE: the field paths in variables and in the `body` must not refer to\nrepeated fields or map fields.\n\nUse CustomHttpPattern to specify any HTTP method that is not included in the\n`pattern` field, such as HEAD, or \"*\" to leave the HTTP method unspecified for\na given URL path rule. 
The wild-card rule is useful for services that provide\ncontent to Web (HTML) clients.", "type": "object", "properties": { - "description": { - "description": "The short description of the step.", + "responseBody": { + "description": "The name of the response field whose value is mapped to the HTTP body of\nresponse. Other response fields are ignored. This field is optional. When\nnot set, the response message will be used as HTTP body of response.\nNOTE: the referred field must be not a repeated field and must be present\nat the top-level of response message type.", "type": "string" }, - "status": { - "description": "The status code.", - "enum": [ - "STATUS_UNSPECIFIED", - "DONE", - "NOT_STARTED", - "IN_PROGRESS", - "FAILED", - "CANCELLED" - ], - "enumDescriptions": [ - "Unspecifed code.", - "The step has completed without errors.", - "The step has not started yet.", - "The step is in progress.", - "The step has completed with errors.", - "The step has completed with cancellation." - ], + "mediaUpload": { + "$ref": "MediaUpload", + "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration." + }, + "selector": { + "description": "Selects methods to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + }, + "custom": { + "$ref": "CustomHttpPattern", + "description": "Custom pattern is used for defining custom verbs." + }, + "patch": { + "description": "Used for updating a resource.", + "type": "string" + }, + "get": { + "description": "Used for listing and getting information about resources.", "type": "string" + }, + "put": { + "description": "Used for updating a resource.", + "type": "string" + }, + "delete": { + "description": "Used for deleting a resource.", + "type": "string" + }, + "body": { + "description": "The name of the request field whose value is mapped to the HTTP body, or\n`*` for mapping all fields not captured by the path pattern to the HTTP\nbody. NOTE: the referred field must not be a repeated field and must be\npresent at the top-level of request message type.", + "type": "string" + }, + "post": { + "description": "Used for creating a resource.", + "type": "string" + }, + "mediaDownload": { + "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", + "$ref": "MediaDownload" + }, + "additionalBindings": { + "description": "Additional HTTP bindings for the selector. 
Nested bindings must\nnot contain an `additional_bindings` field themselves (that is,\nthe nesting may only be one level deep).", + "type": "array", + "items": { + "$ref": "HttpRule" + } } }, - "id": "Step" + "id": "HttpRule" }, - "Context": { - "description": "`Context` defines which contexts an API requests.\n\nExample:\n\n context:\n rules:\n - selector: \"*\"\n requested:\n - google.rpc.context.ProjectContext\n - google.rpc.context.OriginContext\n\nThe above specifies that all methods in the API request\n`google.rpc.context.ProjectContext` and\n`google.rpc.context.OriginContext`.\n\nAvailable context types are defined in package\n`google.rpc.context`.", + "VisibilityRule": { + "description": "A visibility rule provides visibility configuration for an individual API\nelement.", "type": "object", "properties": { - "rules": { - "description": "A list of RPC context rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", - "type": "array", - "items": { - "$ref": "ContextRule" - } + "restriction": { + "description": "A comma-separated list of visibility labels that apply to the `selector`.\nAny of the listed labels can be used to grant the visibility.\n\nIf a rule has multiple labels, removing one of the labels but not all of\nthem can break clients.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: GOOGLE_INTERNAL, TRUSTED_TESTER\n\nRemoving GOOGLE_INTERNAL from this restriction will break clients that\nrely on this method and only had access to it through GOOGLE_INTERNAL.", + "type": "string" + }, + "selector": { + "description": "Selects methods, messages, fields, enums, etc. to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" } }, - "id": "Context" + "id": "VisibilityRule" }, - "Monitoring": { - "description": "Monitoring configuration of the service.\n\nThe example below shows how to configure monitored resources and metrics\nfor monitoring. In the example, a monitored resource and two metrics are\ndefined. The `library.googleapis.com/book/returned_count` metric is sent\nto both producer and consumer projects, whereas the\n`library.googleapis.com/book/overdue_count` metric is only sent to the\nconsumer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n metrics:\n - name: library.googleapis.com/book/returned_count\n metric_kind: DELTA\n value_type: INT64\n labels:\n - key: /customer_id\n - name: library.googleapis.com/book/overdue_count\n metric_kind: GAUGE\n value_type: INT64\n labels:\n - key: /customer_id\n monitoring:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n - library.googleapis.com/book/overdue_count", + "MonitoringDestination": { + "description": "Configuration of a specific monitoring destination (the producer project\nor the consumer project).", "type": "object", "properties": { - "producerDestinations": { - "description": "Monitoring configurations for sending metrics to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. 
A metric can be used in at most\none producer destination.", - "type": "array", - "items": { - "$ref": "MonitoringDestination" - } + "monitoredResource": { + "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "type": "string" }, - "consumerDestinations": { - "description": "Monitoring configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A metric can be used in at most\none consumer destination.", + "metrics": { + "description": "Names of the metrics to report to this monitoring destination.\nEach name must be defined in Service.metrics section.", "type": "array", "items": { - "$ref": "MonitoringDestination" + "type": "string" } } }, - "id": "Monitoring" + "id": "MonitoringDestination" }, - "ManagedService": { - "description": "The full representation of a Service that is managed by\nGoogle Service Management.", + "Visibility": { + "description": "`Visibility` defines restrictions for the visibility of service\nelements. Restrictions are specified using visibility labels\n(e.g., TRUSTED_TESTER) that are elsewhere linked to users and projects.\n\nUsers and projects can have access to more than one visibility label. The\neffective visibility for multiple labels is the union of each label's\nelements, plus any unrestricted elements.\n\nIf an element and its parents have no restrictions, visibility is\nunconditionally granted.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: TRUSTED_TESTER\n - selector: google.calendar.Calendar.Delegate\n restriction: GOOGLE_INTERNAL\n\nHere, all methods are publicly visible except for the restricted methods\nEnhancedSearch and Delegate.", "type": "object", "properties": { - "producerProjectId": { - "description": "ID of the project that produces and owns this service.", - "type": "string" - }, - "serviceName": { - "description": "The name of the service. 
See the [overview](/service-management/overview)\nfor naming requirements.", - "type": "string" + "rules": { + "description": "A list of visibility rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "VisibilityRule" + } } }, - "id": "ManagedService" + "id": "Visibility" }, - "ConfigFile": { - "description": "Generic specification of a source configuration file", + "ConfigChange": { + "description": "Output generated from semantically comparing two versions of a service\nconfiguration.\n\nIncludes detailed information about a field that have changed with\napplicable advice about potential consequences for the change, such as\nbackwards-incompatibility.", "type": "object", "properties": { - "filePath": { - "description": "The file name of the configuration file (full or relative path).", - "type": "string" - }, - "fileType": { - "description": "The type of configuration file this represents.", - "enum": [ - "FILE_TYPE_UNSPECIFIED", - "SERVICE_CONFIG_YAML", - "OPEN_API_JSON", - "OPEN_API_YAML", - "FILE_DESCRIPTOR_SET_PROTO" - ], + "changeType": { "enumDescriptions": [ - "Unknown file type.", - "YAML-specification of service.", - "OpenAPI specification, serialized in JSON.", - "OpenAPI specification, serialized in YAML.", - "FileDescriptorSet, generated by protoc.\n\nTo generate, use protoc with imports and source info included.\nFor an example test.proto file, the following command would put the value\nin a new file named out.pb.\n\n$protoc --include_imports --include_source_info test.proto -o out.pb" + "No value was provided.", + "The changed object exists in the 'new' service configuration, but not\nin the 'old' service configuration.", + "The changed object exists in the 'old' service configuration, but not\nin the 'new' service configuration.", + "The changed object exists in both service configurations, but its value\nis different." + ], + "enum": [ + "CHANGE_TYPE_UNSPECIFIED", + "ADDED", + "REMOVED", + "MODIFIED" ], + "description": "The type for this change, either ADDED, REMOVED, or MODIFIED.", "type": "string" }, - "fileContents": { - "description": "The bytes that constitute the file.", - "type": "string", - "format": "byte" + "element": { + "description": "Object hierarchy path to the change, with levels separated by a '.'\ncharacter. For repeated fields, an applicable unique identifier field is\nused for the index (usually selector, name, or id). For maps, the term\n'key' is used. If the field has no unique identifier, the numeric index\nis used.\nExamples:\n- visibility.rules[selector==\"google.LibraryService.CreateBook\"].restriction\n- quota.metric_rules[selector==\"google\"].metric_costs[key==\"reads\"].value\n- logging.producer_destinations[0]", + "type": "string" + }, + "oldValue": { + "description": "Value of the changed object in the old Service configuration,\nin JSON format. This field will not be populated if ChangeType == ADDED.", + "type": "string" + }, + "advices": { + "description": "Collection of advice provided for this change, useful for determining the\npossible impact of this change.", + "type": "array", + "items": { + "$ref": "Advice" + } + }, + "newValue": { + "description": "Value of the changed object in the new Service configuration,\nin JSON format. 
This field will not be populated if ChangeType == REMOVED.", + "type": "string" } }, - "id": "ConfigFile" + "id": "ConfigChange" }, - "ListServiceConfigsResponse": { - "description": "Response message for ListServiceConfigs method.", + "SystemParameters": { + "description": "### System parameter configuration\n\nA system parameter is a special kind of parameter defined by the API\nsystem, not by an individual API. It is typically mapped to an HTTP header\nand/or a URL query parameter. This configuration specifies which methods\nchange the names of the system parameters.", "type": "object", "properties": { - "nextPageToken": { - "description": "The token of the next page of results.", - "type": "string" - }, - "serviceConfigs": { - "description": "The list of service configuration resources.", + "rules": { + "description": "Define system parameters.\n\nThe parameters defined here will override the default parameters\nimplemented by the system. If this field is missing from the service\nconfig, default system parameters will be used. Default system parameters\nand names is implementation-dependent.\n\nExample: define api key for all methods\n\n system_parameters\n rules:\n - selector: \"*\"\n parameters:\n - name: api_key\n url_query_parameter: api_key\n\n\nExample: define 2 api key names for a specific method.\n\n system_parameters\n rules:\n - selector: \"/ListShelves\"\n parameters:\n - name: api_key\n http_header: Api-Key1\n - name: api_key\n http_header: Api-Key2\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", "type": "array", "items": { - "$ref": "Service" + "$ref": "SystemParameterRule" } } }, - "id": "ListServiceConfigsResponse" + "id": "SystemParameters" }, - "TrafficPercentStrategy": { - "description": "Strategy that specifies how Google Service Control should select\ndifferent\nversions of service configurations based on traffic percentage.\n\nOne example of how to gradually rollout a new service configuration using\nthis\nstrategy:\nDay 1\n\n Rollout {\n id: \"example.googleapis.com/rollout_20160206\"\n traffic_percent_strategy {\n percentages: {\n \"example.googleapis.com/20160201\": 70.00\n \"example.googleapis.com/20160206\": 30.00\n }\n }\n }\n\nDay 2\n\n Rollout {\n id: \"example.googleapis.com/rollout_20160207\"\n traffic_percent_strategy: {\n percentages: {\n \"example.googleapis.com/20160206\": 100.00\n }\n }\n }", + "Rollout": { + "description": "A rollout resource that defines how service configuration versions are pushed\nto control plane systems. Typically, you create a new version of the\nservice config, and then create a Rollout to push the service config.", "type": "object", "properties": { - "percentages": { - "description": "Maps service configuration IDs to their corresponding traffic percentage.\nKey is the service configuration ID, Value is the traffic percentage\nwhich must be greater than 0.0 and the sum must equal to 100.0.", - "additionalProperties": { - "type": "number", - "format": "double" - }, - "type": "object" + "trafficPercentStrategy": { + "description": "Google Service Control selects service configurations based on\ntraffic percentage.", + "$ref": "TrafficPercentStrategy" + }, + "createdBy": { + "description": "The user who created the Rollout. Readonly.", + "type": "string" + }, + "rolloutId": { + "description": "Optional unique identifier of this Rollout. Only lower case letters, digits\n and '-' are allowed.\n\nIf not specified by client, the server will generate one. 
The generated id\nwill have the form of \u003cdate\u003e\u003crevision number\u003e, where \"date\" is the create\ndate in ISO 8601 format. \"revision number\" is a monotonically increasing\npositive number that is reset every day for each service.\nAn example of the generated rollout_id is '2016-02-16r1'", + "type": "string" + }, + "deleteServiceStrategy": { + "$ref": "DeleteServiceStrategy", + "description": "The strategy associated with a rollout to delete a `ManagedService`.\nReadonly." + }, + "createTime": { + "description": "Creation time of the rollout. Readonly.", + "format": "google-datetime", + "type": "string" + }, + "status": { + "description": "The status of this rollout. Readonly. In case of a failed rollout,\nthe system will automatically rollback to the current Rollout\nversion. Readonly.", + "type": "string", + "enumDescriptions": [ + "No status specified.", + "The Rollout is in progress.", + "The Rollout has completed successfully.", + "The Rollout has been cancelled. This can happen if you have overlapping\nRollout pushes, and the previous ones will be cancelled.", + "The Rollout has failed. It is typically caused by configuration errors.", + "The Rollout has not started yet and is pending for execution." + ], + "enum": [ + "ROLLOUT_STATUS_UNSPECIFIED", + "IN_PROGRESS", + "SUCCESS", + "CANCELLED", + "FAILED", + "PENDING" + ] + }, + "serviceName": { + "description": "The name of the service associated with this Rollout.", + "type": "string" } }, - "id": "TrafficPercentStrategy" + "id": "Rollout" }, "GenerateConfigReportRequest": { "description": "Request message for GenerateConfigReport method.", "type": "object", "properties": { "oldConfig": { - "description": "Service configuration against which the comparison will be done.\nFor this version of API, the supported types are\ngoogle.api.servicemanagement.v1.ConfigRef,\ngoogle.api.servicemanagement.v1.ConfigSource,\nand google.api.Service", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, + "description": "Service configuration against which the comparison will be done.\nFor this version of API, the supported types are\ngoogle.api.servicemanagement.v1.ConfigRef,\ngoogle.api.servicemanagement.v1.ConfigSource,\nand google.api.Service", "type": "object" }, "newConfig": { - "description": "Service configuration for which we want to generate the report.\nFor this version of API, the supported types are\ngoogle.api.servicemanagement.v1.ConfigRef,\ngoogle.api.servicemanagement.v1.ConfigSource,\nand google.api.Service", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, + "description": "Service configuration for which we want to generate the report.\nFor this version of API, the supported types are\ngoogle.api.servicemanagement.v1.ConfigRef,\ngoogle.api.servicemanagement.v1.ConfigSource,\nand google.api.Service", "type": "object" } }, "id": "GenerateConfigReportRequest" }, - "GetIamPolicyRequest": { - "description": "Request message for `GetIamPolicy` method.", + "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "type": "object", + "properties": { + "updateMask": { + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. 
If no mask is provided, a default\nmask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", + "format": "google-fieldmask", + "type": "string" + }, + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + } + }, + "id": "SetIamPolicyRequest" + }, + "DeleteServiceStrategy": { + "description": "Strategy used to delete a service. This strategy is a placeholder only\nused by the system generated rollout to delete a service.", "type": "object", "properties": {}, - "id": "GetIamPolicyRequest" + "id": "DeleteServiceStrategy" }, - "LoggingDestination": { - "description": "Configuration of a specific logging destination (the producer project\nor the consumer project).", + "Step": { + "description": "Represents the status of one operation step.", "type": "object", "properties": { - "monitoredResource": { - "description": "The monitored resource type. The type must be defined in the\nService.monitored_resources section.", + "description": { + "description": "The short description of the step.", "type": "string" }, + "status": { + "enumDescriptions": [ + "Unspecifed code.", + "The operation or step has completed without errors.", + "The operation or step has not started yet.", + "The operation or step is in progress.", + "The operation or step has completed with errors.", + "The operation or step has completed with cancellation." + ], + "enum": [ + "STATUS_UNSPECIFIED", + "DONE", + "NOT_STARTED", + "IN_PROGRESS", + "FAILED", + "CANCELLED" + ], + "description": "The status code.", + "type": "string" + } + }, + "id": "Step" + }, + "LoggingDestination": { + "description": "Configuration of a specific logging destination (the producer project\nor the consumer project).", + "type": "object", + "properties": { "logs": { "description": "Names of the logs to be sent to this destination. Each name must\nbe defined in the Service.logs section. If the log name is\nnot a domain scoped name, it will be automatically prefixed with\nthe service name followed by \"/\".", "type": "array", "items": { "type": "string" } + }, + "monitoredResource": { + "description": "The monitored resource type. The type must be defined in the\nService.monitored_resources section.", + "type": "string" } }, "id": "LoggingDestination" }, - "Authentication": { - "description": "`Authentication` defines the authentication configuration for an API.\n\nExample for an API targeted for external use:\n\n name: calendar.googleapis.com\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "Option": { + "description": "A protocol buffer option, which can be attached to a message, field,\nenumeration, etc.", "type": "object", "properties": { - "providers": { - "description": "Defines a set of authentication providers that a service supports.", + "value": { + "description": "The option's value packed in an Any message. If the value is a primitive,\nthe corresponding wrapper type defined in google/protobuf/wrappers.proto\nshould be used. 
If the value is an enum, it should be stored as an int32\nvalue using the google.protobuf.Int32Value type.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + }, + "name": { + "description": "The option's name. For protobuf built-in options (options defined in\ndescriptor.proto), this is the short name. For example, `\"map_entry\"`.\nFor custom options, it should be the fully-qualified name. For example,\n`\"google.api.http\"`.", + "type": "string" + } + }, + "id": "Option" + }, + "Logging": { + "description": "Logging configuration of the service.\n\nThe following example shows how to configure logs to be sent to the\nproducer and consumer projects. In the example, the `activity_history`\nlog is sent to both the producer and consumer projects, whereas the\n`purchase_history` log is only sent to the producer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n logs:\n - name: activity_history\n labels:\n - key: /customer_id\n - name: purchase_history\n logging:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history\n - purchase_history\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history", + "type": "object", + "properties": { + "consumerDestinations": { + "description": "Logging configurations for sending logs to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none consumer destination.", "type": "array", "items": { - "$ref": "AuthProvider" + "$ref": "LoggingDestination" } }, - "rules": { - "description": "A list of authentication rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "producerDestinations": { + "description": "Logging configurations for sending logs to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. 
A log can be used in at most\none producer destination.", "type": "array", "items": { - "$ref": "AuthenticationRule" + "$ref": "LoggingDestination" } } }, - "id": "Authentication" + "id": "Logging" }, - "Type": { - "description": "A protocol buffer message type.", + "Method": { + "description": "Method represents a method of an api.", "type": "object", "properties": { - "oneofs": { - "description": "The list of types appearing in `oneof` definitions in this type.", - "type": "array", - "items": { - "type": "string" - } + "responseTypeUrl": { + "description": "The URL of the output message type.", + "type": "string" }, "options": { - "description": "The protocol buffer options.", + "description": "Any metadata attached to the method.", "type": "array", "items": { "$ref": "Option" } }, - "sourceContext": { - "description": "The source context.", - "$ref": "SourceContext" - }, - "fields": { - "description": "The list of fields.", - "type": "array", - "items": { - "$ref": "Field" - } + "responseStreaming": { + "description": "If true, the response is streamed.", + "type": "boolean" }, "name": { - "description": "The fully qualified message name.", + "description": "The simple name of this method.", + "type": "string" + }, + "requestTypeUrl": { + "description": "A URL of the input message type.", "type": "string" }, + "requestStreaming": { + "description": "If true, the request is streamed.", + "type": "boolean" + }, "syntax": { - "description": "The source syntax.", - "enum": [ - "SYNTAX_PROTO2", - "SYNTAX_PROTO3" - ], "enumDescriptions": [ "Syntax `proto2`.", "Syntax `proto3`." ], + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax of this method.", "type": "string" } }, - "id": "Type" + "id": "Method" }, - "Backend": { - "description": "`Backend` defines the backend configuration for a service.", + "ListServiceRolloutsResponse": { + "description": "Response message for ListServiceRollouts method.", "type": "object", "properties": { - "rules": { - "description": "A list of API backend rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "rollouts": { + "description": "The list of rollout resources.", "type": "array", "items": { - "$ref": "BackendRule" + "$ref": "Rollout" } + }, + "nextPageToken": { + "description": "The token of the next page of results.", + "type": "string" } }, - "id": "Backend" + "id": "ListServiceRolloutsResponse" }, - "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nIt consists of which permission types are logged, and what identities, if\nany, are exempted from logging.\nAn AuditConifg must have one or more AuditLogConfigs.", + "ConfigRef": { + "description": "Represents a service configuration with its name and id.", "type": "object", "properties": { - "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `resourcemanager`, `storage`, `compute`.\n`allServices` is a special value that covers all services.", + "name": { + "description": "Resource name of a service config. It must have the following\nformat: \"services/{service name}/configs/{config id}\".", + "type": "string" + } + }, + "id": "ConfigRef" + }, + "Mixin": { + "description": "Declares an API to be included in this API. 
The including API must\nredeclare all the methods from the included API, but documentation\nand options are inherited as follows:\n\n- If after comment and whitespace stripping, the documentation\n string of the redeclared method is empty, it will be inherited\n from the original method.\n\n- Each annotation belonging to the service config (http,\n visibility) which is not set in the redeclared method will be\n inherited.\n\n- If an http annotation is inherited, the path pattern will be\n modified as follows. Any version prefix will be replaced by the\n version of the including API plus the root path if specified.\n\nExample of a simple mixin:\n\n package google.acl.v1;\n service AccessControl {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n }\n }\n\n package google.storage.v2;\n service Storage {\n // rpc GetAcl(GetAclRequest) returns (Acl);\n\n // Get a data record.\n rpc GetData(GetDataRequest) returns (Data) {\n option (google.api.http).get = \"/v2/{resource=**}\";\n }\n }\n\nExample of a mixin configuration:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n\nThe mixin construct implies that all methods in `AccessControl` are\nalso declared with same name and request/response types in\n`Storage`. A documentation generator or annotation processor will\nsee the effective `Storage.GetAcl` method after inherting\ndocumentation and annotations as follows:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n }\n ...\n }\n\nNote how the version in the path pattern changed from `v1` to `v2`.\n\nIf the `root` field in the mixin is specified, it should be a\nrelative path under which inherited HTTP paths are placed. Example:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n root: acls\n\nThis implies the following inherited HTTP annotation:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n }\n ...\n }", + "type": "object", + "properties": { + "root": { + "description": "If non-empty specifies a path under which inherited HTTP paths\nare rooted.", "type": "string" }, - "auditLogConfigs": { - "description": "The configuration for logging of each type of permission.\nNext ID: 4", + "name": { + "description": "The fully qualified name of the API which is included.", + "type": "string" + } + }, + "id": "Mixin" + }, + "CustomError": { + "description": "Customize service error responses. For example, list any service\nspecific protobuf types that can appear in error detail lists of\nerror responses.\n\nExample:\n\n custom_error:\n types:\n - google.foo.v1.CustomError\n - google.foo.v1.AnotherError", + "type": "object", + "properties": { + "types": { + "description": "The list of custom error detail types, e.g. 
'google.foo.v1.CustomError'.", "type": "array", "items": { - "$ref": "AuditLogConfig" + "type": "string" } }, - "exemptedMembers": { - "description": "Specifies the identities that are exempted from \"data access\" audit\nlogging for the `service` specified above.\nFollows the same format of Binding.members.\nThis field is deprecated in favor of per-permission-type exemptions.", + "rules": { + "description": "The list of custom error rules that apply to individual API messages.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", "type": "array", "items": { - "type": "string" + "$ref": "CustomErrorRule" } } }, - "id": "AuditConfig" + "id": "CustomError" }, - "ListServiceRolloutsResponse": { - "description": "Response message for ListServiceRollouts method.", + "CounterOptions": { + "description": "Options for counters", "type": "object", "properties": { - "rollouts": { - "description": "The list of rollout resources.", + "metric": { + "description": "The metric to update.", + "type": "string" + }, + "field": { + "description": "The field value to attribute.", + "type": "string" + } + }, + "id": "CounterOptions" + }, + "Http": { + "description": "Defines the HTTP configuration for a service. It contains a list of\nHttpRule, each specifying the mapping of an RPC method\nto one or more HTTP REST API methods.", + "type": "object", + "properties": { + "rules": { + "description": "A list of HTTP configuration rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", "type": "array", "items": { - "$ref": "Rollout" + "$ref": "HttpRule" } - }, - "nextPageToken": { - "description": "The token of the next page of results.", - "type": "string" } }, - "id": "ListServiceRolloutsResponse" + "id": "Http" }, - "Rollout": { - "description": "A rollout resource that defines how service configuration versions are pushed\nto control plane systems. Typically, you create a new version of the\nservice config, and then create a Rollout to push the service config.", + "Control": { + "description": "Selects and configures the service controller used by the service. The\nservice controller handles features like abuse, quota, billing, logging,\nmonitoring, etc.", "type": "object", "properties": { - "createdBy": { - "description": "The user who created the Rollout. Readonly.", + "environment": { + "description": "The service control environment to use. If empty, no control plane\nfeature (like quota and billing) will be enabled.", "type": "string" - }, - "trafficPercentStrategy": { - "description": "Google Service Control selects service configurations based on\ntraffic percentage.", - "$ref": "TrafficPercentStrategy" - }, - "status": { - "description": "The status of this rollout. Readonly. In case of a failed rollout,\nthe system will automatically rollback to the current Rollout\nversion. Readonly.", - "enum": [ - "ROLLOUT_STATUS_UNSPECIFIED", - "IN_PROGRESS", - "SUCCESS", - "CANCELLED", - "FAILED", - "PENDING" - ], - "enumDescriptions": [ - "No status specified.", - "The Rollout is in progress.", - "The Rollout has completed successfully.", - "The Rollout has been cancelled. This can happen if you have overlapping\nRollout pushes, and the previous ones will be cancelled.", - "The Rollout has failed. It is typically caused by configuration errors.", - "The Rollout has not started yet and is pending for execution." 
- ], + } + }, + "id": "Control" + }, + "SystemParameter": { + "description": "Define a parameter's name and location. The parameter may be passed as either\nan HTTP header or a URL query parameter, and if both are passed the behavior\nis implementation-dependent.", + "type": "object", + "properties": { + "urlQueryParameter": { + "description": "Define the URL query parameter name to use for the parameter. It is case\nsensitive.", "type": "string" }, - "deleteServiceStrategy": { - "description": "The strategy associated with a rollout to delete a `ManagedService`.\nReadonly.", - "$ref": "DeleteServiceStrategy" - }, - "createTime": { - "description": "Creation time of the rollout. Readonly.", - "type": "string", - "format": "google-datetime" - }, - "serviceName": { - "description": "The name of the service associated with this Rollout.", + "httpHeader": { + "description": "Define the HTTP header name to use for the parameter. It is case\ninsensitive.", "type": "string" }, - "rolloutId": { - "description": "Optional unique identifier of this Rollout. Only lower case letters, digits\n and '-' are allowed.\n\nIf not specified by client, the server will generate one. The generated id\nwill have the form of \u003cdate\u003e\u003crevision number\u003e, where \"date\" is the create\ndate in ISO 8601 format. \"revision number\" is a monotonically increasing\npositive number that is reset every day for each service.\nAn example of the generated rollout_id is '2016-02-16r1'", + "name": { + "description": "Define the name of the parameter, such as \"api_key\" . It is case sensitive.", "type": "string" } }, - "id": "Rollout" + "id": "SystemParameter" }, - "ConfigSource": { - "description": "Represents a source file which is used to generate the service configuration\ndefined by `google.api.Service`.", + "Monitoring": { + "description": "Monitoring configuration of the service.\n\nThe example below shows how to configure monitored resources and metrics\nfor monitoring. In the example, a monitored resource and two metrics are\ndefined. The `library.googleapis.com/book/returned_count` metric is sent\nto both producer and consumer projects, whereas the\n`library.googleapis.com/book/overdue_count` metric is only sent to the\nconsumer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n metrics:\n - name: library.googleapis.com/book/returned_count\n metric_kind: DELTA\n value_type: INT64\n labels:\n - key: /customer_id\n - name: library.googleapis.com/book/overdue_count\n metric_kind: GAUGE\n value_type: INT64\n labels:\n - key: /customer_id\n monitoring:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n - library.googleapis.com/book/overdue_count", "type": "object", "properties": { - "files": { - "description": "Set of source configuration files that are used to generate a service\nconfiguration (`google.api.Service`).", + "consumerDestinations": { + "description": "Monitoring configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. 
A metric can be used in at most\none consumer destination.", "type": "array", "items": { - "$ref": "ConfigFile" + "$ref": "MonitoringDestination" } }, - "id": { - "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. If empty, the server may choose to\ngenerate one instead.", - "type": "string" + "producerDestinations": { + "description": "Monitoring configurations for sending metrics to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. A metric can be used in at most\none producer destination.", + "type": "array", + "items": { + "$ref": "MonitoringDestination" + } } }, - "id": "ConfigSource" + "id": "Monitoring" }, - "Method": { - "description": "Method represents a method of an api.", + "Field": { + "description": "A single field of a message type.", "type": "object", "properties": { - "requestStreaming": { - "description": "If true, the request is streamed.", - "type": "boolean" + "jsonName": { + "description": "The field JSON name.", + "type": "string" + }, + "kind": { + "description": "The field type.", + "type": "string", + "enumDescriptions": [ + "Field type unknown.", + "Field type double.", + "Field type float.", + "Field type int64.", + "Field type uint64.", + "Field type int32.", + "Field type fixed64.", + "Field type fixed32.", + "Field type bool.", + "Field type string.", + "Field type group. Proto2 syntax only, and deprecated.", + "Field type message.", + "Field type bytes.", + "Field type uint32.", + "Field type enum.", + "Field type sfixed32.", + "Field type sfixed64.", + "Field type sint32.", + "Field type sint64." + ], + "enum": [ + "TYPE_UNKNOWN", + "TYPE_DOUBLE", + "TYPE_FLOAT", + "TYPE_INT64", + "TYPE_UINT64", + "TYPE_INT32", + "TYPE_FIXED64", + "TYPE_FIXED32", + "TYPE_BOOL", + "TYPE_STRING", + "TYPE_GROUP", + "TYPE_MESSAGE", + "TYPE_BYTES", + "TYPE_UINT32", + "TYPE_ENUM", + "TYPE_SFIXED32", + "TYPE_SFIXED64", + "TYPE_SINT32", + "TYPE_SINT64" + ] }, "options": { - "description": "Any metadata attached to the method.", + "description": "The protocol buffer options.", "type": "array", "items": { "$ref": "Option" } }, - "requestTypeUrl": { - "description": "A URL of the input message type.", + "oneofIndex": { + "description": "The index of the field type in `Type.oneofs`, for message or enumeration\ntypes. The first type has index 1; zero means the type is not in the list.", + "format": "int32", + "type": "integer" + }, + "cardinality": { + "enumDescriptions": [ + "For fields with unknown cardinality.", + "For optional fields.", + "For required fields. Proto2 syntax only.", + "For repeated fields." + ], + "enum": [ + "CARDINALITY_UNKNOWN", + "CARDINALITY_OPTIONAL", + "CARDINALITY_REQUIRED", + "CARDINALITY_REPEATED" + ], + "description": "The field cardinality.", "type": "string" }, - "name": { - "description": "The simple name of this method.", + "packed": { + "description": "Whether to use alternative packed wire representation.", + "type": "boolean" + }, + "defaultValue": { + "description": "The string value of the default value of this field. Proto2 syntax only.", "type": "string" }, - "syntax": { - "description": "The source syntax of this method.", - "enum": [ - "SYNTAX_PROTO2", - "SYNTAX_PROTO3" - ], - "enumDescriptions": [ - "Syntax `proto2`.", - "Syntax `proto3`." 
- ], + "name": { + "description": "The field name.", "type": "string" }, - "responseTypeUrl": { - "description": "The URL of the output message type.", + "typeUrl": { + "description": "The field type URL, without the scheme, for message or enumeration\ntypes. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", "type": "string" }, - "responseStreaming": { - "description": "If true, the response is streamed.", - "type": "boolean" + "number": { + "description": "The field number.", + "format": "int32", + "type": "integer" } }, - "id": "Method" + "id": "Field" }, - "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", "type": "object", "properties": { - "error": { - "description": "The error result of the operation in case of failure or cancellation.", - "$ref": "Status" - }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" - }, - "metadata": { - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "response": { - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", - "type": "string" + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "array", + "items": { + "type": "string" + } } }, - "id": "Operation" + "id": "TestIamPermissionsRequest" }, - "Rule": { - "description": "A rule to be applied in a Policy.", + "Enum": { + "description": "Enum type definition.", "type": "object", "properties": { - "description": { - "description": "Human-readable description of the rule.", - "type": "string" - }, - "in": { - "description": "If one or more 'in' clauses are specified, the rule matches if\nthe PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", - "type": "array", - "items": { - "type": "string" - } - }, - "action": { - "description": "Required", - "enum": [ - "NO_ACTION", - "ALLOW", - "ALLOW_WITH_LOG", - "DENY", - "DENY_WITH_LOG", - "LOG" - ], - "enumDescriptions": [ - "Default no action.", - "Matching 'Entries' grant access.", - "Matching 'Entries' grant access and the caller promises to log\nthe request per the returned log_configs.", - "Matching 'Entries' deny access.", - "Matching 'Entries' deny access and the caller promises to log\nthe request per the returned log_configs.", - "Matching 'Entries' tell IAM.Check callers to generate logs." - ], + "name": { + "description": "Enum type name.", "type": "string" }, - "conditions": { - "description": "Additional restrictions that must be met", + "enumvalue": { + "description": "Enum value definitions.", "type": "array", "items": { - "$ref": "Condition" + "$ref": "EnumValue" } }, - "notIn": { - "description": "If one or more 'not_in' clauses are specified, the rule matches\nif the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.\nThe format for in and not_in entries is the same as for members in a\nBinding (see google/iam/v1/policy.proto).", + "options": { + "description": "Protocol buffer options.", "type": "array", "items": { - "type": "string" + "$ref": "Option" } }, - "logConfig": { - "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries\nthat match the LOG action.", - "type": "array", - "items": { - "$ref": "LogConfig" - } + "sourceContext": { + "$ref": "SourceContext", + "description": "The source context." }, - "permissions": { - "description": "A permission is a string of form '\u003cservice\u003e.\u003cresource type\u003e.\u003cverb\u003e'\n(e.g., 'storage.buckets.list'). A value of '*' matches all permissions,\nand a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", - "type": "array", - "items": { - "type": "string" - } + "syntax": { + "enumDescriptions": [ + "Syntax `proto2`.", + "Syntax `proto3`." + ], + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax.", + "type": "string" } }, - "id": "Rule" + "id": "Enum" }, - "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created,\ndeleting or altering it stops data collection and makes the metric type's\nexisting data unusable.", + "LabelDescriptor": { + "description": "A description of a label.", "type": "object", "properties": { "description": { - "description": "A detailed description of the metric, which can be used in documentation.", - "type": "string" - }, - "unit": { - "description": "The unit in which the metric value is reported. It is only applicable\nif the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. 
The\nsupported units are a subset of [The Unified Code for Units of\nMeasure](http://unitsofmeasure.org/ucum.html) standard:\n\n**Basic units (UNIT)**\n\n* `bit` bit\n* `By` byte\n* `s` second\n* `min` minute\n* `h` hour\n* `d` day\n\n**Prefixes (PREFIX)**\n\n* `k` kilo (10**3)\n* `M` mega (10**6)\n* `G` giga (10**9)\n* `T` tera (10**12)\n* `P` peta (10**15)\n* `E` exa (10**18)\n* `Z` zetta (10**21)\n* `Y` yotta (10**24)\n* `m` milli (10**-3)\n* `u` micro (10**-6)\n* `n` nano (10**-9)\n* `p` pico (10**-12)\n* `f` femto (10**-15)\n* `a` atto (10**-18)\n* `z` zepto (10**-21)\n* `y` yocto (10**-24)\n* `Ki` kibi (2**10)\n* `Mi` mebi (2**20)\n* `Gi` gibi (2**30)\n* `Ti` tebi (2**40)\n\n**Grammar**\n\nThe grammar includes the dimensionless unit `1`, such as `1/s`.\n\nThe grammar also includes these connectors:\n\n* `/` division (as an infix operator, e.g. `1/s`).\n* `.` multiplication (as an infix operator, e.g. `GBy.d`)\n\nThe grammar for a unit is as follows:\n\n Expression = Component { \".\" Component } { \"/\" Component } ;\n\n Component = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\n Annotation = \"{\" NAME \"}\" ;\n\nNotes:\n\n* `Annotation` is just a comment if it follows a `UNIT` and is\n equivalent to `1` if it is used alone. For examples,\n `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.\n* `NAME` is a sequence of non-blank printable ASCII characters not\n containing '{' or '}'.", + "description": "A human-readable description for the label.", "type": "string" }, - "labels": { - "description": "The set of labels that can be used to describe a specific\ninstance of this metric type. For example, the\n`appengine.googleapis.com/http/server/response_latencies` metric\ntype has a label for the HTTP response code, `response_code`, so\nyou can look at latencies for successful responses or just\nfor responses that failed.", - "type": "array", - "items": { - "$ref": "LabelDescriptor" - } - }, - "metricKind": { - "description": "Whether the metric records instantaneous values, changes to a value, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", - "enum": [ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE" - ], + "valueType": { + "description": "The type of data that can be assigned to the label.", + "type": "string", "enumDescriptions": [ - "Do not use this default value.", - "An instantaneous measurement of a value.", - "The change in a value during a time interval.", - "A value accumulated over a time interval. Cumulative\nmeasurements in a time series should have the same start time\nand increasing end times, until an event resets the cumulative\nvalue to zero and sets a new start time for the following\npoints." + "A variable-length string. This is the default.", + "Boolean; true or false.", + "A 64-bit signed integer." 
], - "type": "string" - }, - "valueType": { - "description": "Whether the measurement is an integer, a floating-point number, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ - "VALUE_TYPE_UNSPECIFIED", - "BOOL", - "INT64", - "DOUBLE", "STRING", - "DISTRIBUTION", - "MONEY" - ], - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean.\nThis value type can be used only if the metric kind is `GAUGE`.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string.\nThis value type can be used only if the metric kind is `GAUGE`.", - "The value is a `Distribution`.", - "The value is money." - ], - "type": "string" + "BOOL", + "INT64" + ] }, - "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces.\nUse sentence case without an ending period, for example \"Request count\".", + "key": { + "description": "The label key.", + "type": "string" + } + }, + "id": "LabelDescriptor" + }, + "Diagnostic": { + "description": "Represents a diagnostic message (error or warning)", + "type": "object", + "properties": { + "location": { + "description": "File name and line number of the error or warning.", "type": "string" }, - "name": { - "description": "The resource name of the metric descriptor. Depending on the\nimplementation, the name typically includes: (1) the parent resource name\nthat defines the scope of the metric type or of its data; and (2) the\nmetric's URL-encoded type, which also appears in the `type` field of this\ndescriptor. For example, following is the resource name of a custom\nmetric within the GCP project `my-project-id`:\n\n \"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"", + "kind": { + "enumDescriptions": [ + "Warnings and errors", + "Only errors" + ], + "enum": [ + "WARNING", + "ERROR" + ], + "description": "The kind of diagnostic information provided.", "type": "string" }, - "type": { - "description": "The metric type, including its DNS name prefix. The type is not\nURL-encoded. All user-defined custom metric types have the DNS name\n`custom.googleapis.com`. Metric types should use a natural hierarchical\ngrouping. For example:\n\n \"custom.googleapis.com/invoice/paid/amount\"\n \"appengine.googleapis.com/http/server/response_latencies\"", + "message": { + "description": "Message describing the error or warning.", "type": "string" } }, - "id": "MetricDescriptor" + "id": "Diagnostic" }, "EnableServiceRequest": { "description": "Request message for EnableService method.", @@ -1668,580 +1245,518 @@ }, "id": "EnableServiceRequest" }, - "DocumentationRule": { - "description": "A documentation rule provides information about individual API elements.", + "Type": { + "description": "A protocol buffer message type.", "type": "object", "properties": { - "description": { - "description": "Description of the selected API(s).", - "type": "string" + "fields": { + "description": "The list of fields.", + "type": "array", + "items": { + "$ref": "Field" + } }, - "deprecationDescription": { - "description": "Deprecation description of the selected element(s). It can be provided if an\nelement is marked as `deprecated`.", + "name": { + "description": "The fully qualified message name.", "type": "string" }, - "selector": { - "description": "The selector is a comma-separated list of patterns. 
Each pattern is a\nqualified name of the element which may end in \"*\", indicating a wildcard.\nWildcards are only allowed at the end and for a whole component of the\nqualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". To\nspecify a default for all applicable elements, the whole pattern \"*\"\nis used.", - "type": "string" - } - }, - "id": "DocumentationRule" - }, - "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "type": "object", - "properties": { - "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, a default\nmask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", - "type": "string", - "format": "google-fieldmask" + "oneofs": { + "description": "The list of types appearing in `oneof` definitions in this type.", + "type": "array", + "items": { + "type": "string" + } }, - "policy": { - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", - "$ref": "Policy" - } - }, - "id": "SetIamPolicyRequest" - }, - "CounterOptions": { - "description": "Options for counters", - "type": "object", - "properties": { - "metric": { - "description": "The metric to update.", - "type": "string" + "sourceContext": { + "$ref": "SourceContext", + "description": "The source context." }, - "field": { - "description": "The field value to attribute.", + "syntax": { + "enumDescriptions": [ + "Syntax `proto2`.", + "Syntax `proto3`." + ], + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax.", "type": "string" + }, + "options": { + "description": "The protocol buffer options.", + "type": "array", + "items": { + "$ref": "Option" + } } }, - "id": "CounterOptions" + "id": "Type" }, - "Condition": { - "description": "A condition to be met.", + "GenerateConfigReportResponse": { + "description": "Response message for GenerateConfigReport method.", "type": "object", "properties": { - "sys": { - "description": "Trusted attributes supplied by any service that owns resources and uses\nthe IAM system for access control.", - "enum": [ - "NO_ATTR", - "REGION", - "SERVICE", - "NAME", - "IP" - ], - "enumDescriptions": [ - "Default non-attribute type", - "Region of the resource", - "Service name", - "Resource name", - "IP address of the caller" - ], - "type": "string" - }, - "values": { - "description": "The objects of the condition. This is mutually exclusive with 'value'.", + "diagnostics": { + "description": "Errors / Linter warnings associated with the service definition this\nreport\nbelongs to.", "type": "array", "items": { - "type": "string" + "$ref": "Diagnostic" } }, - "iam": { - "description": "Trusted attributes supplied by the IAM system.", - "enum": [ - "NO_ATTR", - "AUTHORITY", - "ATTRIBUTION" - ], - "enumDescriptions": [ - "Default non-attribute.", - "Either principal or (if present) authority selector.", - "The principal (even if an authority selector is present), which\nmust only be used for attribution, not authorization." - ], - "type": "string" - }, - "op": { - "description": "An operator to apply the subject with.", - "enum": [ - "NO_OP", - "EQUALS", - "NOT_EQUALS", - "IN", - "NOT_IN", - "DISCHARGED" - ], - "enumDescriptions": [ - "Default no-op.", - "DEPRECATED. 
Use IN instead.", - "DEPRECATED. Use NOT_IN instead.", - "Set-inclusion check.", - "Set-exclusion check.", - "Subject is discharged" - ], + "serviceName": { + "description": "Name of the service this report belongs to.", "type": "string" }, - "value": { - "description": "DEPRECATED. Use 'values' instead.", - "type": "string" + "changeReports": { + "description": "list of ChangeReport, each corresponding to comparison between two\nservice configurations.", + "type": "array", + "items": { + "$ref": "ChangeReport" + } }, - "svc": { - "description": "Trusted attributes discharged by the service.", + "id": { + "description": "ID of the service configuration this report belongs to.", "type": "string" } }, - "id": "Condition" + "id": "GenerateConfigReportResponse" }, - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. 
If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "ListServiceConfigsResponse": { + "description": "Response message for ListServiceConfigs method.", "type": "object", "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" - }, - "details": { - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "serviceConfigs": { + "description": "The list of service configuration resources.", "type": "array", "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "$ref": "Service" } }, - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "nextPageToken": { + "description": "The token of the next page of results.", "type": "string" } }, - "id": "Status" + "id": "ListServiceConfigsResponse" }, - "Endpoint": { - "description": "`Endpoint` describes a network endpoint that serves a set of APIs.\nA service may expose any number of endpoints, and all endpoints share the\nsame service configuration, such as quota configuration and monitoring\nconfiguration.\n\nExample service configuration:\n\n name: library-example.googleapis.com\n endpoints:\n # Below entry makes 'google.example.library.v1.Library'\n # API be served from endpoint address library-example.googleapis.com.\n # It also allows HTTP OPTIONS calls to be passed to the backend, for\n # it to decide whether the subsequent cross-origin request is\n # allowed to proceed.\n - name: library-example.googleapis.com\n allow_cors: true", + "Experimental": { + "description": "Experimental service configuration. These configuration options can\nonly be used by whitelisted users.", "type": "object", "properties": { - "apis": { - "description": "The list of APIs served by this endpoint.", + "authorization": { + "description": "Authorization configuration.", + "$ref": "AuthorizationConfig" + } + }, + "id": "Experimental" + }, + "Backend": { + "description": "`Backend` defines the backend configuration for a service.", + "type": "object", + "properties": { + "rules": { + "description": "A list of API backend rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", "type": "array", "items": { - "type": "string" + "$ref": "BackendRule" } - }, - "allowCors": { - "description": "Allowing\n[CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka\ncross-domain traffic, would allow the backends served from this endpoint to\nreceive and respond to HTTP OPTIONS requests. The response will be used by\nthe browser to determine whether the subsequent cross-origin request is\nallowed to proceed.", - "type": "boolean" - }, - "name": { - "description": "The canonical name of this endpoint.", - "type": "string" - }, - "aliases": { - "description": "DEPRECATED: This field is no longer supported. 
Instead of using aliases,\nplease specify multiple google.api.Endpoint for each of the intented\nalias.\n\nAdditional names that this endpoint will be hosted on.", + } + }, + "id": "Backend" + }, + "AuditConfig": { + "description": "Specifies the audit configuration for a service.\nIt consists of which permission types are logged, and what identities, if\nany, are exempted from logging.\nAn AuditConifg must have one or more AuditLogConfigs.", + "type": "object", + "properties": { + "auditLogConfigs": { + "description": "The configuration for logging of each type of permission.\nNext ID: 4", "type": "array", "items": { - "type": "string" + "$ref": "AuditLogConfig" } }, - "features": { - "description": "The list of features enabled on this endpoint.", + "exemptedMembers": { + "description": "Specifies the identities that are exempted from \"data access\" audit\nlogging for the `service` specified above.\nFollows the same format of Binding.members.\nThis field is deprecated in favor of per-permission-type exemptions.", "type": "array", "items": { "type": "string" } + }, + "service": { + "description": "Specifies a service that will be enabled for audit logging.\nFor example, `resourcemanager`, `storage`, `compute`.\n`allServices` is a special value that covers all services.", + "type": "string" } }, - "id": "Endpoint" + "id": "AuditConfig" }, - "Page": { - "description": "Represents a documentation page. A page can contain subpages to represent\nnested documentation set structure.", + "SubmitConfigSourceRequest": { + "description": "Request message for SubmitConfigSource method.", "type": "object", "properties": { - "subpages": { - "description": "Subpages of this page. The order of subpages specified here will be\nhonored in the generated docset.", - "type": "array", - "items": { - "$ref": "Page" - } - }, - "content": { - "description": "The Markdown content of the page. You can use \u003ccode\u003e(== include {path} ==)\u003c/code\u003e\nto include content from a Markdown file.", - "type": "string" + "configSource": { + "$ref": "ConfigSource", + "description": "The source configuration for the service." }, - "name": { - "description": "The name of the page. It will be used as an identity of the page to\ngenerate URI of the page, text of the link to this page in navigation,\netc. The full page name (start from the root page name to this page\nconcatenated with `.`) can be used as reference to the page in your\ndocumentation. For example:\n\u003cpre\u003e\u003ccode\u003epages:\n- name: Tutorial\n content: (== include tutorial.md ==)\n subpages:\n - name: Java\n content: (== include tutorial_java.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nYou can reference `Java` page using Markdown reference link syntax:\n`Java`.", - "type": "string" + "validateOnly": { + "description": "Optional. If set, this will result in the generation of a\n`google.api.Service` configuration based on the `ConfigSource` provided,\nbut the generated config and the sources will NOT be persisted.", + "type": "boolean" } }, - "id": "Page" + "id": "SubmitConfigSourceRequest" }, - "CustomErrorRule": { - "description": "A custom error rule.", + "DocumentationRule": { + "description": "A documentation rule provides information about individual API elements.", "type": "object", "properties": { - "isErrorType": { - "description": "Mark this message as possible payload in error response. 
Otherwise,\nobjects of this type will be filtered when they appear in error payload.", - "type": "boolean" + "description": { + "description": "Description of the selected API(s).", + "type": "string" + }, + "deprecationDescription": { + "description": "Deprecation description of the selected element(s). It can be provided if an\nelement is marked as `deprecated`.", + "type": "string" }, "selector": { - "description": "Selects messages to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "The selector is a comma-separated list of patterns. Each pattern is a\nqualified name of the element which may end in \"*\", indicating a wildcard.\nWildcards are only allowed at the end and for a whole component of the\nqualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". To\nspecify a default for all applicable elements, the whole pattern \"*\"\nis used.", "type": "string" } }, - "id": "CustomErrorRule" + "id": "DocumentationRule" }, - "Option": { - "description": "A protocol buffer option, which can be attached to a message, field,\nenumeration, etc.", + "AuthorizationConfig": { + "description": "Configuration of authorization.\n\nThis section determines the authorization provider, if unspecified, then no\nauthorization check will be done.\n\nExample:\n\n experimental:\n authorization:\n provider: firebaserules.googleapis.com", "type": "object", "properties": { - "value": { - "description": "The option's value packed in an Any message. If the value is a primitive,\nthe corresponding wrapper type defined in google/protobuf/wrappers.proto\nshould be used. If the value is an enum, it should be stored as an int32\nvalue using the google.protobuf.Int32Value type.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "name": { - "description": "The option's name. For protobuf built-in options (options defined in\ndescriptor.proto), this is the short name. For example, `\"map_entry\"`.\nFor custom options, it should be the fully-qualified name. For example,\n`\"google.api.http\"`.", + "provider": { + "description": "The name of the authorization provider, such as\nfirebaserules.googleapis.com.", "type": "string" } }, - "id": "Option" + "id": "AuthorizationConfig" }, - "HttpRule": { - "description": "`HttpRule` defines the mapping of an RPC method to one or more HTTP\nREST APIs. The mapping determines what portions of the request\nmessage are populated from the path, query parameters, or body of\nthe HTTP request. The mapping is typically specified as an\n`google.api.http` annotation, see \"google/api/annotations.proto\"\nfor details.\n\nThe mapping consists of a field specifying the path template and\nmethod kind. 
The path template can refer to fields in the request\nmessage, as in the example below which describes a REST GET\noperation on a resource collection of messages:\n\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http).get = \"/v1/messages/{message_id}/{sub.subfield}\";\n }\n }\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // mapped to the URL\n SubMessage sub = 2; // `sub.subfield` is url-mapped\n }\n message Message {\n string text = 1; // content of the resource\n }\n\nThe same http annotation can alternatively be expressed inside the\n`GRPC API Configuration` YAML file.\n\n http:\n rules:\n - selector: \u003cproto_package_name\u003e.Messaging.GetMessage\n get: /v1/messages/{message_id}/{sub.subfield}\n\nThis definition enables an automatic, bidrectional mapping of HTTP\nJSON to RPC. Example:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456/foo` | `GetMessage(message_id: \"123456\" sub: SubMessage(subfield: \"foo\"))`\n\nIn general, not only fields but also field paths can be referenced\nfrom a path pattern. Fields mapped to the path pattern cannot be\nrepeated and must have a primitive (non-message) type.\n\nAny fields in the request message which are not bound by the path\npattern automatically become (optional) HTTP query\nparameters. Assume the following definition of the request message:\n\n\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // mapped to the URL\n int64 revision = 2; // becomes a parameter\n SubMessage sub = 3; // `sub.subfield` becomes a parameter\n }\n\n\nThis enables a HTTP JSON to RPC mapping as below:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))`\n\nNote that fields which are mapped to HTTP parameters must have a\nprimitive type or a repeated primitive type. Message types are not\nallowed. In the case of a repeated type, the parameter can be\nrepeated in the URL, as in `...?param=A¶m=B`.\n\nFor HTTP method kinds which allow a request body, the `body` field\nspecifies the mapping. Consider a REST update method on the\nmessage resource collection:\n\n\n service Messaging {\n rpc UpdateMessage(UpdateMessageRequest) returns (Message) {\n option (google.api.http) = {\n put: \"/v1/messages/{message_id}\"\n body: \"message\"\n };\n }\n }\n message UpdateMessageRequest {\n string message_id = 1; // mapped to the URL\n Message message = 2; // mapped to the body\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled, where the\nrepresentation of the JSON in the request body is determined by\nprotos JSON encoding:\n\nHTTP | RPC\n-----|-----\n`PUT /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })`\n\nThe special name `*` can be used in the body mapping to define that\nevery field not bound by the path template should be mapped to the\nrequest body. 
This enables the following alternative definition of\nthe update method:\n\n service Messaging {\n rpc UpdateMessage(Message) returns (Message) {\n option (google.api.http) = {\n put: \"/v1/messages/{message_id}\"\n body: \"*\"\n };\n }\n }\n message Message {\n string message_id = 1;\n string text = 2;\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled:\n\nHTTP | RPC\n-----|-----\n`PUT /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" text: \"Hi!\")`\n\nNote that when using `*` in the body mapping, it is not possible to\nhave HTTP parameters, as all fields not bound by the path end in\nthe body. This makes this option more rarely used in practice of\ndefining REST APIs. The common usage of `*` is in custom methods\nwhich don't use the URL at all for transferring data.\n\nIt is possible to define multiple HTTP methods for one RPC by using\nthe `additional_bindings` option. Example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/messages/{message_id}\"\n additional_bindings {\n get: \"/v1/users/{user_id}/messages/{message_id}\"\n }\n };\n }\n }\n message GetMessageRequest {\n string message_id = 1;\n string user_id = 2;\n }\n\n\nThis enables the following two alternative HTTP JSON to RPC\nmappings:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")`\n`GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id: \"123456\")`\n\n# Rules for HTTP mapping\n\nThe rules for mapping HTTP path, query parameters, and body fields\nto the request message are as follows:\n\n1. The `body` field specifies either `*` or a field path, or is\n omitted. If omitted, it assumes there is no HTTP body.\n2. Leaf fields (recursive expansion of nested messages in the\n request) can be classified into three types:\n (a) Matched in the URL template.\n (b) Covered by body (if body is `*`, everything except (a) fields;\n else everything under the body field)\n (c) All other fields.\n3. URL query parameters found in the HTTP request are mapped to (c) fields.\n4. Any body sent with an HTTP request can contain only (b) fields.\n\nThe syntax of the path template is as follows:\n\n Template = \"/\" Segments [ Verb ] ;\n Segments = Segment { \"/\" Segment } ;\n Segment = \"*\" | \"**\" | LITERAL | Variable ;\n Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ;\n FieldPath = IDENT { \".\" IDENT } ;\n Verb = \":\" LITERAL ;\n\nThe syntax `*` matches a single path segment. It follows the semantics of\n[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String\nExpansion.\n\nThe syntax `**` matches zero or more path segments. It follows the semantics\nof [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved\nExpansion. NOTE: it must be the last segment in the path except the Verb.\n\nThe syntax `LITERAL` matches literal text in the URL path.\n\nThe syntax `Variable` matches the entire path as specified by its template;\nthis nested template must not contain further variables. If a variable\nmatches a single path segment, its template may be omitted, e.g. `{var}`\nis equivalent to `{var=*}`.\n\nNOTE: the field paths in variables and in the `body` must not refer to\nrepeated fields or map fields.\n\nUse CustomHttpPattern to specify any HTTP method that is not included in the\n`pattern` field, such as HEAD, or \"*\" to leave the HTTP method unspecified for\na given URL path rule. 
The wild-card rule is useful for services that provide\ncontent to Web (HTML) clients.", + "ContextRule": { + "description": "A context rule provides information about the context for an individual API\nelement.", "type": "object", "properties": { - "custom": { - "description": "Custom pattern is used for defining custom verbs.", - "$ref": "CustomHttpPattern" - }, - "responseBody": { - "description": "The name of the response field whose value is mapped to the HTTP body of\nresponse. Other response fields are ignored. This field is optional. When\nnot set, the response message will be used as HTTP body of response.\nNOTE: the referred field must be not a repeated field and must be present\nat the top-level of response message type.", + "selector": { + "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", "type": "string" }, - "additionalBindings": { - "description": "Additional HTTP bindings for the selector. Nested bindings must\nnot contain an `additional_bindings` field themselves (that is,\nthe nesting may only be one level deep).", + "provided": { + "description": "A list of full type names of provided contexts.", "type": "array", "items": { - "$ref": "HttpRule" + "type": "string" } }, - "mediaDownload": { - "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", - "$ref": "MediaDownload" - }, - "body": { - "description": "The name of the request field whose value is mapped to the HTTP body, or\n`*` for mapping all fields not captured by the path pattern to the HTTP\nbody. NOTE: the referred field must not be a repeated field and must be\npresent at the top-level of request message type.", + "requested": { + "description": "A list of full type names of requested contexts.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "ContextRule" + }, + "CloudAuditOptions": { + "description": "Write a Cloud Audit log", + "type": "object", + "properties": {}, + "id": "CloudAuditOptions" + }, + "MetricDescriptor": { + "description": "Defines a metric type and its schema. Once a metric descriptor is created,\ndeleting or altering it stops data collection and makes the metric type's\nexisting data unusable.", + "type": "object", + "properties": { + "displayName": { + "description": "A concise name for the metric, which can be displayed in user interfaces.\nUse sentence case without an ending period, for example \"Request count\".", "type": "string" }, - "put": { - "description": "Used for updating a resource.", + "description": { + "description": "A detailed description of the metric, which can be used in documentation.", "type": "string" }, - "get": { - "description": "Used for listing and getting information about resources.", + "unit": { + "description": "The unit in which the metric value is reported. It is only applicable\nif the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. 
The\nsupported units are a subset of [The Unified Code for Units of\nMeasure](http://unitsofmeasure.org/ucum.html) standard:\n\n**Basic units (UNIT)**\n\n* `bit` bit\n* `By` byte\n* `s` second\n* `min` minute\n* `h` hour\n* `d` day\n\n**Prefixes (PREFIX)**\n\n* `k` kilo (10**3)\n* `M` mega (10**6)\n* `G` giga (10**9)\n* `T` tera (10**12)\n* `P` peta (10**15)\n* `E` exa (10**18)\n* `Z` zetta (10**21)\n* `Y` yotta (10**24)\n* `m` milli (10**-3)\n* `u` micro (10**-6)\n* `n` nano (10**-9)\n* `p` pico (10**-12)\n* `f` femto (10**-15)\n* `a` atto (10**-18)\n* `z` zepto (10**-21)\n* `y` yocto (10**-24)\n* `Ki` kibi (2**10)\n* `Mi` mebi (2**20)\n* `Gi` gibi (2**30)\n* `Ti` tebi (2**40)\n\n**Grammar**\n\nThe grammar includes the dimensionless unit `1`, such as `1/s`.\n\nThe grammar also includes these connectors:\n\n* `/` division (as an infix operator, e.g. `1/s`).\n* `.` multiplication (as an infix operator, e.g. `GBy.d`)\n\nThe grammar for a unit is as follows:\n\n Expression = Component { \".\" Component } { \"/\" Component } ;\n\n Component = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\n Annotation = \"{\" NAME \"}\" ;\n\nNotes:\n\n* `Annotation` is just a comment if it follows a `UNIT` and is\n equivalent to `1` if it is used alone. For examples,\n `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.\n* `NAME` is a sequence of non-blank printable ASCII characters not\n containing '{' or '}'.", "type": "string" }, - "selector": { - "description": "Selects methods to which this rule applies.\n\nRefer to selector for syntax details.", - "type": "string" + "labels": { + "description": "The set of labels that can be used to describe a specific\ninstance of this metric type. For example, the\n`appengine.googleapis.com/http/server/response_latencies` metric\ntype has a label for the HTTP response code, `response_code`, so\nyou can look at latencies for successful responses or just\nfor responses that failed.", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } }, - "post": { - "description": "Used for creating a resource.", + "name": { + "description": "The resource name of the metric descriptor. Depending on the\nimplementation, the name typically includes: (1) the parent resource name\nthat defines the scope of the metric type or of its data; and (2) the\nmetric's URL-encoded type, which also appears in the `type` field of this\ndescriptor. For example, following is the resource name of a custom\nmetric within the GCP project `my-project-id`:\n\n \"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"", "type": "string" }, - "patch": { - "description": "Used for updating a resource.", + "type": { + "description": "The metric type, including its DNS name prefix. The type is not\nURL-encoded. All user-defined custom metric types have the DNS name\n`custom.googleapis.com`. Metric types should use a natural hierarchical\ngrouping. 
For example:\n\n \"custom.googleapis.com/invoice/paid/amount\"\n \"appengine.googleapis.com/http/server/response_latencies\"", "type": "string" }, - "delete": { - "description": "Used for deleting a resource.", - "type": "string" + "valueType": { + "description": "Whether the measurement is an integer, a floating-point number, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "The value is a boolean.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a signed 64-bit integer.", + "The value is a double precision floating point number.", + "The value is a text string.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a `Distribution`.", + "The value is money." + ], + "enum": [ + "VALUE_TYPE_UNSPECIFIED", + "BOOL", + "INT64", + "DOUBLE", + "STRING", + "DISTRIBUTION", + "MONEY" + ] }, - "mediaUpload": { - "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", - "$ref": "MediaUpload" + "metricKind": { + "description": "Whether the metric records instantaneous values, changes to a value, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "An instantaneous measurement of a value.", + "The change in a value during a time interval.", + "A value accumulated over a time interval. Cumulative\nmeasurements in a time series should have the same start time\nand increasing end times, until an event resets the cumulative\nvalue to zero and sets a new start time for the following\npoints." + ], + "enum": [ + "METRIC_KIND_UNSPECIFIED", + "GAUGE", + "DELTA", + "CUMULATIVE" + ] } }, - "id": "HttpRule" + "id": "MetricDescriptor" }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", + "SourceContext": { + "description": "`SourceContext` represents information about the source of a\nprotobuf element, like the file in which it is defined.", "type": "object", "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "array", - "items": { - "type": "string" - } + "fileName": { + "description": "The path-qualified name of the .proto file that contained the associated\nprotobuf element. 
For example: `\"google/protobuf/source_context.proto\"`.", + "type": "string" } }, - "id": "TestIamPermissionsRequest" + "id": "SourceContext" }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", + "ListServicesResponse": { + "description": "Response message for `ListServices` method.", "type": "object", "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "services": { + "description": "The returned services will only have the name field set.", "type": "array", "items": { - "type": "string" + "$ref": "ManagedService" } + }, + "nextPageToken": { + "description": "Token that can be passed to `ListServices` to resume a paginated query.", + "type": "string" } }, - "id": "TestIamPermissionsResponse" + "id": "ListServicesResponse" }, - "CustomError": { - "description": "Customize service error responses. For example, list any service\nspecific protobuf types that can appear in error detail lists of\nerror responses.\n\nExample:\n\n custom_error:\n types:\n - google.foo.v1.CustomError\n - google.foo.v1.AnotherError", + "Endpoint": { + "description": "`Endpoint` describes a network endpoint that serves a set of APIs.\nA service may expose any number of endpoints, and all endpoints share the\nsame service configuration, such as quota configuration and monitoring\nconfiguration.\n\nExample service configuration:\n\n name: library-example.googleapis.com\n endpoints:\n # Below entry makes 'google.example.library.v1.Library'\n # API be served from endpoint address library-example.googleapis.com.\n # It also allows HTTP OPTIONS calls to be passed to the backend, for\n # it to decide whether the subsequent cross-origin request is\n # allowed to proceed.\n - name: library-example.googleapis.com\n allow_cors: true", "type": "object", "properties": { - "types": { - "description": "The list of custom error detail types, e.g. 'google.foo.v1.CustomError'.", + "aliases": { + "description": "DEPRECATED: This field is no longer supported. Instead of using aliases,\nplease specify multiple google.api.Endpoint for each of the intented\nalias.\n\nAdditional names that this endpoint will be hosted on.", "type": "array", "items": { "type": "string" } }, - "rules": { - "description": "The list of custom error rules that apply to individual API messages.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "features": { + "description": "The list of features enabled on this endpoint.", "type": "array", "items": { - "$ref": "CustomErrorRule" + "type": "string" + } + }, + "allowCors": { + "description": "Allowing\n[CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka\ncross-domain traffic, would allow the backends served from this endpoint to\nreceive and respond to HTTP OPTIONS requests. The response will be used by\nthe browser to determine whether the subsequent cross-origin request is\nallowed to proceed.", + "type": "boolean" + }, + "name": { + "description": "The canonical name of this endpoint.", + "type": "string" + }, + "apis": { + "description": "The list of APIs served by this endpoint.", + "type": "array", + "items": { + "type": "string" } } }, - "id": "CustomError" + "id": "Endpoint" }, - "MediaDownload": { - "description": "Do not use this. 
For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", + "OAuthRequirements": { + "description": "OAuth scopes are a way to define data and permissions on data. For example,\nthere are scopes defined for \"Read-only access to Google Calendar\" and\n\"Access to Cloud Platform\". Users can consent to a scope for an application,\ngiving it permission to access that data on their behalf.\n\nOAuth scope specifications should be fairly coarse grained; a user will need\nto see and understand the text description of what your scope means.\n\nIn most cases: use one or at most two OAuth scopes for an entire family of\nproducts. If your product has multiple APIs, you should probably be sharing\nthe OAuth scope across all of those APIs.\n\nWhen you need finer grained OAuth consent screens: talk with your product\nmanagement about how developers will use them in practice.\n\nPlease note that even though each of the canonical scopes is enough for a\nrequest to be accepted and passed to the backend, a request can still fail\ndue to the backend requiring additional scopes or permissions.", "type": "object", "properties": { - "enabled": { - "description": "Whether download is enabled.", - "type": "boolean" + "canonicalScopes": { + "description": "The list of publicly documented OAuth scopes that are allowed access. An\nOAuth token containing any of these scopes will be accepted.\n\nExample:\n\n canonical_scopes: https://www.googleapis.com/auth/calendar,\n https://www.googleapis.com/auth/calendar.read", + "type": "string" } }, - "id": "MediaDownload" + "id": "OAuthRequirements" }, - "SubmitConfigSourceRequest": { - "description": "Request message for SubmitConfigSource method.", + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", "type": "object", "properties": { - "configSource": { - "description": "The source configuration for the service.", - "$ref": "ConfigSource" - }, - "validateOnly": { - "description": "Optional. If set, this will result in the generation of a\n`google.api.Service` configuration based on the `ConfigSource` provided,\nbut the generated config and the sources will NOT be persisted.", - "type": "boolean" + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } } }, - "id": "SubmitConfigSourceRequest" + "id": "TestIamPermissionsResponse" }, - "AuthenticationRule": { - "description": "Authentication rules for the service.\n\nBy default, if a method has any authentication requirements, every request\nmust include a valid credential matching one of the requirements.\nIt's an error to include more than one kind of credential in a single\nrequest.\n\nIf a method doesn't have any auth requirements, request credentials will be\nignored.", + "GetIamPolicyRequest": { + "description": "Request message for `GetIamPolicy` method.", + "type": "object", + "properties": {}, + "id": "GetIamPolicyRequest" + }, + "Usage": { + "description": "Configuration controlling usage of a service.", "type": "object", "properties": { - "oauth": { - "description": "The requirements for OAuth credentials.", - "$ref": "OAuthRequirements" - }, - "allowWithoutCredential": { - "description": "Whether to allow requests without a credential. 
The credential can be\nan OAuth token, Google cookies (first-party auth) or EndUserCreds.\n\nFor requests without credentials, if the service control environment is\nspecified, each incoming request **must** be associated with a service\nconsumer. This can be done by passing an API key that belongs to a consumer\nproject.", - "type": "boolean" - }, "requirements": { - "description": "Requirements for additional authentication providers.", + "description": "Requirements that must be satisfied before a consumer project can use the\nservice. Each requirement is of the form \u003cservice.name\u003e/\u003crequirement-id\u003e;\nfor example 'serviceusage.googleapis.com/billing-enabled'.", "type": "array", "items": { - "$ref": "AuthRequirement" + "type": "string" } }, - "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "producerNotificationChannel": { + "description": "The full resource name of a channel used for sending notifications to the\nservice producer.\n\nGoogle Service Management currently only supports\n[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification\nchannel. To use Google Cloud Pub/Sub as the channel, this must be the name\nof a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format\ndocumented in https://cloud.google.com/pubsub/docs/overview.", "type": "string" + }, + "rules": { + "description": "A list of usage rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "UsageRule" + } } }, - "id": "AuthenticationRule" + "id": "Usage" }, - "Logging": { - "description": "Logging configuration of the service.\n\nThe following example shows how to configure logs to be sent to the\nproducer and consumer projects. In the example, the `activity_history`\nlog is sent to both the producer and consumer projects, whereas the\n`purchase_history` log is only sent to the producer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n logs:\n - name: activity_history\n labels:\n - key: /customer_id\n - name: purchase_history\n logging:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history\n - purchase_history\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history", + "Context": { + "description": "`Context` defines which contexts an API requests.\n\nExample:\n\n context:\n rules:\n - selector: \"*\"\n requested:\n - google.rpc.context.ProjectContext\n - google.rpc.context.OriginContext\n\nThe above specifies that all methods in the API request\n`google.rpc.context.ProjectContext` and\n`google.rpc.context.OriginContext`.\n\nAvailable context types are defined in package\n`google.rpc.context`.", "type": "object", "properties": { - "producerDestinations": { - "description": "Logging configurations for sending logs to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. 
A log can be used in at most\none producer destination.", - "type": "array", - "items": { - "$ref": "LoggingDestination" - } - }, - "consumerDestinations": { - "description": "Logging configurations for sending logs to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none consumer destination.", + "rules": { + "description": "A list of RPC context rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", "type": "array", "items": { - "$ref": "LoggingDestination" + "$ref": "ContextRule" } } }, - "id": "Logging" + "id": "Context" }, - "SystemParameter": { - "description": "Define a parameter's name and location. The parameter may be passed as either\nan HTTP header or a URL query parameter, and if both are passed the behavior\nis implementation-dependent.", + "Rule": { + "description": "A rule to be applied in a Policy.", "type": "object", "properties": { - "urlQueryParameter": { - "description": "Define the URL query parameter name to use for the parameter. It is case\nsensitive.", - "type": "string" - }, - "name": { - "description": "Define the name of the parameter, such as \"api_key\" . It is case sensitive.", - "type": "string" + "logConfig": { + "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries\nthat match the LOG action.", + "type": "array", + "items": { + "$ref": "LogConfig" + } }, - "httpHeader": { - "description": "Define the HTTP header name to use for the parameter. It is case\ninsensitive.", - "type": "string" - } - }, - "id": "SystemParameter" - }, - "Enum": { - "description": "Enum type definition.", - "type": "object", - "properties": { - "syntax": { - "description": "The source syntax.", - "enum": [ - "SYNTAX_PROTO2", - "SYNTAX_PROTO3" - ], - "enumDescriptions": [ - "Syntax `proto2`.", - "Syntax `proto3`." - ], - "type": "string" - }, - "enumvalue": { - "description": "Enum value definitions.", + "in": { + "description": "If one or more 'in' clauses are specified, the rule matches if\nthe PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", "type": "array", "items": { - "$ref": "EnumValue" + "type": "string" } }, - "options": { - "description": "Protocol buffer options.", + "permissions": { + "description": "A permission is a string of form '\u003cservice\u003e.\u003cresource type\u003e.\u003cverb\u003e'\n(e.g., 'storage.buckets.list'). 
A value of '*' matches all permissions,\nand a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", "type": "array", "items": { - "$ref": "Option" + "type": "string" } }, - "sourceContext": { - "description": "The source context.", - "$ref": "SourceContext" - }, - "name": { - "description": "Enum type name.", - "type": "string" - } - }, - "id": "Enum" - }, - "GenerateConfigReportResponse": { - "description": "Response message for GenerateConfigReport method.", - "type": "object", - "properties": { - "serviceName": { - "description": "Name of the service this report belongs to.", - "type": "string" - }, - "id": { - "description": "ID of the service configuration this report belongs to.", + "action": { + "enumDescriptions": [ + "Default no action.", + "Matching 'Entries' grant access.", + "Matching 'Entries' grant access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' deny access.", + "Matching 'Entries' deny access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' tell IAM.Check callers to generate logs." + ], + "enum": [ + "NO_ACTION", + "ALLOW", + "ALLOW_WITH_LOG", + "DENY", + "DENY_WITH_LOG", + "LOG" + ], + "description": "Required", "type": "string" }, - "changeReports": { - "description": "list of ChangeReport, each corresponding to comparison between two\nservice configurations.", - "type": "array", - "items": { - "$ref": "ChangeReport" - } - }, - "diagnostics": { - "description": "Errors / Linter warnings associated with the service definition this\nreport\nbelongs to.", - "type": "array", - "items": { - "$ref": "Diagnostic" - } - } - }, - "id": "GenerateConfigReportResponse" - }, - "DeleteServiceStrategy": { - "description": "Strategy used to delete a service. This strategy is a placeholder only\nused by the system generated rollout to delete a service.", - "type": "object", - "properties": {}, - "id": "DeleteServiceStrategy" - }, - "OperationMetadata": { - "description": "The metadata associated with a long running operation resource.", - "type": "object", - "properties": { - "steps": { - "description": "Detailed status information for each step. 
The order is undetermined.", + "notIn": { + "description": "If one or more 'not_in' clauses are specified, the rule matches\nif the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.\nThe format for in and not_in entries is the same as for members in a\nBinding (see google/iam/v1/policy.proto).", "type": "array", "items": { - "$ref": "Step" + "type": "string" } }, - "startTime": { - "description": "The start time of the operation.", - "type": "string", - "format": "google-datetime" + "description": { + "description": "Human-readable description of the rule.", + "type": "string" }, - "resourceNames": { - "description": "The full name of the resources that this operation is directly\nassociated with.", + "conditions": { + "description": "Additional restrictions that must be met", "type": "array", "items": { - "type": "string" + "$ref": "Condition" } - }, - "progressPercentage": { - "description": "Percentage of completion of this operation, ranging from 0 to 100.", - "type": "integer", - "format": "int32" - } - }, - "id": "OperationMetadata" - }, - "DisableServiceRequest": { - "description": "Request message for DisableService method.", - "type": "object", - "properties": { - "consumerId": { - "description": "The identity of consumer resource which service disablement will be\napplied to.\n\nThe Google Service Management implementation accepts the following\nforms:\n- \"project:\u003cproject_id\u003e\"\n\nNote: this is made compatible with\ngoogle.api.servicecontrol.v1.Operation.consumer_id.", - "type": "string" } }, - "id": "DisableServiceRequest" + "id": "Rule" }, - "CustomHttpPattern": { - "description": "A custom pattern is used for defining custom HTTP verb.", + "LogConfig": { + "description": "Specifies what kind of log the caller must write\nIncrement a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only,\nand end in \"_count\". Field names should not contain an initial slash.\nThe actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are\ntheir respective values.\n\nAt present the only supported field names are\n - \"iam_principal\", corresponding to IAMContext.principal;\n - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples:\n counter { metric: \"/debug_access_count\" field: \"iam_principal\" }\n ==\u003e increment counter /iam/policy/backend_debug_access_count\n {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support:\n* multiple field names (though this may be supported in the future)\n* decrementing the counter\n* incrementing it by anything other than 1", "type": "object", "properties": { - "path": { - "description": "The path matched by this custom verb.", - "type": "string" + "dataAccess": { + "description": "Data access options.", + "$ref": "DataAccessOptions" }, - "kind": { - "description": "The name of this custom HTTP verb.", - "type": "string" + "cloudAudit": { + "$ref": "CloudAuditOptions", + "description": "Cloud audit options." + }, + "counter": { + "description": "Counter options.", + "$ref": "CounterOptions" } }, - "id": "CustomHttpPattern" + "id": "LogConfig" }, "LogDescriptor": { "description": "A description of a log type. 
Example in YAML format:\n\n - name: library.googleapis.com/activity_history\n description: The history of borrowing and returning library items.\n display_name: Activity\n labels:\n - key: /customer_id\n description: Identifier of a library customer", @@ -2254,6 +1769,10 @@ "$ref": "LabelDescriptor" } }, + "name": { + "description": "The name of the log. It must be less than 512 characters long and can\ninclude the following characters: upper- and lower-case alphanumeric\ncharacters [A-Za-z0-9], and punctuation characters including\nslash, underscore, hyphen, period [/_-.].", + "type": "string" + }, "description": { "description": "A human-readable description of this log. This information appears in\nthe documentation and can contain details.", "type": "string" @@ -2261,643 +1780,1152 @@ "displayName": { "description": "The human-readable name for this log. This information appears on\nthe user interface and should be concise.", "type": "string" - }, - "name": { - "description": "The name of the log. It must be less than 512 characters long and can\ninclude the following characters: upper- and lower-case alphanumeric\ncharacters [A-Za-z0-9], and punctuation characters including\nslash, underscore, hyphen, period [/_-.].", - "type": "string" } }, "id": "LogDescriptor" }, - "MonitoringDestination": { - "description": "Configuration of a specific monitoring destination (the producer project\nor the consumer project).", + "ConfigFile": { + "description": "Generic specification of a source configuration file", "type": "object", "properties": { - "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "fileContents": { + "description": "The bytes that constitute the file.", + "format": "byte", "type": "string" }, - "metrics": { - "description": "Names of the metrics to report to this monitoring destination.\nEach name must be defined in Service.metrics section.", - "type": "array", - "items": { - "type": "string" - } + "filePath": { + "description": "The file name of the configuration file (full or relative path).", + "type": "string" + }, + "fileType": { + "description": "The type of configuration file this represents.", + "type": "string", + "enumDescriptions": [ + "Unknown file type.", + "YAML-specification of service.", + "OpenAPI specification, serialized in JSON.", + "OpenAPI specification, serialized in YAML.", + "FileDescriptorSet, generated by protoc.\n\nTo generate, use protoc with imports and source info included.\nFor an example test.proto file, the following command would put the value\nin a new file named out.pb.\n\n$protoc --include_imports --include_source_info test.proto -o out.pb" + ], + "enum": [ + "FILE_TYPE_UNSPECIFIED", + "SERVICE_CONFIG_YAML", + "OPEN_API_JSON", + "OPEN_API_YAML", + "FILE_DESCRIPTOR_SET_PROTO" + ] } }, - "id": "MonitoringDestination" + "id": "ConfigFile" }, - "Field": { - "description": "A single field of a message type.", + "MonitoredResourceDescriptor": { + "description": "An object that describes the schema of a MonitoredResource object using a\ntype name and a set of labels. For example, the monitored resource\ndescriptor for Google Compute Engine VM instances has a type of\n`\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and\n`\"zone\"` to identify particular VM instances.\n\nDifferent APIs can support different monitored resource types. 
APIs generally\nprovide a `list` method that returns the monitored resource descriptors used\nby the API.", "type": "object", "properties": { - "defaultValue": { - "description": "The string value of the default value of this field. Proto2 syntax only.", + "displayName": { + "description": "Optional. A concise name for the monitored resource type that might be\ndisplayed in user interfaces. It should be a Title Cased Noun Phrase,\nwithout any article or other determiners. For example,\n`\"Google Cloud SQL Database\"`.", "type": "string" }, - "jsonName": { - "description": "The field JSON name.", + "description": { + "description": "Optional. A detailed description of the monitored resource type that might\nbe used in documentation.", "type": "string" }, - "options": { - "description": "The protocol buffer options.", + "type": { + "description": "Required. The monitored resource type. For example, the type\n`\"cloudsql_database\"` represents databases in Google Cloud SQL.\nThe maximum length of this value is 256 characters.", + "type": "string" + }, + "labels": { + "description": "Required. A set of labels used to describe instances of this monitored\nresource type. For example, an individual Google Cloud SQL database is\nidentified by values for the labels `\"database_id\"` and `\"zone\"`.", "type": "array", "items": { - "$ref": "Option" + "$ref": "LabelDescriptor" } }, - "oneofIndex": { - "description": "The index of the field type in `Type.oneofs`, for message or enumeration\ntypes. The first type has index 1; zero means the type is not in the list.", - "type": "integer", - "format": "int32" - }, - "cardinality": { - "description": "The field cardinality.", - "enum": [ - "CARDINALITY_UNKNOWN", - "CARDINALITY_OPTIONAL", - "CARDINALITY_REQUIRED", - "CARDINALITY_REPEATED" - ], - "enumDescriptions": [ - "For fields with unknown cardinality.", - "For optional fields.", - "For required fields. Proto2 syntax only.", - "For repeated fields." - ], - "type": "string" - }, - "typeUrl": { - "description": "The field type URL, without the scheme, for message or enumeration\ntypes. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", - "type": "string" - }, "name": { - "description": "The field name.", + "description": "Optional. The resource name of the monitored resource descriptor:\n`\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where\n{type} is the value of the `type` field in this object and\n{project_id} is a project ID that provides API-specific context for\naccessing the type. APIs that do not use project information can use the\nresource name format `\"monitoredResourceDescriptors/{type}\"`.", "type": "string" + } + }, + "id": "MonitoredResourceDescriptor" + }, + "CustomErrorRule": { + "description": "A custom error rule.", + "type": "object", + "properties": { + "isErrorType": { + "description": "Mark this message as possible payload in error response. 
Otherwise,\nobjects of this type will be filtered when they appear in error payload.", + "type": "boolean" }, - "packed": { - "description": "Whether to use alternative packed wire representation.", - "type": "boolean" - }, - "number": { - "description": "The field number.", - "type": "integer", - "format": "int32" - }, - "kind": { - "description": "The field type.", - "enum": [ - "TYPE_UNKNOWN", - "TYPE_DOUBLE", - "TYPE_FLOAT", - "TYPE_INT64", - "TYPE_UINT64", - "TYPE_INT32", - "TYPE_FIXED64", - "TYPE_FIXED32", - "TYPE_BOOL", - "TYPE_STRING", - "TYPE_GROUP", - "TYPE_MESSAGE", - "TYPE_BYTES", - "TYPE_UINT32", - "TYPE_ENUM", - "TYPE_SFIXED32", - "TYPE_SFIXED64", - "TYPE_SINT32", - "TYPE_SINT64" - ], - "enumDescriptions": [ - "Field type unknown.", - "Field type double.", - "Field type float.", - "Field type int64.", - "Field type uint64.", - "Field type int32.", - "Field type fixed64.", - "Field type fixed32.", - "Field type bool.", - "Field type string.", - "Field type group. Proto2 syntax only, and deprecated.", - "Field type message.", - "Field type bytes.", - "Field type uint32.", - "Field type enum.", - "Field type sfixed32.", - "Field type sfixed64.", - "Field type sint32.", - "Field type sint64." - ], + "selector": { + "description": "Selects messages to which this rule applies.\n\nRefer to selector for syntax details.", "type": "string" } }, - "id": "Field" + "id": "CustomErrorRule" }, - "Binding": { - "description": "Associates `members` with a `role`.", + "MediaDownload": { + "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", "type": "object", "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. 
For example, `google.com` or `example.com`.\n\n", + "enabled": { + "description": "Whether download is enabled.", + "type": "boolean" + } + }, + "id": "MediaDownload" + }, + "ChangeReport": { + "description": "Change report associated with a particular service configuration.\n\nIt contains a list of ConfigChanges based on the comparison between\ntwo service configurations.", + "type": "object", + "properties": { + "configChanges": { + "description": "List of changes between two service configurations.\nThe changes will be alphabetically sorted based on the identifier\nof each change.\nA ConfigChange identifier is a dot separated path to the configuration.\nExample: visibility.rules[selector='LibraryService.CreateBook'].restriction", "type": "array", "items": { - "type": "string" + "$ref": "ConfigChange" } - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" } }, - "id": "Binding" + "id": "ChangeReport" }, - "ConfigRef": { - "description": "Represents a service configuration with its name and id.", + "DisableServiceRequest": { + "description": "Request message for DisableService method.", "type": "object", "properties": { - "name": { - "description": "Resource name of a service config. It must have the following\nformat: \"services/{service name}/configs/{config id}\".", + "consumerId": { + "description": "The identity of consumer resource which service disablement will be\napplied to.\n\nThe Google Service Management implementation accepts the following\nforms:\n- \"project:\u003cproject_id\u003e\"\n\nNote: this is made compatible with\ngoogle.api.servicecontrol.v1.Operation.consumer_id.", "type": "string" } }, - "id": "ConfigRef" - }, - "DataAccessOptions": { - "description": "Write a Data Access (Gin) log", - "type": "object", - "properties": {}, - "id": "DataAccessOptions" + "id": "DisableServiceRequest" }, - "AuthProvider": { - "description": "Configuration for an anthentication provider, including support for\n[JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "SubmitConfigSourceResponse": { + "description": "Response message for SubmitConfigSource method.", "type": "object", "properties": { - "audiences": { - "description": "The list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", - "type": "string" - }, - "jwksUri": { - "description": "URL of the provider's public key set to validate signature of the JWT. See\n[OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).\nOptional if the key set document:\n - can be retrieved from\n [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html\n of the issuer.\n - can be inferred from the email domain of the issuer (e.g. 
a Google service account).\n\nExample: https://www.googleapis.com/oauth2/v1/certs", - "type": "string" - }, - "id": { - "description": "The unique identifier of the auth provider. It will be referred to by\n`AuthRequirement.provider_id`.\n\nExample: \"bookstore_auth\".", - "type": "string" - }, - "issuer": { - "description": "Identifies the principal that issued the JWT. See\nhttps://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1\nUsually a URL or an email address.\n\nExample: https://securetoken.google.com\nExample: 1234567-compute@developer.gserviceaccount.com", - "type": "string" + "serviceConfig": { + "description": "The generated service configuration.", + "$ref": "Service" } }, - "id": "AuthProvider" + "id": "SubmitConfigSourceResponse" }, - "VisibilityRule": { - "description": "A visibility rule provides visibility configuration for an individual API\nelement.", + "MediaUpload": { + "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", "type": "object", "properties": { - "restriction": { - "description": "A comma-separated list of visibility labels that apply to the `selector`.\nAny of the listed labels can be used to grant the visibility.\n\nIf a rule has multiple labels, removing one of the labels but not all of\nthem can break clients.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: GOOGLE_INTERNAL, TRUSTED_TESTER\n\nRemoving GOOGLE_INTERNAL from this restriction will break clients that\nrely on this method and only had access to it through GOOGLE_INTERNAL.", - "type": "string" - }, - "selector": { - "description": "Selects methods, messages, fields, enums, etc. to which this rule applies.\n\nRefer to selector for syntax details.", - "type": "string" + "enabled": { + "description": "Whether upload is enabled.", + "type": "boolean" } }, - "id": "VisibilityRule" + "id": "MediaUpload" }, - "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", + "Advice": { + "description": "Generated advice about this change, used for providing more\ninformation about how a change will affect the existing service.", "type": "object", "properties": { - "logType": { - "description": "The log type that this config enables.", - "enum": [ - "LOG_TYPE_UNSPECIFIED", - "ADMIN_READ", - "DATA_WRITE", - "DATA_READ" - ], - "enumDescriptions": [ - "Default case. Should never be this.", - "Admin reads. Example: CloudIAM getIamPolicy", - "Data writes. Example: CloudSQL Users create", - "Data reads. 
Example: CloudSQL Users list" - ], + "description": { + "description": "Useful description for why this advice was applied and what actions should\nbe taken to mitigate any implied risks.", "type": "string" - }, - "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", - "type": "array", - "items": { - "type": "string" - } } }, - "id": "AuditLogConfig" + "id": "Advice" }, - "UndeleteServiceResponse": { - "description": "Response message for UndeleteService method.", + "ManagedService": { + "description": "The full representation of a Service that is managed by\nGoogle Service Management.", "type": "object", "properties": { - "service": { - "description": "Revived service resource.", - "$ref": "ManagedService" + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements.", + "type": "string" + }, + "producerProjectId": { + "description": "ID of the project that produces and owns this service.", + "type": "string" } }, - "id": "UndeleteServiceResponse" + "id": "ManagedService" }, "UsageRule": { "description": "Usage configuration rules for the service.\n\nNOTE: Under development.\n\n\nUse this rule to configure unregistered calls for the service. Unregistered\ncalls are calls that do not contain consumer project identity.\n(Example: calls that do not contain an API key).\nBy default, API methods do not allow unregistered calls, and each method call\nmust be identified by a consumer project identity. Use this rule to\nallow/disallow unregistered calls.\n\nExample of an API that wants to allow unregistered calls for entire service.\n\n usage:\n rules:\n - selector: \"*\"\n allow_unregistered_calls: true\n\nExample of a method that wants to allow unregistered calls.\n\n usage:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allow_unregistered_calls: true", "type": "object", "properties": { - "allowUnregisteredCalls": { - "description": "True, if the method allows unregistered calls; false otherwise.", - "type": "boolean" - }, "selector": { "description": "Selects the methods to which this rule applies. 
Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", "type": "string" + }, + "allowUnregisteredCalls": { + "description": "True, if the method allows unregistered calls; false otherwise.", + "type": "boolean" } }, "id": "UsageRule" }, - "EnumValue": { - "description": "Enum value definition.", + "TrafficPercentStrategy": { + "description": "Strategy that specifies how Google Service Control should select\ndifferent\nversions of service configurations based on traffic percentage.\n\nOne example of how to gradually rollout a new service configuration using\nthis\nstrategy:\nDay 1\n\n Rollout {\n id: \"example.googleapis.com/rollout_20160206\"\n traffic_percent_strategy {\n percentages: {\n \"example.googleapis.com/20160201\": 70.00\n \"example.googleapis.com/20160206\": 30.00\n }\n }\n }\n\nDay 2\n\n Rollout {\n id: \"example.googleapis.com/rollout_20160207\"\n traffic_percent_strategy: {\n percentages: {\n \"example.googleapis.com/20160206\": 100.00\n }\n }\n }", "type": "object", "properties": { - "options": { - "description": "Protocol buffer options.", - "type": "array", - "items": { - "$ref": "Option" + "percentages": { + "description": "Maps service configuration IDs to their corresponding traffic percentage.\nKey is the service configuration ID, Value is the traffic percentage\nwhich must be greater than 0.0 and the sum must equal to 100.0.", + "type": "object", + "additionalProperties": { + "format": "double", + "type": "number" } - }, - "name": { - "description": "Enum value name.", - "type": "string" - }, - "number": { - "description": "Enum value number.", - "type": "integer", - "format": "int32" } }, - "id": "EnumValue" - }, - "MediaUpload": { - "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", - "type": "object", - "properties": { - "enabled": { - "description": "Whether upload is enabled.", - "type": "boolean" - } - }, - "id": "MediaUpload" + "id": "TrafficPercentStrategy" }, - "BackendRule": { - "description": "A backend rule provides configuration for an individual API element.", + "AuthRequirement": { + "description": "User-defined authentication requirements, including support for\n[JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "type": "object", "properties": { - "address": { - "description": "The address of the API backend.", + "providerId": { + "description": "id from authentication provider.\n\nExample:\n\n provider_id: bookstore_auth", "type": "string" }, - "deadline": { - "description": "The number of seconds to wait for a response from a request. The\ndefault depends on the deployment context.", - "type": "number", - "format": "double" - }, - "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "audiences": { + "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is\nimplemented and accepted in all the runtime components.\n\nThe list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. 
For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", "type": "string" } }, - "id": "BackendRule" + "id": "AuthRequirement" }, - "ContextRule": { - "description": "A context rule provides information about the context for an individual API\nelement.", + "Condition": { + "description": "A condition to be met.", "type": "object", "properties": { - "provided": { - "description": "A list of full type names of provided contexts.", - "type": "array", - "items": { - "type": "string" - } - }, - "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", - "type": "string" - }, - "requested": { - "description": "A list of full type names of requested contexts.", - "type": "array", + "iam": { + "enumDescriptions": [ + "Default non-attribute.", + "Either principal or (if present) authority selector.", + "The principal (even if an authority selector is present), which\nmust only be used for attribution, not authorization.", + "An approver (distinct from the requester) that has authorized this\nrequest.\nWhen used with IN, the condition indicates that one of the approvers\nassociated with the request matches the specified principal, or is a\nmember of the specified group. Approvers can only grant additional\naccess, and are thus only used in a strictly positive context\n(e.g. ALLOW/IN or DENY/NOT_IN).\nSee: go/rpc-security-policy-dynamicauth." + ], + "enum": [ + "NO_ATTR", + "AUTHORITY", + "ATTRIBUTION", + "APPROVER" + ], + "description": "Trusted attributes supplied by the IAM system.", + "type": "string" + }, + "values": { + "description": "The objects of the condition. This is mutually exclusive with 'value'.", + "type": "array", "items": { "type": "string" } + }, + "op": { + "enumDescriptions": [ + "Default no-op.", + "DEPRECATED. Use IN instead.", + "DEPRECATED. Use NOT_IN instead.", + "Set-inclusion check.", + "Set-exclusion check.", + "Subject is discharged" + ], + "enum": [ + "NO_OP", + "EQUALS", + "NOT_EQUALS", + "IN", + "NOT_IN", + "DISCHARGED" + ], + "description": "An operator to apply the subject with.", + "type": "string" + }, + "svc": { + "description": "Trusted attributes discharged by the service.", + "type": "string" + }, + "sys": { + "description": "Trusted attributes supplied by any service that owns resources and uses\nthe IAM system for access control.", + "type": "string", + "enumDescriptions": [ + "Default non-attribute type", + "Region of the resource", + "Service name", + "Resource name", + "IP address of the caller" + ], + "enum": [ + "NO_ATTR", + "REGION", + "SERVICE", + "NAME", + "IP" + ] + }, + "value": { + "description": "DEPRECATED. Use 'values' instead.", + "type": "string" } }, - "id": "ContextRule" + "id": "Condition" }, - "Http": { - "description": "Defines the HTTP configuration for a service. 
It contains a list of\nHttpRule, each specifying the mapping of an RPC method\nto one or more HTTP REST API methods.", + "Documentation": { + "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: (== include google/foo/overview.md ==)\n - name: Tutorial\n content: (== include google/foo/tutorial.md ==)\n subpages;\n - name: Java\n content: (== include google/foo/tutorial_java.md ==)\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e[fully.qualified.proto.name][]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e[display text][fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e(-- internal comment --)\u003c/code\u003e\u003c/pre\u003e\nComments can be made conditional using a visibility label. The below\ntext will be only rendered if the `BETA` label is available:\n\u003cpre\u003e\u003ccode\u003e(--BETA: comment for BETA users --)\u003c/code\u003e\u003c/pre\u003e\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e(== include path/to/file ==)\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e(== resource_for v1.shelves.books ==)\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", "type": "object", "properties": { + "summary": { + "description": "A short summary of what the service does. 
Can only be provided by\nplain text.", + "type": "string" + }, + "documentationRootUrl": { + "description": "The URL to the root of documentation.", + "type": "string" + }, "rules": { - "description": "A list of HTTP configuration rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of documentation rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", "type": "array", "items": { - "$ref": "HttpRule" + "$ref": "DocumentationRule" + } + }, + "overview": { + "description": "Declares a single overview page. For example:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n overview: (== include overview.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nThis is a shortcut for the following declaration (using pages style):\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n pages:\n - name: Overview\n content: (== include overview.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nNote: you cannot specify both `overview` field and `pages` field.", + "type": "string" + }, + "pages": { + "description": "The top level pages for the documentation set.", + "type": "array", + "items": { + "$ref": "Page" } } }, - "id": "Http" + "id": "Documentation" }, - "Visibility": { - "description": "`Visibility` defines restrictions for the visibility of service\nelements. Restrictions are specified using visibility labels\n(e.g., TRUSTED_TESTER) that are elsewhere linked to users and projects.\n\nUsers and projects can have access to more than one visibility label. The\neffective visibility for multiple labels is the union of each label's\nelements, plus any unrestricted elements.\n\nIf an element and its parents have no restrictions, visibility is\nunconditionally granted.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: TRUSTED_TESTER\n - selector: google.calendar.Calendar.Delegate\n restriction: GOOGLE_INTERNAL\n\nHere, all methods are publicly visible except for the restricted methods\nEnhancedSearch and Delegate.", + "AuditLogConfig": { + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", "type": "object", "properties": { - "rules": { - "description": "A list of visibility rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "exemptedMembers": { + "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", "type": "array", "items": { - "$ref": "VisibilityRule" + "type": "string" } + }, + "logType": { + "description": "The log type that this config enables.", + "type": "string", + "enumDescriptions": [ + "Default case. Should never be this.", + "Admin reads. Example: CloudIAM getIamPolicy", + "Data writes. Example: CloudSQL Users create", + "Data reads. 
Example: CloudSQL Users list" + ], + "enum": [ + "LOG_TYPE_UNSPECIFIED", + "ADMIN_READ", + "DATA_WRITE", + "DATA_READ" + ] } }, - "id": "Visibility" + "id": "AuditLogConfig" }, - "ConfigChange": { - "description": "Output generated from semantically comparing two versions of a service\nconfiguration.\n\nIncludes detailed information about a field that have changed with\napplicable advice about potential consequences for the change, such as\nbackwards-incompatibility.", + "ConfigSource": { + "description": "Represents a source file which is used to generate the service configuration\ndefined by `google.api.Service`.", "type": "object", "properties": { - "newValue": { - "description": "Value of the changed object in the new Service configuration,\nin JSON format. This field will not be populated if ChangeType == REMOVED.", - "type": "string" - }, - "oldValue": { - "description": "Value of the changed object in the old Service configuration,\nin JSON format. This field will not be populated if ChangeType == ADDED.", + "id": { + "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. If empty, the server may choose to\ngenerate one instead.", "type": "string" }, - "element": { - "description": "Object hierarchy path to the change, with levels separated by a '.'\ncharacter. For repeated fields, an applicable unique identifier field is\nused for the index (usually selector, name, or id). For maps, the term\n'key' is used. If the field has no unique identifier, the numeric index\nis used.\nExamples:\n- visibility.rules[selector==\"google.LibraryService.CreateBook\"].restriction\n- quota.metric_rules[selector==\"google\"].metric_costs[key==\"reads\"].value\n- logging.producer_destinations[0]", + "files": { + "description": "Set of source configuration files that are used to generate a service\nconfiguration (`google.api.Service`).", + "type": "array", + "items": { + "$ref": "ConfigFile" + } + } + }, + "id": "ConfigSource" + }, + "BackendRule": { + "description": "A backend rule provides configuration for an individual API element.", + "type": "object", + "properties": { + "address": { + "description": "The address of the API backend.", "type": "string" }, - "changeType": { - "description": "The type for this change, either ADDED, REMOVED, or MODIFIED.", - "enum": [ - "CHANGE_TYPE_UNSPECIFIED", - "ADDED", - "REMOVED", - "MODIFIED" - ], - "enumDescriptions": [ - "No value was provided.", - "The changed object exists in the 'new' service configuration, but not\nin the 'old' service configuration.", - "The changed object exists in the 'old' service configuration, but not\nin the 'new' service configuration.", - "The changed object exists in both service configurations, but its value\nis different." - ], + "selector": { + "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", "type": "string" }, - "advices": { - "description": "Collection of advice provided for this change, useful for determining the\npossible impact of this change.", - "type": "array", - "items": { - "$ref": "Advice" - } + "deadline": { + "description": "The number of seconds to wait for a response from a request. 
The\ndefault depends on the deployment context.", + "format": "double", + "type": "number" } }, - "id": "ConfigChange" + "id": "BackendRule" }, - "SystemParameters": { - "description": "### System parameter configuration\n\nA system parameter is a special kind of parameter defined by the API\nsystem, not by an individual API. It is typically mapped to an HTTP header\nand/or a URL query parameter. This configuration specifies which methods\nchange the names of the system parameters.", + "AuthenticationRule": { + "description": "Authentication rules for the service.\n\nBy default, if a method has any authentication requirements, every request\nmust include a valid credential matching one of the requirements.\nIt's an error to include more than one kind of credential in a single\nrequest.\n\nIf a method doesn't have any auth requirements, request credentials will be\nignored.", "type": "object", "properties": { - "rules": { - "description": "Define system parameters.\n\nThe parameters defined here will override the default parameters\nimplemented by the system. If this field is missing from the service\nconfig, default system parameters will be used. Default system parameters\nand names is implementation-dependent.\n\nExample: define api key for all methods\n\n system_parameters\n rules:\n - selector: \"*\"\n parameters:\n - name: api_key\n url_query_parameter: api_key\n\n\nExample: define 2 api key names for a specific method.\n\n system_parameters\n rules:\n - selector: \"/ListShelves\"\n parameters:\n - name: api_key\n http_header: Api-Key1\n - name: api_key\n http_header: Api-Key2\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "oauth": { + "$ref": "OAuthRequirements", + "description": "The requirements for OAuth credentials." + }, + "requirements": { + "description": "Requirements for additional authentication providers.", "type": "array", "items": { - "$ref": "SystemParameterRule" + "$ref": "AuthRequirement" + } + }, + "selector": { + "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + }, + "allowWithoutCredential": { + "description": "Whether to allow requests without a credential. The credential can be\nan OAuth token, Google cookies (first-party auth) or EndUserCreds.\n\nFor requests without credentials, if the service control environment is\nspecified, each incoming request **must** be associated with a service\nconsumer. 
This can be done by passing an API key that belongs to a consumer\nproject.", + "type": "boolean" + } + }, + "id": "AuthenticationRule" + } + }, + "protocol": "rest", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "version": "v1", + "baseUrl": "https://servicemanagement.googleapis.com/", + "canonicalName": "Service Management", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/service.management.readonly": { + "description": "View your Google API service configuration" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/service.management": { + "description": "Manage your Google API service configuration" + } + } + } + }, + "kind": "discovery#restDescription", + "description": "Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.", + "servicePath": "", + "rootUrl": "https://servicemanagement.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "servicemanagement", + "batchPath": "batch", + "revision": "20170211", + "documentationLink": "https://cloud.google.com/service-management/", + "id": "servicemanagement:v1", + "title": "Google Service Management API", + "discoveryVersion": "v1", + "ownerName": "Google", + "resources": { + "services": { + "methods": { + "list": { + "parameterOrder": [], + "response": { + "$ref": "ListServicesResponse" + }, + "httpMethod": "GET", + "parameters": { + "consumerId": { + "location": "query", + "description": "Include services consumed by the specified consumer.\n\nThe Google Service Management implementation accepts the following\nforms:\n- project:\u003cproject_id\u003e", + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Requested size of the next page of data.", + "format": "int32", + "type": "integer" + }, + "producerProjectId": { + "description": "Include services produced by the specified project.", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/service.management", + "https://www.googleapis.com/auth/service.management.readonly" + ], + "flatPath": "v1/services", + "path": "v1/services", + "id": "servicemanagement.services.list", + "description": "Lists managed services.\n\nIf called without any authentication, it returns only the public services.\nIf called with authentication, it returns all services that the caller has\n\"servicemanagement.services.get\" permission for.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\"." 
+ }, + "create": { + "request": { + "$ref": "ManagedService" + }, + "description": "Creates a new managed service.\nPlease note one producer project can own no more than 20 services.\n\nOperation\u003cresponse: ManagedService\u003e", + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "Operation" + }, + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "flatPath": "v1/services", + "id": "servicemanagement.services.create", + "path": "v1/services" + }, + "generateConfigReport": { + "description": "Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration.", + "request": { + "$ref": "GenerateConfigReportRequest" + }, + "response": { + "$ref": "GenerateConfigReportResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "parameters": {}, + "flatPath": "v1/services:generateConfigReport", + "path": "v1/services:generateConfigReport", + "id": "servicemanagement.services.generateConfigReport" + }, + "get": { + "description": "Gets a managed service. Authentication is required unless the service is\npublic.", + "response": { + "$ref": "ManagedService" + }, + "parameterOrder": [ + "serviceName" + ], + "httpMethod": "GET", + "parameters": { + "serviceName": { + "location": "path", + "description": "The name of the service. See the `ServiceManager` overview for naming\nrequirements. For example: `example.googleapis.com`.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/service.management", + "https://www.googleapis.com/auth/service.management.readonly" + ], + "flatPath": "v1/services/{serviceName}", + "path": "v1/services/{serviceName}", + "id": "servicemanagement.services.get" + }, + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^services/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "flatPath": "v1/services/{servicesId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "servicemanagement.services.testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning." + }, + "getConfig": { + "response": { + "$ref": "Service" + }, + "parameterOrder": [ + "serviceName" + ], + "httpMethod": "GET", + "parameters": { + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + }, + "configId": { + "location": "query", + "description": "The id of the service configuration resource.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/service.management", + "https://www.googleapis.com/auth/service.management.readonly" + ], + "flatPath": "v1/services/{serviceName}/config", + "path": "v1/services/{serviceName}/config", + "id": "servicemanagement.services.getConfig", + "description": "Gets a service configuration (version) for a managed service." + }, + "enable": { + "description": "Enable a managed service for a project with default setting.\n\nOperation\u003cresponse: EnableServiceResponse\u003e\n\ngoogle.rpc.Status errors may contain a\ngoogle.rpc.PreconditionFailure error detail.", + "request": { + "$ref": "EnableServiceRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "serviceName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "parameters": { + "serviceName": { + "description": "Name of the service to enable. Specifying an unknown service name will\ncause the request to fail.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/services/{serviceName}:enable", + "id": "servicemanagement.services.enable", + "path": "v1/services/{serviceName}:enable" + }, + "delete": { + "flatPath": "v1/services/{serviceName}", + "path": "v1/services/{serviceName}", + "id": "servicemanagement.services.delete", + "description": "Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "serviceName" + ], + "httpMethod": "DELETE", + "parameters": { + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, + "setIamPolicy": { + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^services/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "flatPath": "v1/services/{servicesId}:setIamPolicy", + "id": "servicemanagement.services.setIamPolicy", + "path": "v1/{+resource}:setIamPolicy" + }, + "disable": { + "flatPath": "v1/services/{serviceName}:disable", + "id": "servicemanagement.services.disable", + "path": "v1/services/{serviceName}:disable", + "description": "Disable a managed service for a project.\n\nOperation\u003cresponse: DisableServiceResponse\u003e", + "request": { + "$ref": "DisableServiceRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "serviceName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "parameters": { + "serviceName": { + "description": "Name of the service to disable. Specifying an unknown service name\nwill cause the request to fail.", + "required": true, + "type": "string", + "location": "path" + } + } + }, + "getIamPolicy": { + "request": { + "$ref": "GetIamPolicyRequest" + }, + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^services/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "flatPath": "v1/services/{servicesId}:getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "id": "servicemanagement.services.getIamPolicy" + }, + "undelete": { + "httpMethod": "POST", + "parameterOrder": [ + "serviceName" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "flatPath": "v1/services/{serviceName}:undelete", + "id": "servicemanagement.services.undelete", + "path": "v1/services/{serviceName}:undelete", + "description": "Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation\u003cresponse: UndeleteServiceResponse\u003e" + } + }, + "resources": { + "configs": { + "methods": { + "submit": { + "httpMethod": "POST", + "parameterOrder": [ + "serviceName" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "flatPath": "v1/services/{serviceName}/configs:submit", + "id": "servicemanagement.services.configs.submit", + "path": "v1/services/{serviceName}/configs:submit", + "request": { + "$ref": "SubmitConfigSourceRequest" + }, + "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e" + }, + "list": { + "flatPath": "v1/services/{serviceName}/configs", + "path": "v1/services/{serviceName}/configs", + "id": "servicemanagement.services.configs.list", + "description": "Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest.", + "response": { + "$ref": "ListServiceConfigsResponse" + }, + "parameterOrder": [ + "serviceName" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/service.management", + "https://www.googleapis.com/auth/service.management.readonly" + ], + "parameters": { + "pageSize": { + "location": "query", + "description": "The max number of items to include in the response list.", + "format": "int32", + "type": "integer" + }, + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + }, + "pageToken": { + "description": "The token of the page to retrieve.", + "type": "string", + "location": "query" + } + } + }, + "get": { + "description": "Gets a service configuration (version) for a managed service.", + "parameterOrder": [ + "serviceName", + "configId" + ], + "response": { + "$ref": "Service" + }, + "httpMethod": "GET", + "parameters": { + "serviceName": { + "location": "path", + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
For example: `example.googleapis.com`.", + "required": true, + "type": "string" + }, + "configId": { + "description": "The id of the service configuration resource.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/service.management", + "https://www.googleapis.com/auth/service.management.readonly" + ], + "flatPath": "v1/services/{serviceName}/configs/{configId}", + "path": "v1/services/{serviceName}/configs/{configId}", + "id": "servicemanagement.services.configs.get" + }, + "create": { + "flatPath": "v1/services/{serviceName}/configs", + "path": "v1/services/{serviceName}/configs", + "id": "servicemanagement.services.configs.create", + "request": { + "$ref": "Service" + }, + "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.", + "response": { + "$ref": "Service" + }, + "parameterOrder": [ + "serviceName" + ], + "httpMethod": "POST", + "parameters": { + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + } + } + }, + "rollouts": { + "methods": { + "list": { + "description": "Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest.", + "httpMethod": "GET", + "parameterOrder": [ + "serviceName" + ], + "response": { + "$ref": "ListServiceRolloutsResponse" + }, + "parameters": { + "pageSize": { + "description": "The max number of items to include in the response list.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "The token of the page to retrieve.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/service.management", + "https://www.googleapis.com/auth/service.management.readonly" + ], + "flatPath": "v1/services/{serviceName}/rollouts", + "id": "servicemanagement.services.rollouts.list", + "path": "v1/services/{serviceName}/rollouts" + }, + "get": { + "description": "Gets a service configuration rollout.", + "response": { + "$ref": "Rollout" + }, + "parameterOrder": [ + "serviceName", + "rolloutId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/service.management", + "https://www.googleapis.com/auth/service.management.readonly" + ], + "parameters": { + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + }, + "rolloutId": { + "description": "The id of the rollout resource.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/services/{serviceName}/rollouts/{rolloutId}", + "path": "v1/services/{serviceName}/rollouts/{rolloutId}", + "id": "servicemanagement.services.rollouts.get" + }, + "create": { + "request": { + "$ref": "Rollout" + }, + "description": "Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOperation\u003cresponse: Rollout\u003e", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "serviceName" + ], + "httpMethod": "POST", + "parameters": { + "serviceName": { + "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "flatPath": "v1/services/{serviceName}/rollouts", + "path": "v1/services/{serviceName}/rollouts", + "id": "servicemanagement.services.rollouts.create" + } } } - }, - "id": "SystemParameters" + } }, - "LabelDescriptor": { - "description": "A description of a label.", - "type": "object", - "properties": { - "description": { - "description": "A human-readable description for the label.", - "type": "string" - }, - "valueType": { - "description": "The type of data that can be assigned to the label.", - "enum": [ - "STRING", - "BOOL", - "INT64" + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "name" ], - "enumDescriptions": [ - "A variable-length string. This is the default.", - "Boolean; true or false.", - "A 64-bit signed integer." + "httpMethod": "GET", + "parameters": { + "name": { + "description": "The name of the operation resource.", + "required": true, + "type": "string", + "pattern": "^operations/.+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" ], - "type": "string" - }, - "key": { - "description": "The label key.", - "type": "string" - } - }, - "id": "LabelDescriptor" - }, - "Usage": { - "description": "Configuration controlling usage of a service.", - "type": "object", - "properties": { - "producerNotificationChannel": { - "description": "The full resource name of a channel used for sending notifications to the\nservice producer.\n\nGoogle Service Management currently only supports\n[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification\nchannel. 
To use Google Cloud Pub/Sub as the channel, this must be the name\nof a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format\ndocumented in https://cloud.google.com/pubsub/docs/overview.", - "type": "string" - }, - "requirements": { - "description": "Requirements that must be satisfied before a consumer project can use the\nservice. Each requirement is of the form \u003cservice.name\u003e/\u003crequirement-id\u003e;\nfor example 'serviceusage.googleapis.com/billing-enabled'.", - "type": "array", - "items": { - "type": "string" - } - }, - "rules": { - "description": "A list of usage rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", - "type": "array", - "items": { - "$ref": "UsageRule" - } - } - }, - "id": "Usage" - }, - "Advice": { - "description": "Generated advice about this change, used for providing more\ninformation about how a change will affect the existing service.", - "type": "object", - "properties": { - "description": { - "description": "Useful description for why this advice was applied and what actions should\nbe taken to mitigate any implied risks.", - "type": "string" - } - }, - "id": "Advice" - }, - "CloudAuditOptions": { - "description": "Write a Cloud Audit log", - "type": "object", - "properties": {}, - "id": "CloudAuditOptions" - }, - "AuthRequirement": { - "description": "User-defined authentication requirements, including support for\n[JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", - "type": "object", - "properties": { - "audiences": { - "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is\nimplemented and accepted in all the runtime components.\n\nThe list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", - "type": "string" - }, - "providerId": { - "description": "id from authentication provider.\n\nExample:\n\n provider_id: bookstore_auth", - "type": "string" - } - }, - "id": "AuthRequirement" - }, - "Control": { - "description": "Selects and configures the service controller used by the service. The\nservice controller handles features like abuse, quota, billing, logging,\nmonitoring, etc.", - "type": "object", - "properties": { - "environment": { - "description": "The service control environment to use. If empty, no control plane\nfeature (like quota and billing) will be enabled.", - "type": "string" - } - }, - "id": "Control" - }, - "SourceContext": { - "description": "`SourceContext` represents information about the source of a\nprotobuf element, like the file in which it is defined.", - "type": "object", - "properties": { - "fileName": { - "description": "The path-qualified name of the .proto file that contained the associated\nprotobuf element. 
For example: `\"google/protobuf/source_context.proto\"`.", - "type": "string" + "flatPath": "v1/operations/{operationsId}", + "path": "v1/{+name}", + "id": "servicemanagement.operations.get" } - }, - "id": "SourceContext" + } } }, - "revision": "20170117", - "basePath": "", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "canonicalName": "Service Management", - "discoveryVersion": "v1", - "baseUrl": "https://servicemanagement.googleapis.com/", - "name": "servicemanagement", "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", + "$.xgafv": { + "description": "V1 error format.", "type": "string", - "location": "query" + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] }, "alt": { - "description": "Data format for response.", - "location": "query", "enum": [ "json", "media", "proto" ], - "default": "json", + "type": "string", "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "type": "string" + "location": "query", + "description": "Data format for response.", + "default": "json" }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", - "type": "string", - "location": "query" + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" }, "bearer_token": { "description": "OAuth bearer token.", "type": "string", "location": "query" }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, "upload_protocol": { + "location": "query", "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", "type": "string", "location": "query" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" } - }, - "documentationLink": "https://cloud.google.com/service-management/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1", - "rootUrl": "https://servicemanagement.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go index 43a248ec7..3c61fecb4 100644 --- a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go +++ b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*APIService, error) { } type APIService struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Operations *OperationsService @@ -87,6 +88,10 @@ func (s *APIService) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOperationsService(s *APIService) *OperationsService { rs := &OperationsService{s: s} return rs @@ -611,6 +616,46 @@ func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AuthorizationConfig: Configuration of authorization. +// +// This section determines the authorization provider, if unspecified, +// then no +// authorization check will be done. +// +// Example: +// +// experimental: +// authorization: +// provider: firebaserules.googleapis.com +type AuthorizationConfig struct { + // Provider: The name of the authorization provider, such + // as + // firebaserules.googleapis.com. + Provider string `json:"provider,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Provider") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Provider") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AuthorizationConfig) MarshalJSON() ([]byte, error) { + type noMethod AuthorizationConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Backend: `Backend` defines the backend configuration for a service. type Backend struct { // Rules: A list of API backend rules that apply to individual API @@ -815,6 +860,18 @@ type Condition struct { // "ATTRIBUTION" - The principal (even if an authority selector is // present), which // must only be used for attribution, not authorization. + // "APPROVER" - An approver (distinct from the requester) that has + // authorized this + // request. + // When used with IN, the condition indicates that one of the + // approvers + // associated with the request matches the specified principal, or is + // a + // member of the specified group. Approvers can only grant + // additional + // access, and are thus only used in a strictly positive context + // (e.g. ALLOW/IN or DENY/NOT_IN). + // See: go/rpc-security-policy-dynamicauth. Iam string `json:"iam,omitempty"` // Op: An operator to apply the subject with. @@ -1805,6 +1862,36 @@ func (s *EnumValue) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Experimental: Experimental service configuration. These configuration +// options can +// only be used by whitelisted users. +type Experimental struct { + // Authorization: Authorization configuration. + Authorization *AuthorizationConfig `json:"authorization,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Authorization") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Authorization") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Experimental) MarshalJSON() ([]byte, error) { + type noMethod Experimental + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Field: A single field of a message type. type Field struct { // Cardinality: The field cardinality. @@ -4041,6 +4128,9 @@ type Service struct { // - name: google.someapi.v1.SomeEnum Enums []*Enum `json:"enums,omitempty"` + // Experimental: Experimental configuration. + Experimental *Experimental `json:"experimental,omitempty"` + // Http: HTTP configuration. 
Http *Http `json:"http,omitempty"` @@ -4344,11 +4434,12 @@ type Step struct { // // Possible values: // "STATUS_UNSPECIFIED" - Unspecifed code. - // "DONE" - The step has completed without errors. - // "NOT_STARTED" - The step has not started yet. - // "IN_PROGRESS" - The step is in progress. - // "FAILED" - The step has completed with errors. - // "CANCELLED" - The step has completed with cancellation. + // "DONE" - The operation or step has completed without errors. + // "NOT_STARTED" - The operation or step has not started yet. + // "IN_PROGRESS" - The operation or step is in progress. + // "FAILED" - The operation or step has completed with errors. + // "CANCELLED" - The operation or step has completed with + // cancellation. Status string `json:"status,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -5101,6 +5192,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5234,6 +5326,7 @@ func (c *ServicesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedservice) if err != nil { @@ -5363,6 +5456,7 @@ func (c *ServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}") @@ -5493,6 +5587,7 @@ func (c *ServicesDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.disableservicerequest) if err != nil { @@ -5634,6 +5729,7 @@ func (c *ServicesEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.enableservicerequest) if err != nil { @@ -5784,6 +5880,7 @@ func (c *ServicesGenerateConfigReportCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateconfigreportrequest) if err != nil { @@ -5918,6 +6015,7 @@ func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6067,6 +6165,7 @@ func (c *ServicesGetConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6207,6 +6306,7 @@ func (c *ServicesGetIamPolicyCall) doRequest(alt string) 
(*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { @@ -6396,6 +6496,7 @@ func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6561,6 +6662,7 @@ func (c *ServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { @@ -6663,6 +6765,12 @@ type ServicesTestIamPermissionsCall struct { // If the resource does not exist, this will return an empty set // of // permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware +// UIs and command-line tools, not for authorization checking. This +// operation +// may "fail open" without warning. func (r *ServicesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ServicesTestIamPermissionsCall { c := &ServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6701,6 +6809,7 @@ func (c *ServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { @@ -6756,7 +6865,7 @@ func (c *ServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", // "flatPath": "v1/services/{servicesId}:testIamPermissions", // "httpMethod": "POST", // "id": "servicemanagement.services.testIamPermissions", @@ -6843,6 +6952,7 @@ func (c *ServicesUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:undelete") @@ -6975,6 +7085,7 @@ func (c *ServicesConfigsCreateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.service) if err != nil { @@ -7121,6 +7232,7 @@ func (c *ServicesConfigsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7286,6 +7398,7 @@ func (c *ServicesConfigsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7462,6 +7575,7 @@ func (c *ServicesConfigsSubmitCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.submitconfigsourcerequest) if err != nil { @@ -7611,6 +7725,7 @@ func (c *ServicesRolloutsCreateCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollout) if err != nil { @@ -7757,6 +7872,7 @@ func (c *ServicesRolloutsGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7922,6 +8038,7 @@ func (c *ServicesRolloutsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/serviceuser/v1/serviceuser-api.json b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-api.json new file mode 100644 index 000000000..4562d8e7b --- /dev/null +++ b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-api.json @@ -0,0 +1,1647 @@ +{ + "title": "Google Service User API", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "services": { + "methods": { + "enable": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "parameters": { + "name": { + "description": "Name of the consumer and the 
service to enable for that consumer.\n\nA valid path would be:\n- /v1/projects/my-project/services/servicemanagement.googleapis.com:enable", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/services/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/services/{servicesId}:enable", + "path": "v1/{+name}:enable", + "id": "serviceuser.projects.services.enable", + "description": "Enable a managed service for a consumer with the default settings.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e\n\ngoogle.rpc.Status errors may contain a\ngoogle.rpc.PreconditionFailure error detail.", + "request": { + "$ref": "EnableServiceRequest" + } + }, + "list": { + "description": "List enabled services for the specified consumer.", + "response": { + "$ref": "ListEnabledServicesResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ], + "parameters": { + "parent": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "List enabled services for the specified parent.\n\nAn example valid parent would be:\n- projects/my-project" + }, + "pageToken": { + "location": "query", + "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Requested size of the next page of data.", + "format": "int32", + "type": "integer" + } + }, + "flatPath": "v1/projects/{projectsId}/services", + "path": "v1/{+parent}/services", + "id": "serviceuser.projects.services.list" + }, + "disable": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ], + "parameters": { + "name": { + "location": "path", + "description": "Name of the consumer and the service to disable for that consumer.\n\nThe Service User implementation accepts the following forms for consumer:\n- \"project:\u003cproject_id\u003e\"\n\nA valid path would be:\n- /v1/projects/my-project/services/servicemanagement.googleapis.com:disable", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/services/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/services/{servicesId}:disable", + "path": "v1/{+name}:disable", + "id": "serviceuser.projects.services.disable", + "description": "Disable a managed service for a consumer.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + "request": { + "$ref": "DisableServiceRequest" + } + } + } + } + } + } + }, + "parameters": { + "access_token": { + "type": "string", + "location": "query", + "description": "OAuth access token." + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "type": "string", + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\")." + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ] + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + } + }, + "version": "v1", + "baseUrl": "https://serviceuser.googleapis.com/", + "servicePath": "", + "description": "The Service User API allows service consumers to enable services they want to use on Google Cloud Platform or disable services they no longer use. Consumers can also list the set of services they have already enabled.", + "kind": "discovery#restDescription", + "basePath": "", + "revision": "20170211", + "documentationLink": "https://cloud.google.com/service-user/", + "id": "serviceuser:v1", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "Visibility": { + "properties": { + "rules": { + "description": "A list of visibility rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "VisibilityRule" + } + } + }, + "id": "Visibility", + "description": "`Visibility` defines restrictions for the visibility of service\nelements. Restrictions are specified using visibility labels\n(e.g., TRUSTED_TESTER) that are elsewhere linked to users and projects.\n\nUsers and projects can have access to more than one visibility label. 
The\neffective visibility for multiple labels is the union of each label's\nelements, plus any unrestricted elements.\n\nIf an element and its parents have no restrictions, visibility is\nunconditionally granted.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: TRUSTED_TESTER\n - selector: google.calendar.Calendar.Delegate\n restriction: GOOGLE_INTERNAL\n\nHere, all methods are publicly visible except for the restricted methods\nEnhancedSearch and Delegate.", + "type": "object" + }, + "SystemParameters": { + "description": "### System parameter configuration\n\nA system parameter is a special kind of parameter defined by the API\nsystem, not by an individual API. It is typically mapped to an HTTP header\nand/or a URL query parameter. This configuration specifies which methods\nchange the names of the system parameters.", + "type": "object", + "properties": { + "rules": { + "type": "array", + "items": { + "$ref": "SystemParameterRule" + }, + "description": "Define system parameters.\n\nThe parameters defined here will override the default parameters\nimplemented by the system. If this field is missing from the service\nconfig, default system parameters will be used. Default system parameters\nand names is implementation-dependent.\n\nExample: define api key for all methods\n\n system_parameters\n rules:\n - selector: \"*\"\n parameters:\n - name: api_key\n url_query_parameter: api_key\n\n\nExample: define 2 api key names for a specific method.\n\n system_parameters\n rules:\n - selector: \"/ListShelves\"\n parameters:\n - name: api_key\n http_header: Api-Key1\n - name: api_key\n http_header: Api-Key2\n\n**NOTE:** All service configuration rules follow \"last one wins\" order." + } + }, + "id": "SystemParameters" + }, + "EnabledService": { + "description": "An EnabledService message contains the details about a service that has been\nenabled for use.", + "type": "object", + "properties": { + "service": { + "$ref": "Service", + "description": "The Service definition for the enabled service\nOnly the name and title fields will be populated." + } + }, + "id": "EnabledService" + }, + "LoggingDestination": { + "id": "LoggingDestination", + "description": "Configuration of a specific logging destination (the producer project\nor the consumer project).", + "type": "object", + "properties": { + "logs": { + "description": "Names of the logs to be sent to this destination. Each name must\nbe defined in the Service.logs section. If the log name is\nnot a domain scoped name, it will be automatically prefixed with\nthe service name followed by \"/\".", + "type": "array", + "items": { + "type": "string" + } + }, + "monitoredResource": { + "description": "The monitored resource type. The type must be defined in the\nService.monitored_resources section.", + "type": "string" + } + } + }, + "Option": { + "description": "A protocol buffer option, which can be attached to a message, field,\nenumeration, etc.", + "type": "object", + "properties": { + "name": { + "description": "The option's name. For protobuf built-in options (options defined in\ndescriptor.proto), this is the short name. For example, `\"map_entry\"`.\nFor custom options, it should be the fully-qualified name. For example,\n`\"google.api.http\"`.", + "type": "string" + }, + "value": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The option's value packed in an Any message. 
If the value is a primitive,\nthe corresponding wrapper type defined in google/protobuf/wrappers.proto\nshould be used. If the value is an enum, it should be stored as an int32\nvalue using the google.protobuf.Int32Value type.", + "type": "object" + } + }, + "id": "Option" + }, + "Logging": { + "description": "Logging configuration of the service.\n\nThe following example shows how to configure logs to be sent to the\nproducer and consumer projects. In the example, the `activity_history`\nlog is sent to both the producer and consumer projects, whereas the\n`purchase_history` log is only sent to the producer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n logs:\n - name: activity_history\n labels:\n - key: /customer_id\n - name: purchase_history\n logging:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history\n - purchase_history\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history", + "type": "object", + "properties": { + "producerDestinations": { + "description": "Logging configurations for sending logs to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none producer destination.", + "type": "array", + "items": { + "$ref": "LoggingDestination" + } + }, + "consumerDestinations": { + "description": "Logging configurations for sending logs to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none consumer destination.", + "type": "array", + "items": { + "$ref": "LoggingDestination" + } + } + }, + "id": "Logging" + }, + "Method": { + "description": "Method represents a method of an api.", + "type": "object", + "properties": { + "options": { + "description": "Any metadata attached to the method.", + "type": "array", + "items": { + "$ref": "Option" + } + }, + "responseStreaming": { + "description": "If true, the response is streamed.", + "type": "boolean" + }, + "name": { + "description": "The simple name of this method.", + "type": "string" + }, + "requestTypeUrl": { + "description": "A URL of the input message type.", + "type": "string" + }, + "requestStreaming": { + "type": "boolean", + "description": "If true, the request is streamed." + }, + "syntax": { + "enumDescriptions": [ + "Syntax `proto2`.", + "Syntax `proto3`." + ], + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax of this method.", + "type": "string" + }, + "responseTypeUrl": { + "description": "The URL of the output message type.", + "type": "string" + } + }, + "id": "Method" + }, + "Mixin": { + "id": "Mixin", + "description": "Declares an API to be included in this API. The including API must\nredeclare all the methods from the included API, but documentation\nand options are inherited as follows:\n\n- If after comment and whitespace stripping, the documentation\n string of the redeclared method is empty, it will be inherited\n from the original method.\n\n- Each annotation belonging to the service config (http,\n visibility) which is not set in the redeclared method will be\n inherited.\n\n- If an http annotation is inherited, the path pattern will be\n modified as follows. 
Any version prefix will be replaced by the\n version of the including API plus the root path if specified.\n\nExample of a simple mixin:\n\n package google.acl.v1;\n service AccessControl {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n }\n }\n\n package google.storage.v2;\n service Storage {\n // rpc GetAcl(GetAclRequest) returns (Acl);\n\n // Get a data record.\n rpc GetData(GetDataRequest) returns (Data) {\n option (google.api.http).get = \"/v2/{resource=**}\";\n }\n }\n\nExample of a mixin configuration:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n\nThe mixin construct implies that all methods in `AccessControl` are\nalso declared with same name and request/response types in\n`Storage`. A documentation generator or annotation processor will\nsee the effective `Storage.GetAcl` method after inherting\ndocumentation and annotations as follows:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n }\n ...\n }\n\nNote how the version in the path pattern changed from `v1` to `v2`.\n\nIf the `root` field in the mixin is specified, it should be a\nrelative path under which inherited HTTP paths are placed. Example:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n root: acls\n\nThis implies the following inherited HTTP annotation:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n }\n ...\n }", + "type": "object", + "properties": { + "root": { + "description": "If non-empty specifies a path under which inherited HTTP paths\nare rooted.", + "type": "string" + }, + "name": { + "type": "string", + "description": "The fully qualified name of the API which is included." + } + } + }, + "CustomError": { + "properties": { + "types": { + "description": "The list of custom error detail types, e.g. 'google.foo.v1.CustomError'.", + "type": "array", + "items": { + "type": "string" + } + }, + "rules": { + "description": "The list of custom error rules that apply to individual API messages.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "CustomErrorRule" + } + } + }, + "id": "CustomError", + "description": "Customize service error responses. For example, list any service\nspecific protobuf types that can appear in error detail lists of\nerror responses.\n\nExample:\n\n custom_error:\n types:\n - google.foo.v1.CustomError\n - google.foo.v1.AnotherError", + "type": "object" + }, + "Http": { + "description": "Defines the HTTP configuration for a service. It contains a list of\nHttpRule, each specifying the mapping of an RPC method\nto one or more HTTP REST API methods.", + "type": "object", + "properties": { + "rules": { + "description": "A list of HTTP configuration rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "HttpRule" + } + } + }, + "id": "Http" + }, + "Control": { + "type": "object", + "properties": { + "environment": { + "description": "The service control environment to use. 
If empty, no control plane\nfeature (like quota and billing) will be enabled.", + "type": "string" + } + }, + "id": "Control", + "description": "Selects and configures the service controller used by the service. The\nservice controller handles features like abuse, quota, billing, logging,\nmonitoring, etc." + }, + "SystemParameter": { + "description": "Define a parameter's name and location. The parameter may be passed as either\nan HTTP header or a URL query parameter, and if both are passed the behavior\nis implementation-dependent.", + "type": "object", + "properties": { + "name": { + "description": "Define the name of the parameter, such as \"api_key\" . It is case sensitive.", + "type": "string" + }, + "urlQueryParameter": { + "description": "Define the URL query parameter name to use for the parameter. It is case\nsensitive.", + "type": "string" + }, + "httpHeader": { + "description": "Define the HTTP header name to use for the parameter. It is case\ninsensitive.", + "type": "string" + } + }, + "id": "SystemParameter" + }, + "Field": { + "description": "A single field of a message type.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enumDescriptions": [ + "Field type unknown.", + "Field type double.", + "Field type float.", + "Field type int64.", + "Field type uint64.", + "Field type int32.", + "Field type fixed64.", + "Field type fixed32.", + "Field type bool.", + "Field type string.", + "Field type group. Proto2 syntax only, and deprecated.", + "Field type message.", + "Field type bytes.", + "Field type uint32.", + "Field type enum.", + "Field type sfixed32.", + "Field type sfixed64.", + "Field type sint32.", + "Field type sint64." + ], + "enum": [ + "TYPE_UNKNOWN", + "TYPE_DOUBLE", + "TYPE_FLOAT", + "TYPE_INT64", + "TYPE_UINT64", + "TYPE_INT32", + "TYPE_FIXED64", + "TYPE_FIXED32", + "TYPE_BOOL", + "TYPE_STRING", + "TYPE_GROUP", + "TYPE_MESSAGE", + "TYPE_BYTES", + "TYPE_UINT32", + "TYPE_ENUM", + "TYPE_SFIXED32", + "TYPE_SFIXED64", + "TYPE_SINT32", + "TYPE_SINT64" + ], + "description": "The field type." + }, + "jsonName": { + "description": "The field JSON name.", + "type": "string" + }, + "options": { + "description": "The protocol buffer options.", + "type": "array", + "items": { + "$ref": "Option" + } + }, + "oneofIndex": { + "description": "The index of the field type in `Type.oneofs`, for message or enumeration\ntypes. The first type has index 1; zero means the type is not in the list.", + "format": "int32", + "type": "integer" + }, + "packed": { + "type": "boolean", + "description": "Whether to use alternative packed wire representation." + }, + "cardinality": { + "type": "string", + "enumDescriptions": [ + "For fields with unknown cardinality.", + "For optional fields.", + "For required fields. Proto2 syntax only.", + "For repeated fields." + ], + "enum": [ + "CARDINALITY_UNKNOWN", + "CARDINALITY_OPTIONAL", + "CARDINALITY_REQUIRED", + "CARDINALITY_REPEATED" + ], + "description": "The field cardinality." + }, + "defaultValue": { + "description": "The string value of the default value of this field. Proto2 syntax only.", + "type": "string" + }, + "name": { + "description": "The field name.", + "type": "string" + }, + "typeUrl": { + "type": "string", + "description": "The field type URL, without the scheme, for message or enumeration\ntypes. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`." 
+ }, + "number": { + "description": "The field number.", + "format": "int32", + "type": "integer" + } + }, + "id": "Field" + }, + "Monitoring": { + "description": "Monitoring configuration of the service.\n\nThe example below shows how to configure monitored resources and metrics\nfor monitoring. In the example, a monitored resource and two metrics are\ndefined. The `library.googleapis.com/book/returned_count` metric is sent\nto both producer and consumer projects, whereas the\n`library.googleapis.com/book/overdue_count` metric is only sent to the\nconsumer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n metrics:\n - name: library.googleapis.com/book/returned_count\n metric_kind: DELTA\n value_type: INT64\n labels:\n - key: /customer_id\n - name: library.googleapis.com/book/overdue_count\n metric_kind: GAUGE\n value_type: INT64\n labels:\n - key: /customer_id\n monitoring:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n - library.googleapis.com/book/overdue_count", + "type": "object", + "properties": { + "consumerDestinations": { + "description": "Monitoring configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A metric can be used in at most\none consumer destination.", + "type": "array", + "items": { + "$ref": "MonitoringDestination" + } + }, + "producerDestinations": { + "description": "Monitoring configurations for sending metrics to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. A metric can be used in at most\none producer destination.", + "type": "array", + "items": { + "$ref": "MonitoringDestination" + } + } + }, + "id": "Monitoring" + }, + "Enum": { + "type": "object", + "properties": { + "name": { + "description": "Enum type name.", + "type": "string" + }, + "enumvalue": { + "description": "Enum value definitions.", + "type": "array", + "items": { + "$ref": "EnumValue" + } + }, + "options": { + "description": "Protocol buffer options.", + "type": "array", + "items": { + "$ref": "Option" + } + }, + "sourceContext": { + "description": "The source context.", + "$ref": "SourceContext" + }, + "syntax": { + "type": "string", + "enumDescriptions": [ + "Syntax `proto2`.", + "Syntax `proto3`." + ], + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax." + } + }, + "id": "Enum", + "description": "Enum type definition." + }, + "EnableServiceRequest": { + "properties": {}, + "id": "EnableServiceRequest", + "description": "Request message for EnableService method.", + "type": "object" + }, + "LabelDescriptor": { + "description": "A description of a label.", + "type": "object", + "properties": { + "key": { + "description": "The label key.", + "type": "string" + }, + "description": { + "description": "A human-readable description for the label.", + "type": "string" + }, + "valueType": { + "description": "The type of data that can be assigned to the label.", + "type": "string", + "enumDescriptions": [ + "A variable-length string. 
This is the default.", + "Boolean; true or false.", + "A 64-bit signed integer." + ], + "enum": [ + "STRING", + "BOOL", + "INT64" + ] + } + }, + "id": "LabelDescriptor" + }, + "Type": { + "description": "A protocol buffer message type.", + "type": "object", + "properties": { + "options": { + "description": "The protocol buffer options.", + "type": "array", + "items": { + "$ref": "Option" + } + }, + "fields": { + "description": "The list of fields.", + "type": "array", + "items": { + "$ref": "Field" + } + }, + "name": { + "description": "The fully qualified message name.", + "type": "string" + }, + "oneofs": { + "description": "The list of types appearing in `oneof` definitions in this type.", + "type": "array", + "items": { + "type": "string" + } + }, + "syntax": { + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax.", + "type": "string", + "enumDescriptions": [ + "Syntax `proto2`.", + "Syntax `proto3`." + ] + }, + "sourceContext": { + "$ref": "SourceContext", + "description": "The source context." + } + }, + "id": "Type" + }, + "Experimental": { + "description": "Experimental service configuration. These configuration options can\nonly be used by whitelisted users.", + "type": "object", + "properties": { + "authorization": { + "$ref": "AuthorizationConfig", + "description": "Authorization configuration." + } + }, + "id": "Experimental" + }, + "Backend": { + "id": "Backend", + "description": "`Backend` defines the backend configuration for a service.", + "type": "object", + "properties": { + "rules": { + "description": "A list of API backend rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "BackendRule" + } + } + } + }, + "AuthorizationConfig": { + "description": "Configuration of authorization.\n\nThis section determines the authorization provider, if unspecified, then no\nauthorization check will be done.\n\nExample:\n\n experimental:\n authorization:\n provider: firebaserules.googleapis.com", + "type": "object", + "properties": { + "provider": { + "type": "string", + "description": "The name of the authorization provider, such as\nfirebaserules.googleapis.com." + } + }, + "id": "AuthorizationConfig" + }, + "DocumentationRule": { + "properties": { + "description": { + "type": "string", + "description": "Description of the selected API(s)." + }, + "deprecationDescription": { + "description": "Deprecation description of the selected element(s). It can be provided if an\nelement is marked as `deprecated`.", + "type": "string" + }, + "selector": { + "type": "string", + "description": "The selector is a comma-separated list of patterns. Each pattern is a\nqualified name of the element which may end in \"*\", indicating a wildcard.\nWildcards are only allowed at the end and for a whole component of the\nqualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". To\nspecify a default for all applicable elements, the whole pattern \"*\"\nis used." 
+ } + }, + "id": "DocumentationRule", + "description": "A documentation rule provides information about individual API elements.", + "type": "object" + }, + "ContextRule": { + "description": "A context rule provides information about the context for an individual API\nelement.", + "type": "object", + "properties": { + "selector": { + "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + }, + "provided": { + "description": "A list of full type names of provided contexts.", + "type": "array", + "items": { + "type": "string" + } + }, + "requested": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of full type names of requested contexts." + } + }, + "id": "ContextRule" + }, + "SourceContext": { + "description": "`SourceContext` represents information about the source of a\nprotobuf element, like the file in which it is defined.", + "type": "object", + "properties": { + "fileName": { + "description": "The path-qualified name of the .proto file that contained the associated\nprotobuf element. For example: `\"google/protobuf/source_context.proto\"`.", + "type": "string" + } + }, + "id": "SourceContext" + }, + "MetricDescriptor": { + "id": "MetricDescriptor", + "description": "Defines a metric type and its schema. Once a metric descriptor is created,\ndeleting or altering it stops data collection and makes the metric type's\nexisting data unusable.", + "type": "object", + "properties": { + "metricKind": { + "description": "Whether the metric records instantaneous values, changes to a value, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "An instantaneous measurement of a value.", + "The change in a value during a time interval.", + "A value accumulated over a time interval. Cumulative\nmeasurements in a time series should have the same start time\nand increasing end times, until an event resets the cumulative\nvalue to zero and sets a new start time for the following\npoints." + ], + "enum": [ + "METRIC_KIND_UNSPECIFIED", + "GAUGE", + "DELTA", + "CUMULATIVE" + ] + }, + "description": { + "description": "A detailed description of the metric, which can be used in documentation.", + "type": "string" + }, + "displayName": { + "description": "A concise name for the metric, which can be displayed in user interfaces.\nUse sentence case without an ending period, for example \"Request count\".", + "type": "string" + }, + "unit": { + "description": "The unit in which the metric value is reported. It is only applicable\nif the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. 
The\nsupported units are a subset of [The Unified Code for Units of\nMeasure](http://unitsofmeasure.org/ucum.html) standard:\n\n**Basic units (UNIT)**\n\n* `bit` bit\n* `By` byte\n* `s` second\n* `min` minute\n* `h` hour\n* `d` day\n\n**Prefixes (PREFIX)**\n\n* `k` kilo (10**3)\n* `M` mega (10**6)\n* `G` giga (10**9)\n* `T` tera (10**12)\n* `P` peta (10**15)\n* `E` exa (10**18)\n* `Z` zetta (10**21)\n* `Y` yotta (10**24)\n* `m` milli (10**-3)\n* `u` micro (10**-6)\n* `n` nano (10**-9)\n* `p` pico (10**-12)\n* `f` femto (10**-15)\n* `a` atto (10**-18)\n* `z` zepto (10**-21)\n* `y` yocto (10**-24)\n* `Ki` kibi (2**10)\n* `Mi` mebi (2**20)\n* `Gi` gibi (2**30)\n* `Ti` tebi (2**40)\n\n**Grammar**\n\nThe grammar includes the dimensionless unit `1`, such as `1/s`.\n\nThe grammar also includes these connectors:\n\n* `/` division (as an infix operator, e.g. `1/s`).\n* `.` multiplication (as an infix operator, e.g. `GBy.d`)\n\nThe grammar for a unit is as follows:\n\n Expression = Component { \".\" Component } { \"/\" Component } ;\n\n Component = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\n Annotation = \"{\" NAME \"}\" ;\n\nNotes:\n\n* `Annotation` is just a comment if it follows a `UNIT` and is\n equivalent to `1` if it is used alone. For examples,\n `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.\n* `NAME` is a sequence of non-blank printable ASCII characters not\n containing '{' or '}'.", + "type": "string" + }, + "labels": { + "description": "The set of labels that can be used to describe a specific\ninstance of this metric type. For example, the\n`appengine.googleapis.com/http/server/response_latencies` metric\ntype has a label for the HTTP response code, `response_code`, so\nyou can look at latencies for successful responses or just\nfor responses that failed.", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } + }, + "name": { + "description": "The resource name of the metric descriptor. Depending on the\nimplementation, the name typically includes: (1) the parent resource name\nthat defines the scope of the metric type or of its data; and (2) the\nmetric's URL-encoded type, which also appears in the `type` field of this\ndescriptor. For example, following is the resource name of a custom\nmetric within the GCP project `my-project-id`:\n\n \"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"", + "type": "string" + }, + "type": { + "description": "The metric type, including its DNS name prefix. The type is not\nURL-encoded. All user-defined custom metric types have the DNS name\n`custom.googleapis.com`. Metric types should use a natural hierarchical\ngrouping. For example:\n\n \"custom.googleapis.com/invoice/paid/amount\"\n \"appengine.googleapis.com/http/server/response_latencies\"", + "type": "string" + }, + "valueType": { + "enumDescriptions": [ + "Do not use this default value.", + "The value is a boolean.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a signed 64-bit integer.", + "The value is a double precision floating point number.", + "The value is a text string.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a `Distribution`.", + "The value is money." 
+ ], + "enum": [ + "VALUE_TYPE_UNSPECIFIED", + "BOOL", + "INT64", + "DOUBLE", + "STRING", + "DISTRIBUTION", + "MONEY" + ], + "description": "Whether the measurement is an integer, a floating-point number, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "type": "string" + } + } + }, + "ListEnabledServicesResponse": { + "description": "Response message for `ListEnabledServices` method.", + "type": "object", + "properties": { + "services": { + "description": "Services enabled for the specified parent.", + "type": "array", + "items": { + "$ref": "EnabledService" + } + }, + "nextPageToken": { + "description": "Token that can be passed to `ListEnabledServices` to resume a paginated\nquery.", + "type": "string" + } + }, + "id": "ListEnabledServicesResponse" + }, + "Endpoint": { + "description": "`Endpoint` describes a network endpoint that serves a set of APIs.\nA service may expose any number of endpoints, and all endpoints share the\nsame service configuration, such as quota configuration and monitoring\nconfiguration.\n\nExample service configuration:\n\n name: library-example.googleapis.com\n endpoints:\n # Below entry makes 'google.example.library.v1.Library'\n # API be served from endpoint address library-example.googleapis.com.\n # It also allows HTTP OPTIONS calls to be passed to the backend, for\n # it to decide whether the subsequent cross-origin request is\n # allowed to proceed.\n - name: library-example.googleapis.com\n allow_cors: true", + "type": "object", + "properties": { + "apis": { + "description": "The list of APIs served by this endpoint.", + "type": "array", + "items": { + "type": "string" + } + }, + "aliases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "DEPRECATED: This field is no longer supported. Instead of using aliases,\nplease specify multiple google.api.Endpoint for each of the intented\nalias.\n\nAdditional names that this endpoint will be hosted on." + }, + "features": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of features enabled on this endpoint." + }, + "allowCors": { + "type": "boolean", + "description": "Allowing\n[CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka\ncross-domain traffic, would allow the backends served from this endpoint to\nreceive and respond to HTTP OPTIONS requests. The response will be used by\nthe browser to determine whether the subsequent cross-origin request is\nallowed to proceed." + }, + "name": { + "description": "The canonical name of this endpoint.", + "type": "string" + } + }, + "id": "Endpoint" + }, + "OAuthRequirements": { + "description": "OAuth scopes are a way to define data and permissions on data. For example,\nthere are scopes defined for \"Read-only access to Google Calendar\" and\n\"Access to Cloud Platform\". Users can consent to a scope for an application,\ngiving it permission to access that data on their behalf.\n\nOAuth scope specifications should be fairly coarse grained; a user will need\nto see and understand the text description of what your scope means.\n\nIn most cases: use one or at most two OAuth scopes for an entire family of\nproducts. 
If your product has multiple APIs, you should probably be sharing\nthe OAuth scope across all of those APIs.\n\nWhen you need finer grained OAuth consent screens: talk with your product\nmanagement about how developers will use them in practice.\n\nPlease note that even though each of the canonical scopes is enough for a\nrequest to be accepted and passed to the backend, a request can still fail\ndue to the backend requiring additional scopes or permissions.", + "type": "object", + "properties": { + "canonicalScopes": { + "description": "The list of publicly documented OAuth scopes that are allowed access. An\nOAuth token containing any of these scopes will be accepted.\n\nExample:\n\n canonical_scopes: https://www.googleapis.com/auth/calendar,\n https://www.googleapis.com/auth/calendar.read", + "type": "string" + } + }, + "id": "OAuthRequirements" + }, + "Usage": { + "description": "Configuration controlling usage of a service.", + "type": "object", + "properties": { + "requirements": { + "description": "Requirements that must be satisfied before a consumer project can use the\nservice. Each requirement is of the form \u003cservice.name\u003e/\u003crequirement-id\u003e;\nfor example 'serviceusage.googleapis.com/billing-enabled'.", + "type": "array", + "items": { + "type": "string" + } + }, + "producerNotificationChannel": { + "description": "The full resource name of a channel used for sending notifications to the\nservice producer.\n\nGoogle Service Management currently only supports\n[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification\nchannel. To use Google Cloud Pub/Sub as the channel, this must be the name\nof a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format\ndocumented in https://cloud.google.com/pubsub/docs/overview.", + "type": "string" + }, + "rules": { + "description": "A list of usage rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "UsageRule" + } + } + }, + "id": "Usage" + }, + "Context": { + "description": "`Context` defines which contexts an API requests.\n\nExample:\n\n context:\n rules:\n - selector: \"*\"\n requested:\n - google.rpc.context.ProjectContext\n - google.rpc.context.OriginContext\n\nThe above specifies that all methods in the API request\n`google.rpc.context.ProjectContext` and\n`google.rpc.context.OriginContext`.\n\nAvailable context types are defined in package\n`google.rpc.context`.", + "type": "object", + "properties": { + "rules": { + "description": "A list of RPC context rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "ContextRule" + } + } + }, + "id": "Context" + }, + "LogDescriptor": { + "type": "object", + "properties": { + "name": { + "description": "The name of the log. It must be less than 512 characters long and can\ninclude the following characters: upper- and lower-case alphanumeric\ncharacters [A-Za-z0-9], and punctuation characters including\nslash, underscore, hyphen, period [/_-.].", + "type": "string" + }, + "description": { + "type": "string", + "description": "A human-readable description of this log. This information appears in\nthe documentation and can contain details." + }, + "displayName": { + "description": "The human-readable name for this log. 
This information appears on\nthe user interface and should be concise.", + "type": "string" + }, + "labels": { + "description": "The set of labels that are available to describe a specific log entry.\nRuntime requests that contain labels not specified here are\nconsidered invalid.", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } + } + }, + "id": "LogDescriptor", + "description": "A description of a log type. Example in YAML format:\n\n - name: library.googleapis.com/activity_history\n description: The history of borrowing and returning library items.\n display_name: Activity\n labels:\n - key: /customer_id\n description: Identifier of a library customer" + }, + "MonitoredResourceDescriptor": { + "id": "MonitoredResourceDescriptor", + "description": "An object that describes the schema of a MonitoredResource object using a\ntype name and a set of labels. For example, the monitored resource\ndescriptor for Google Compute Engine VM instances has a type of\n`\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and\n`\"zone\"` to identify particular VM instances.\n\nDifferent APIs can support different monitored resource types. APIs generally\nprovide a `list` method that returns the monitored resource descriptors used\nby the API.", + "type": "object", + "properties": { + "displayName": { + "description": "Optional. A concise name for the monitored resource type that might be\ndisplayed in user interfaces. It should be a Title Cased Noun Phrase,\nwithout any article or other determiners. For example,\n`\"Google Cloud SQL Database\"`.", + "type": "string" + }, + "description": { + "description": "Optional. A detailed description of the monitored resource type that might\nbe used in documentation.", + "type": "string" + }, + "type": { + "description": "Required. The monitored resource type. For example, the type\n`\"cloudsql_database\"` represents databases in Google Cloud SQL.\nThe maximum length of this value is 256 characters.", + "type": "string" + }, + "labels": { + "description": "Required. A set of labels used to describe instances of this monitored\nresource type. For example, an individual Google Cloud SQL database is\nidentified by values for the labels `\"database_id\"` and `\"zone\"`.", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } + }, + "name": { + "description": "Optional. The resource name of the monitored resource descriptor:\n`\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where\n{type} is the value of the `type` field in this object and\n{project_id} is a project ID that provides API-specific context for\naccessing the type. APIs that do not use project information can use the\nresource name format `\"monitoredResourceDescriptors/{type}\"`.", + "type": "string" + } + } + }, + "CustomErrorRule": { + "id": "CustomErrorRule", + "description": "A custom error rule.", + "type": "object", + "properties": { + "isErrorType": { + "description": "Mark this message as possible payload in error response. Otherwise,\nobjects of this type will be filtered when they appear in error payload.", + "type": "boolean" + }, + "selector": { + "description": "Selects messages to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + } + } + }, + "MediaDownload": { + "description": "Do not use this. 
For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", + "type": "object", + "properties": { + "enabled": { + "description": "Whether download is enabled.", + "type": "boolean" + } + }, + "id": "MediaDownload" + }, + "DisableServiceRequest": { + "id": "DisableServiceRequest", + "description": "Request message for DisableService method.", + "type": "object", + "properties": {} + }, + "MediaUpload": { + "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration.", + "type": "object", + "properties": { + "enabled": { + "description": "Whether upload is enabled.", + "type": "boolean" + } + }, + "id": "MediaUpload" + }, + "UsageRule": { + "properties": { + "selector": { + "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "type": "string" + }, + "allowUnregisteredCalls": { + "description": "True, if the method allows unregistered calls; false otherwise.", + "type": "boolean" + } + }, + "id": "UsageRule", + "description": "Usage configuration rules for the service.\n\nNOTE: Under development.\n\n\nUse this rule to configure unregistered calls for the service. Unregistered\ncalls are calls that do not contain consumer project identity.\n(Example: calls that do not contain an API key).\nBy default, API methods do not allow unregistered calls, and each method call\nmust be identified by a consumer project identity. Use this rule to\nallow/disallow unregistered calls.\n\nExample of an API that wants to allow unregistered calls for entire service.\n\n usage:\n rules:\n - selector: \"*\"\n allow_unregistered_calls: true\n\nExample of a method that wants to allow unregistered calls.\n\n usage:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allow_unregistered_calls: true", + "type": "object" + }, + "AuthRequirement": { + "id": "AuthRequirement", + "description": "User-defined authentication requirements, including support for\n[JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "type": "object", + "properties": { + "providerId": { + "description": "id from authentication provider.\n\nExample:\n\n provider_id: bookstore_auth", + "type": "string" + }, + "audiences": { + "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is\nimplemented and accepted in all the runtime components.\n\nThe list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. 
For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "type": "string" + } + } + }, + "Documentation": { + "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: (== include google/foo/overview.md ==)\n - name: Tutorial\n content: (== include google/foo/tutorial.md ==)\n subpages;\n - name: Java\n content: (== include google/foo/tutorial_java.md ==)\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e[fully.qualified.proto.name][]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e[display text][fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e(-- internal comment --)\u003c/code\u003e\u003c/pre\u003e\nComments can be made conditional using a visibility label. The below\ntext will be only rendered if the `BETA` label is available:\n\u003cpre\u003e\u003ccode\u003e(--BETA: comment for BETA users --)\u003c/code\u003e\u003c/pre\u003e\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e(== include path/to/file ==)\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e(== resource_for v1.shelves.books ==)\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "type": "object", + "properties": { + "summary": { + "type": "string", + "description": "A short summary of what the service does. Can only be provided by\nplain text." 
+ }, + "documentationRootUrl": { + "description": "The URL to the root of documentation.", + "type": "string" + }, + "rules": { + "description": "A list of documentation rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "type": "array", + "items": { + "$ref": "DocumentationRule" + } + }, + "overview": { + "type": "string", + "description": "Declares a single overview page. For example:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n overview: (== include overview.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nThis is a shortcut for the following declaration (using pages style):\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n pages:\n - name: Overview\n content: (== include overview.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nNote: you cannot specify both `overview` field and `pages` field." + }, + "pages": { + "description": "The top level pages for the documentation set.", + "type": "array", + "items": { + "$ref": "Page" + } + } + }, + "id": "Documentation" + }, + "BackendRule": { + "id": "BackendRule", + "description": "A backend rule provides configuration for an individual API element.", + "type": "object", + "properties": { + "address": { + "description": "The address of the API backend.", + "type": "string" + }, + "selector": { + "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + }, + "deadline": { + "description": "The number of seconds to wait for a response from a request. The\ndefault depends on the deployment context.", + "format": "double", + "type": "number" + } + } + }, + "AuthenticationRule": { + "description": "Authentication rules for the service.\n\nBy default, if a method has any authentication requirements, every request\nmust include a valid credential matching one of the requirements.\nIt's an error to include more than one kind of credential in a single\nrequest.\n\nIf a method doesn't have any auth requirements, request credentials will be\nignored.", + "type": "object", + "properties": { + "selector": { + "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + }, + "allowWithoutCredential": { + "description": "Whether to allow requests without a credential. The credential can be\nan OAuth token, Google cookies (first-party auth) or EndUserCreds.\n\nFor requests without credentials, if the service control environment is\nspecified, each incoming request **must** be associated with a service\nconsumer. This can be done by passing an API key that belongs to a consumer\nproject.", + "type": "boolean" + }, + "oauth": { + "description": "The requirements for OAuth credentials.", + "$ref": "OAuthRequirements" + }, + "requirements": { + "description": "Requirements for additional authentication providers.", + "type": "array", + "items": { + "$ref": "AuthRequirement" + } + } + }, + "id": "AuthenticationRule" + }, + "Api": { + "id": "Api", + "description": "Api is a light-weight descriptor for a protocol buffer service.", + "type": "object", + "properties": { + "methods": { + "type": "array", + "items": { + "$ref": "Method" + }, + "description": "The methods of this api, in unspecified order." 
+ }, + "name": { + "description": "The fully qualified name of this api, including package name\nfollowed by the api's simple name.", + "type": "string" + }, + "sourceContext": { + "description": "Source context for the protocol buffer service represented by this\nmessage.", + "$ref": "SourceContext" + }, + "syntax": { + "description": "The source syntax of the service.", + "type": "string", + "enumDescriptions": [ + "Syntax `proto2`.", + "Syntax `proto3`." + ], + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ] + }, + "version": { + "type": "string", + "description": "A version string for this api. If specified, must have the form\n`major-version.minor-version`, as in `1.10`. If the minor version\nis omitted, it defaults to zero. If the entire version field is\nempty, the major version is derived from the package name, as\noutlined below. If the field is not empty, the version in the\npackage name will be verified to be consistent with what is\nprovided here.\n\nThe versioning schema uses [semantic\nversioning](http://semver.org) where the major version number\nindicates a breaking change and the minor version an additive,\nnon-breaking change. Both version numbers are signals to users\nwhat to expect from different versions, and should be carefully\nchosen based on the product plan.\n\nThe major version is also reflected in the package name of the\nAPI, which must end in `v\u003cmajor-version\u003e`, as in\n`google.feature.v1`. For major versions 0 and 1, the suffix can\nbe omitted. Zero major versions must only be used for\nexperimental, none-GA apis.\n" + }, + "mixins": { + "description": "Included APIs. See Mixin.", + "type": "array", + "items": { + "$ref": "Mixin" + } + }, + "options": { + "description": "Any metadata attached to the API.", + "type": "array", + "items": { + "$ref": "Option" + } + } + } + }, + "Authentication": { + "description": "`Authentication` defines the authentication configuration for an API.\n\nExample for an API targeted for external use:\n\n name: calendar.googleapis.com\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "type": "object", + "properties": { + "rules": { + "type": "array", + "items": { + "$ref": "AuthenticationRule" + }, + "description": "A list of authentication rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order." + }, + "providers": { + "description": "Defines a set of authentication providers that a service supports.", + "type": "array", + "items": { + "$ref": "AuthProvider" + } + } + }, + "id": "Authentication" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "type": "object", + "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" + }, + "response": { + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. 
For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "type": "string" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object" + } + }, + "id": "Operation" + }, + "Page": { + "description": "Represents a documentation page. A page can contain subpages to represent\nnested documentation set structure.", + "type": "object", + "properties": { + "subpages": { + "description": "Subpages of this page. The order of subpages specified here will be\nhonored in the generated docset.", + "type": "array", + "items": { + "$ref": "Page" + } + }, + "name": { + "description": "The name of the page. It will be used as an identity of the page to\ngenerate URI of the page, text of the link to this page in navigation,\netc. The full page name (start from the root page name to this page\nconcatenated with `.`) can be used as reference to the page in your\ndocumentation. For example:\n\u003cpre\u003e\u003ccode\u003epages:\n- name: Tutorial\n content: (== include tutorial.md ==)\n subpages:\n - name: Java\n content: (== include tutorial_java.md ==)\n\u003c/code\u003e\u003c/pre\u003e\nYou can reference `Java` page using Markdown reference link syntax:\n`Java`.", + "type": "string" + }, + "content": { + "description": "The Markdown content of the page. You can use \u003ccode\u003e(== include {path} ==)\u003c/code\u003e\nto include content from a Markdown file.", + "type": "string" + } + }, + "id": "Page" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. 
There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + } + }, + "id": "Status" + }, + "AuthProvider": { + "type": "object", + "properties": { + "jwksUri": { + "type": "string", + "description": "URL of the provider's public key set to validate signature of the JWT. See\n[OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).\nOptional if the key set document:\n - can be retrieved from\n [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html\n of the issuer.\n - can be inferred from the email domain of the issuer (e.g. a Google service account).\n\nExample: https://www.googleapis.com/oauth2/v1/certs" + }, + "audiences": { + "description": "The list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. 
For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "type": "string" + }, + "id": { + "description": "The unique identifier of the auth provider. It will be referred to by\n`AuthRequirement.provider_id`.\n\nExample: \"bookstore_auth\".", + "type": "string" + }, + "issuer": { + "description": "Identifies the principal that issued the JWT. See\nhttps://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1\nUsually a URL or an email address.\n\nExample: https://securetoken.google.com\nExample: 1234567-compute@developer.gserviceaccount.com", + "type": "string" + } + }, + "id": "AuthProvider", + "description": "Configuration for an anthentication provider, including support for\n[JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32)." + }, + "Service": { + "id": "Service", + "description": "`Service` is the root object of Google service configuration schema. It\ndescribes basic information about a service, such as the name and the\ntitle, and delegates other aspects to sub-sections. Each sub-section is\neither a proto message or a repeated proto message that configures a\nspecific aspect, such as auth. See each proto message definition for details.\n\nExample:\n\n type: google.api.Service\n config_version: 3\n name: calendar.googleapis.com\n title: Google Calendar API\n apis:\n - name: google.calendar.v3.Calendar\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "type": "object", + "properties": { + "documentation": { + "$ref": "Documentation", + "description": "Additional API documentation." + }, + "logging": { + "$ref": "Logging", + "description": "Logging configuration." + }, + "monitoredResources": { + "description": "Defines the monitored resources used by this service. This is required\nby the Service.monitoring and Service.logging configurations.", + "type": "array", + "items": { + "$ref": "MonitoredResourceDescriptor" + } + }, + "enums": { + "description": "A list of all enum types included in this API service. Enums\nreferenced directly or indirectly by the `apis` are automatically\nincluded. Enums which are not referenced but shall be included\nshould be listed here by name. Example:\n\n enums:\n - name: google.someapi.v1.SomeEnum", + "type": "array", + "items": { + "$ref": "Enum" + } + }, + "context": { + "$ref": "Context", + "description": "Context configuration." + }, + "id": { + "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. If empty, the server may choose to\ngenerate one instead.", + "type": "string" + }, + "usage": { + "$ref": "Usage", + "description": "Configuration controlling usage of this service." + }, + "metrics": { + "description": "Defines the metrics used by this service.", + "type": "array", + "items": { + "$ref": "MetricDescriptor" + } + }, + "authentication": { + "description": "Auth configuration.", + "$ref": "Authentication" + }, + "experimental": { + "$ref": "Experimental", + "description": "Experimental configuration." 
+ }, + "control": { + "description": "Configuration for the service control plane.", + "$ref": "Control" + }, + "configVersion": { + "description": "The version of the service configuration. The config version may\ninfluence interpretation of the configuration, for example, to\ndetermine defaults. This is documented together with applicable\noptions. The current default for the config version itself is `3`.", + "format": "uint32", + "type": "integer" + }, + "monitoring": { + "description": "Monitoring configuration.", + "$ref": "Monitoring" + }, + "producerProjectId": { + "description": "The id of the Google developer project that owns the service.\nMembers of this project can manage the service configuration,\nmanage consumption of the service, etc.", + "type": "string" + }, + "systemTypes": { + "description": "A list of all proto message types included in this API service.\nIt serves similar purpose as [google.api.Service.types], except that\nthese types are not needed by user-defined APIs. Therefore, they will not\nshow up in the generated discovery doc. This field should only be used\nto define system APIs in ESF.", + "type": "array", + "items": { + "$ref": "Type" + } + }, + "visibility": { + "$ref": "Visibility", + "description": "API visibility configuration." + }, + "name": { + "type": "string", + "description": "The DNS address at which this service is available,\ne.g. `calendar.googleapis.com`." + }, + "customError": { + "description": "Custom error configuration.", + "$ref": "CustomError" + }, + "title": { + "description": "The product title associated with this service.", + "type": "string" + }, + "endpoints": { + "description": "Configuration for network endpoints. If this is empty, then an endpoint\nwith the same name as the service is automatically generated to service all\ndefined APIs.", + "type": "array", + "items": { + "$ref": "Endpoint" + } + }, + "logs": { + "description": "Defines the logs used by this service.", + "type": "array", + "items": { + "$ref": "LogDescriptor" + } + }, + "apis": { + "type": "array", + "items": { + "$ref": "Api" + }, + "description": "A list of API interfaces exported by this service. Only the `name` field\nof the google.protobuf.Api needs to be provided by the configuration\nauthor, as the remaining fields will be derived from the IDL during the\nnormalization process. It is an error to specify an API interface here\nwhich cannot be resolved against the associated IDL files." + }, + "types": { + "description": "A list of all proto message types included in this API service.\nTypes referenced directly or indirectly by the `apis` are\nautomatically included. Messages which are not referenced but\nshall be included, such as types used by the `google.protobuf.Any` type,\nshould be listed here by name. Example:\n\n types:\n - name: google.protobuf.Int32", + "type": "array", + "items": { + "$ref": "Type" + } + }, + "http": { + "description": "HTTP configuration.", + "$ref": "Http" + }, + "backend": { + "$ref": "Backend", + "description": "API backend configuration." + }, + "systemParameters": { + "$ref": "SystemParameters", + "description": "System parameter configuration." 
+ } + } + }, + "EnumValue": { + "id": "EnumValue", + "description": "Enum value definition.", + "type": "object", + "properties": { + "name": { + "description": "Enum value name.", + "type": "string" + }, + "options": { + "description": "Protocol buffer options.", + "type": "array", + "items": { + "$ref": "Option" + } + }, + "number": { + "description": "Enum value number.", + "format": "int32", + "type": "integer" + } + } + }, + "CustomHttpPattern": { + "description": "A custom pattern is used for defining custom HTTP verb.", + "type": "object", + "properties": { + "kind": { + "description": "The name of this custom HTTP verb.", + "type": "string" + }, + "path": { + "description": "The path matched by this custom verb.", + "type": "string" + } + }, + "id": "CustomHttpPattern" + }, + "SystemParameterRule": { + "description": "Define a system parameter rule mapping system parameter definitions to\nmethods.", + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details." + }, + "parameters": { + "description": "Define parameters. Multiple names may be defined for a parameter.\nFor a given method call, only one of them should be used. If multiple\nnames are used the behavior is implementation-dependent.\nIf none of the specified names are present the behavior is\nparameter-dependent.", + "type": "array", + "items": { + "$ref": "SystemParameter" + } + } + }, + "id": "SystemParameterRule" + }, + "VisibilityRule": { + "description": "A visibility rule provides visibility configuration for an individual API\nelement.", + "type": "object", + "properties": { + "restriction": { + "description": "A comma-separated list of visibility labels that apply to the `selector`.\nAny of the listed labels can be used to grant the visibility.\n\nIf a rule has multiple labels, removing one of the labels but not all of\nthem can break clients.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: GOOGLE_INTERNAL, TRUSTED_TESTER\n\nRemoving GOOGLE_INTERNAL from this restriction will break clients that\nrely on this method and only had access to it through GOOGLE_INTERNAL.", + "type": "string" + }, + "selector": { + "description": "Selects methods, messages, fields, enums, etc. to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + } + }, + "id": "VisibilityRule" + }, + "HttpRule": { + "properties": { + "responseBody": { + "description": "The name of the response field whose value is mapped to the HTTP body of\nresponse. Other response fields are ignored. This field is optional. When\nnot set, the response message will be used as HTTP body of response.\nNOTE: the referred field must be not a repeated field and must be present\nat the top-level of response message type.", + "type": "string" + }, + "mediaUpload": { + "$ref": "MediaUpload", + "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration." + }, + "selector": { + "description": "Selects methods to which this rule applies.\n\nRefer to selector for syntax details.", + "type": "string" + }, + "custom": { + "$ref": "CustomHttpPattern", + "description": "Custom pattern is used for defining custom verbs." 
+ }, + "get": { + "description": "Used for listing and getting information about resources.", + "type": "string" + }, + "patch": { + "description": "Used for updating a resource.", + "type": "string" + }, + "put": { + "description": "Used for updating a resource.", + "type": "string" + }, + "delete": { + "description": "Used for deleting a resource.", + "type": "string" + }, + "body": { + "description": "The name of the request field whose value is mapped to the HTTP body, or\n`*` for mapping all fields not captured by the path pattern to the HTTP\nbody. NOTE: the referred field must not be a repeated field and must be\npresent at the top-level of request message type.", + "type": "string" + }, + "post": { + "description": "Used for creating a resource.", + "type": "string" + }, + "mediaDownload": { + "$ref": "MediaDownload", + "description": "Do not use this. For media support, add instead\n[][google.bytestream.RestByteStream] as an API to your\nconfiguration." + }, + "additionalBindings": { + "description": "Additional HTTP bindings for the selector. Nested bindings must\nnot contain an `additional_bindings` field themselves (that is,\nthe nesting may only be one level deep).", + "type": "array", + "items": { + "$ref": "HttpRule" + } + } + }, + "id": "HttpRule", + "description": "`HttpRule` defines the mapping of an RPC method to one or more HTTP\nREST APIs. The mapping determines what portions of the request\nmessage are populated from the path, query parameters, or body of\nthe HTTP request. The mapping is typically specified as an\n`google.api.http` annotation, see \"google/api/annotations.proto\"\nfor details.\n\nThe mapping consists of a field specifying the path template and\nmethod kind. The path template can refer to fields in the request\nmessage, as in the example below which describes a REST GET\noperation on a resource collection of messages:\n\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http).get = \"/v1/messages/{message_id}/{sub.subfield}\";\n }\n }\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // mapped to the URL\n SubMessage sub = 2; // `sub.subfield` is url-mapped\n }\n message Message {\n string text = 1; // content of the resource\n }\n\nThe same http annotation can alternatively be expressed inside the\n`GRPC API Configuration` YAML file.\n\n http:\n rules:\n - selector: \u003cproto_package_name\u003e.Messaging.GetMessage\n get: /v1/messages/{message_id}/{sub.subfield}\n\nThis definition enables an automatic, bidrectional mapping of HTTP\nJSON to RPC. Example:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456/foo` | `GetMessage(message_id: \"123456\" sub: SubMessage(subfield: \"foo\"))`\n\nIn general, not only fields but also field paths can be referenced\nfrom a path pattern. Fields mapped to the path pattern cannot be\nrepeated and must have a primitive (non-message) type.\n\nAny fields in the request message which are not bound by the path\npattern automatically become (optional) HTTP query\nparameters. 
Assume the following definition of the request message:\n\n\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // mapped to the URL\n int64 revision = 2; // becomes a parameter\n SubMessage sub = 3; // `sub.subfield` becomes a parameter\n }\n\n\nThis enables a HTTP JSON to RPC mapping as below:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))`\n\nNote that fields which are mapped to HTTP parameters must have a\nprimitive type or a repeated primitive type. Message types are not\nallowed. In the case of a repeated type, the parameter can be\nrepeated in the URL, as in `...?param=A¶m=B`.\n\nFor HTTP method kinds which allow a request body, the `body` field\nspecifies the mapping. Consider a REST update method on the\nmessage resource collection:\n\n\n service Messaging {\n rpc UpdateMessage(UpdateMessageRequest) returns (Message) {\n option (google.api.http) = {\n put: \"/v1/messages/{message_id}\"\n body: \"message\"\n };\n }\n }\n message UpdateMessageRequest {\n string message_id = 1; // mapped to the URL\n Message message = 2; // mapped to the body\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled, where the\nrepresentation of the JSON in the request body is determined by\nprotos JSON encoding:\n\nHTTP | RPC\n-----|-----\n`PUT /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })`\n\nThe special name `*` can be used in the body mapping to define that\nevery field not bound by the path template should be mapped to the\nrequest body. This enables the following alternative definition of\nthe update method:\n\n service Messaging {\n rpc UpdateMessage(Message) returns (Message) {\n option (google.api.http) = {\n put: \"/v1/messages/{message_id}\"\n body: \"*\"\n };\n }\n }\n message Message {\n string message_id = 1;\n string text = 2;\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled:\n\nHTTP | RPC\n-----|-----\n`PUT /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" text: \"Hi!\")`\n\nNote that when using `*` in the body mapping, it is not possible to\nhave HTTP parameters, as all fields not bound by the path end in\nthe body. This makes this option more rarely used in practice of\ndefining REST APIs. The common usage of `*` is in custom methods\nwhich don't use the URL at all for transferring data.\n\nIt is possible to define multiple HTTP methods for one RPC by using\nthe `additional_bindings` option. Example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/messages/{message_id}\"\n additional_bindings {\n get: \"/v1/users/{user_id}/messages/{message_id}\"\n }\n };\n }\n }\n message GetMessageRequest {\n string message_id = 1;\n string user_id = 2;\n }\n\n\nThis enables the following two alternative HTTP JSON to RPC\nmappings:\n\nHTTP | RPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")`\n`GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id: \"123456\")`\n\n# Rules for HTTP mapping\n\nThe rules for mapping HTTP path, query parameters, and body fields\nto the request message are as follows:\n\n1. The `body` field specifies either `*` or a field path, or is\n omitted. If omitted, it assumes there is no HTTP body.\n2. 
Leaf fields (recursive expansion of nested messages in the\n request) can be classified into three types:\n (a) Matched in the URL template.\n (b) Covered by body (if body is `*`, everything except (a) fields;\n else everything under the body field)\n (c) All other fields.\n3. URL query parameters found in the HTTP request are mapped to (c) fields.\n4. Any body sent with an HTTP request can contain only (b) fields.\n\nThe syntax of the path template is as follows:\n\n Template = \"/\" Segments [ Verb ] ;\n Segments = Segment { \"/\" Segment } ;\n Segment = \"*\" | \"**\" | LITERAL | Variable ;\n Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ;\n FieldPath = IDENT { \".\" IDENT } ;\n Verb = \":\" LITERAL ;\n\nThe syntax `*` matches a single path segment. It follows the semantics of\n[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String\nExpansion.\n\nThe syntax `**` matches zero or more path segments. It follows the semantics\nof [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved\nExpansion. NOTE: it must be the last segment in the path except the Verb.\n\nThe syntax `LITERAL` matches literal text in the URL path.\n\nThe syntax `Variable` matches the entire path as specified by its template;\nthis nested template must not contain further variables. If a variable\nmatches a single path segment, its template may be omitted, e.g. `{var}`\nis equivalent to `{var=*}`.\n\nNOTE: the field paths in variables and in the `body` must not refer to\nrepeated fields or map fields.\n\nUse CustomHttpPattern to specify any HTTP method that is not included in the\n`pattern` field, such as HEAD, or \"*\" to leave the HTTP method unspecified for\na given URL path rule. The wild-card rule is useful for services that provide\ncontent to Web (HTML) clients.", + "type": "object" + }, + "MonitoringDestination": { + "type": "object", + "properties": { + "metrics": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Names of the metrics to report to this monitoring destination.\nEach name must be defined in Service.metrics section." + }, + "monitoredResource": { + "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "type": "string" + } + }, + "id": "MonitoringDestination", + "description": "Configuration of a specific monitoring destination (the producer project\nor the consumer project)." 
+ } + }, + "protocol": "rest", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "canonicalName": "Service User", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/service.management": { + "description": "Manage your Google API service configuration" + } + } + } + }, + "rootUrl": "https://serviceuser.googleapis.com/", + "ownerDomain": "google.com", + "name": "serviceuser", + "batchPath": "batch" +} diff --git a/vendor/google.golang.org/api/serviceuser/v1/serviceuser-gen.go b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-gen.go new file mode 100644 index 000000000..85fb2f79e --- /dev/null +++ b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-gen.go @@ -0,0 +1,4028 @@ +// Package serviceuser provides access to the Google Service User API. +// +// See https://cloud.google.com/service-user/ +// +// Usage example: +// +// import "google.golang.org/api/serviceuser/v1" +// ... +// serviceuserService, err := serviceuser.New(oauthHttpClient) +package serviceuser // import "google.golang.org/api/serviceuser/v1" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "serviceuser:v1" +const apiName = "serviceuser" +const apiVersion = "v1" +const basePath = "https://serviceuser.googleapis.com/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your Google API service configuration + ServiceManagementScope = "https://www.googleapis.com/auth/service.management" +) + +func New(client *http.Client) (*APIService, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &APIService{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type APIService struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Projects *ProjectsService +} + +func (s *APIService) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *APIService) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewProjectsService(s *APIService) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Services = NewProjectsServicesService(s) + return rs +} + +type ProjectsService struct { + s *APIService + + Services *ProjectsServicesService +} + +func NewProjectsServicesService(s *APIService) *ProjectsServicesService { + rs := &ProjectsServicesService{s: s} + return rs +} + +type ProjectsServicesService struct { + s *APIService +} + +// Api: Api is a light-weight descriptor for a protocol buffer service. +type Api struct { + // Methods: The methods of this api, in unspecified order. + Methods []*Method `json:"methods,omitempty"` + + // Mixins: Included APIs. See Mixin. + Mixins []*Mixin `json:"mixins,omitempty"` + + // Name: The fully qualified name of this api, including package + // name + // followed by the api's simple name. + Name string `json:"name,omitempty"` + + // Options: Any metadata attached to the API. + Options []*Option `json:"options,omitempty"` + + // SourceContext: Source context for the protocol buffer service + // represented by this + // message. + SourceContext *SourceContext `json:"sourceContext,omitempty"` + + // Syntax: The source syntax of the service. + // + // Possible values: + // "SYNTAX_PROTO2" - Syntax `proto2`. + // "SYNTAX_PROTO3" - Syntax `proto3`. + Syntax string `json:"syntax,omitempty"` + + // Version: A version string for this api. If specified, must have the + // form + // `major-version.minor-version`, as in `1.10`. If the minor version + // is omitted, it defaults to zero. If the entire version field + // is + // empty, the major version is derived from the package name, + // as + // outlined below. If the field is not empty, the version in the + // package name will be verified to be consistent with what is + // provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version + // number + // indicates a breaking change and the minor version an + // additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be + // carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // API, which must end in `v`, as in + // `google.feature.v1`. 
For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, none-GA apis. + // + Version string `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Methods") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Methods") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Api) MarshalJSON() ([]byte, error) { + type noMethod Api + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuthProvider: Configuration for an anthentication provider, including +// support for +// [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . +type AuthProvider struct { + // Audiences: The list of + // JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- + // token-32#section-4.1.3). + // that are allowed to access. A JWT containing any of these audiences + // will + // be accepted. When this setting is absent, only JWTs with + // audience + // "https://Service_name/API_name" + // will be accepted. For example, if no audiences are in the + // setting, + // LibraryService API will only accept JWTs with the following + // audience + // "https://library-example.googleapis.com/google.example.librar + // y.v1.LibraryService". + // + // Example: + // + // audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com + Audiences string `json:"audiences,omitempty"` + + // Id: The unique identifier of the auth provider. It will be referred + // to by + // `AuthRequirement.provider_id`. + // + // Example: "bookstore_auth". + Id string `json:"id,omitempty"` + + // Issuer: Identifies the principal that issued the JWT. + // See + // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#sec + // tion-4.1.1 + // Usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + Issuer string `json:"issuer,omitempty"` + + // JwksUri: URL of the provider's public key set to validate signature + // of the JWT. See + // [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html# + // ProviderMetadata). + // Optional if the key set document: + // - can be retrieved from + // [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html + // + // of the issuer. + // - can be inferred from the email domain of the issuer (e.g. a Google + // service account). + // + // Example: https://www.googleapis.com/oauth2/v1/certs + JwksUri string `json:"jwksUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Audiences") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Audiences") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AuthProvider) MarshalJSON() ([]byte, error) { + type noMethod AuthProvider + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuthRequirement: User-defined authentication requirements, including +// support for +// [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . +type AuthRequirement struct { + // Audiences: NOTE: This will be deprecated soon, once + // AuthProvider.audiences is + // implemented and accepted in all the runtime components. + // + // The list of + // JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- + // token-32#section-4.1.3). + // that are allowed to access. A JWT containing any of these audiences + // will + // be accepted. When this setting is absent, only JWTs with + // audience + // "https://Service_name/API_name" + // will be accepted. For example, if no audiences are in the + // setting, + // LibraryService API will only accept JWTs with the following + // audience + // "https://library-example.googleapis.com/google.example.librar + // y.v1.LibraryService". + // + // Example: + // + // audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com + Audiences string `json:"audiences,omitempty"` + + // ProviderId: id from authentication provider. + // + // Example: + // + // provider_id: bookstore_auth + ProviderId string `json:"providerId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Audiences") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Audiences") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AuthRequirement) MarshalJSON() ([]byte, error) { + type noMethod AuthRequirement + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Authentication: `Authentication` defines the authentication +// configuration for an API. 
+// +// Example for an API targeted for external use: +// +// name: calendar.googleapis.com +// authentication: +// providers: +// - id: google_calendar_auth +// jwks_uri: https://www.googleapis.com/oauth2/v1/certs +// issuer: https://securetoken.google.com +// rules: +// - selector: "*" +// requirements: +// provider_id: google_calendar_auth +type Authentication struct { + // Providers: Defines a set of authentication providers that a service + // supports. + Providers []*AuthProvider `json:"providers,omitempty"` + + // Rules: A list of authentication rules that apply to individual API + // methods. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*AuthenticationRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Providers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Providers") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Authentication) MarshalJSON() ([]byte, error) { + type noMethod Authentication + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuthenticationRule: Authentication rules for the service. +// +// By default, if a method has any authentication requirements, every +// request +// must include a valid credential matching one of the +// requirements. +// It's an error to include more than one kind of credential in a +// single +// request. +// +// If a method doesn't have any auth requirements, request credentials +// will be +// ignored. +type AuthenticationRule struct { + // AllowWithoutCredential: Whether to allow requests without a + // credential. The credential can be + // an OAuth token, Google cookies (first-party auth) or + // EndUserCreds. + // + // For requests without credentials, if the service control environment + // is + // specified, each incoming request **must** be associated with a + // service + // consumer. This can be done by passing an API key that belongs to a + // consumer + // project. + AllowWithoutCredential bool `json:"allowWithoutCredential,omitempty"` + + // Oauth: The requirements for OAuth credentials. + Oauth *OAuthRequirements `json:"oauth,omitempty"` + + // Requirements: Requirements for additional authentication providers. + Requirements []*AuthRequirement `json:"requirements,omitempty"` + + // Selector: Selects the methods to which this rule applies. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AllowWithoutCredential") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. 
+ // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowWithoutCredential") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { + type noMethod AuthenticationRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuthorizationConfig: Configuration of authorization. +// +// This section determines the authorization provider, if unspecified, +// then no +// authorization check will be done. +// +// Example: +// +// experimental: +// authorization: +// provider: firebaserules.googleapis.com +type AuthorizationConfig struct { + // Provider: The name of the authorization provider, such + // as + // firebaserules.googleapis.com. + Provider string `json:"provider,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Provider") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Provider") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AuthorizationConfig) MarshalJSON() ([]byte, error) { + type noMethod AuthorizationConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Backend: `Backend` defines the backend configuration for a service. +type Backend struct { + // Rules: A list of API backend rules that apply to individual API + // methods. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*BackendRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rules") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rules") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Backend) MarshalJSON() ([]byte, error) { + type noMethod Backend + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendRule: A backend rule provides configuration for an individual +// API element. +type BackendRule struct { + // Address: The address of the API backend. + Address string `json:"address,omitempty"` + + // Deadline: The number of seconds to wait for a response from a + // request. The + // default depends on the deployment context. + Deadline float64 `json:"deadline,omitempty"` + + // Selector: Selects the methods to which this rule applies. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendRule) MarshalJSON() ([]byte, error) { + type noMethod BackendRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *BackendRule) UnmarshalJSON(data []byte) error { + type noMethod BackendRule + var s1 struct { + Deadline gensupport.JSONFloat64 `json:"deadline"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Deadline = float64(s1.Deadline) + return nil +} + +// Context: `Context` defines which contexts an API +// requests. +// +// Example: +// +// context: +// rules: +// - selector: "*" +// requested: +// - google.rpc.context.ProjectContext +// - google.rpc.context.OriginContext +// +// The above specifies that all methods in the API +// request +// `google.rpc.context.ProjectContext` +// and +// `google.rpc.context.OriginContext`. +// +// Available context types are defined in package +// `google.rpc.context`. +type Context struct { + // Rules: A list of RPC context rules that apply to individual API + // methods. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*ContextRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rules") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rules") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Context) MarshalJSON() ([]byte, error) { + type noMethod Context + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ContextRule: A context rule provides information about the context +// for an individual API +// element. +type ContextRule struct { + // Provided: A list of full type names of provided contexts. + Provided []string `json:"provided,omitempty"` + + // Requested: A list of full type names of requested contexts. + Requested []string `json:"requested,omitempty"` + + // Selector: Selects the methods to which this rule applies. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Provided") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Provided") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ContextRule) MarshalJSON() ([]byte, error) { + type noMethod ContextRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Control: Selects and configures the service controller used by the +// service. The +// service controller handles features like abuse, quota, billing, +// logging, +// monitoring, etc. +type Control struct { + // Environment: The service control environment to use. If empty, no + // control plane + // feature (like quota and billing) will be enabled. + Environment string `json:"environment,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Environment") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Environment") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Control) MarshalJSON() ([]byte, error) { + type noMethod Control + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CustomError: Customize service error responses. For example, list +// any service +// specific protobuf types that can appear in error detail lists +// of +// error responses. +// +// Example: +// +// custom_error: +// types: +// - google.foo.v1.CustomError +// - google.foo.v1.AnotherError +type CustomError struct { + // Rules: The list of custom error rules that apply to individual API + // messages. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*CustomErrorRule `json:"rules,omitempty"` + + // Types: The list of custom error detail types, e.g. + // 'google.foo.v1.CustomError'. + Types []string `json:"types,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rules") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rules") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CustomError) MarshalJSON() ([]byte, error) { + type noMethod CustomError + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CustomErrorRule: A custom error rule. +type CustomErrorRule struct { + // IsErrorType: Mark this message as possible payload in error response. + // Otherwise, + // objects of this type will be filtered when they appear in error + // payload. + IsErrorType bool `json:"isErrorType,omitempty"` + + // Selector: Selects messages to which this rule applies. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IsErrorType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IsErrorType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *CustomErrorRule) MarshalJSON() ([]byte, error) { + type noMethod CustomErrorRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CustomHttpPattern: A custom pattern is used for defining custom HTTP +// verb. +type CustomHttpPattern struct { + // Kind: The name of this custom HTTP verb. + Kind string `json:"kind,omitempty"` + + // Path: The path matched by this custom verb. + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { + type noMethod CustomHttpPattern + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DisableServiceRequest: Request message for DisableService method. +type DisableServiceRequest struct { +} + +// Documentation: `Documentation` provides the information for +// describing a service. +// +// Example: +//
+// documentation:
+//   summary: >
+//     The Google Calendar API gives access
+//     to most calendar features.
+//   pages:
+//   - name: Overview
+//     content: (== include google/foo/overview.md ==)
+//   - name: Tutorial
+//     content: (== include google/foo/tutorial.md ==)
+//     subpages;
+//     - name: Java
+//       content: (== include google/foo/tutorial_java.md ==)
+//   rules:
+//   - selector: google.calendar.Calendar.Get
+//     description: >
+//       ...
+//   - selector: google.calendar.Calendar.Put
+//     description: >
+//       ...
+//
      +// Documentation is provided in markdown syntax. In addition to +// standard markdown features, definition lists, tables and fenced +// code blocks are supported. Section headers can be provided and +// are +// interpreted relative to the section nesting of the context where +// a documentation fragment is embedded. +// +// Documentation from the IDL is merged with documentation defined +// via the config at normalization time, where documentation provided +// by config rules overrides IDL provided. +// +// A number of constructs specific to the API platform are supported +// in documentation text. +// +// In order to reference a proto element, the following +// notation can be +// used: +//
+// [fully.qualified.proto.name][]
+// To override the display text used for the link, this can be +// used: +//
+// [display
+// text][fully.qualified.proto.name]
+// Text can be excluded from doc using the following +// notation: +//
+// (-- internal comment --)
+// Comments can be made conditional using a visibility label. The +// below +// text will be only rendered if the `BETA` label is +// available: +//
+// (--BETA: comment for BETA users --)
+// A few directives are available in documentation. Note that +// directives must appear on a single line to be properly +// identified. The `include` directive includes a markdown file from +// an external source: +//
+// (== include path/to/file ==)
+// The `resource_for` directive marks a message to be the resource of +// a collection in REST view. If it is not specified, tools attempt +// to infer the resource from the operations in a +// collection: +//
+// (== resource_for v1.shelves.books
+// ==)
      +// The directive `suppress_warning` does not directly affect +// documentation +// and is documented together with service config validation. +type Documentation struct { + // DocumentationRootUrl: The URL to the root of documentation. + DocumentationRootUrl string `json:"documentationRootUrl,omitempty"` + + // Overview: Declares a single overview page. For + // example: + //
+	// documentation:
+	//   summary: ...
+	//   overview: (== include overview.md ==)
+	//
+	// This is a shortcut for the following declaration (using pages +	// style): +	//
+	// documentation:
+	//   summary: ...
+	//   pages:
+	//   - name: Overview
+	//     content: (== include overview.md ==)
+	//
      + // Note: you cannot specify both `overview` field and `pages` field. + Overview string `json:"overview,omitempty"` + + // Pages: The top level pages for the documentation set. + Pages []*Page `json:"pages,omitempty"` + + // Rules: A list of documentation rules that apply to individual API + // elements. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*DocumentationRule `json:"rules,omitempty"` + + // Summary: A short summary of what the service does. Can only be + // provided by + // plain text. + Summary string `json:"summary,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DocumentationRootUrl") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DocumentationRootUrl") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Documentation) MarshalJSON() ([]byte, error) { + type noMethod Documentation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DocumentationRule: A documentation rule provides information about +// individual API elements. +type DocumentationRule struct { + // DeprecationDescription: Deprecation description of the selected + // element(s). It can be provided if an + // element is marked as `deprecated`. + DeprecationDescription string `json:"deprecationDescription,omitempty"` + + // Description: Description of the selected API(s). + Description string `json:"description,omitempty"` + + // Selector: The selector is a comma-separated list of patterns. Each + // pattern is a + // qualified name of the element which may end in "*", indicating a + // wildcard. + // Wildcards are only allowed at the end and for a whole component of + // the + // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". + // To + // specify a default for all applicable elements, the whole pattern + // "*" + // is used. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DeprecationDescription") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeprecationDescription") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DocumentationRule) MarshalJSON() ([]byte, error) { + type noMethod DocumentationRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// EnableServiceRequest: Request message for EnableService method. +type EnableServiceRequest struct { +} + +// EnabledService: An EnabledService message contains the details about +// a service that has been +// enabled for use. +type EnabledService struct { + // Service: The Service definition for the enabled service + // Only the name and title fields will be populated. + Service *Service `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *EnabledService) MarshalJSON() ([]byte, error) { + type noMethod EnabledService + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Endpoint: `Endpoint` describes a network endpoint that serves a set +// of APIs. +// A service may expose any number of endpoints, and all endpoints share +// the +// same service configuration, such as quota configuration and +// monitoring +// configuration. +// +// Example service configuration: +// +// name: library-example.googleapis.com +// endpoints: +// # Below entry makes 'google.example.library.v1.Library' +// # API be served from endpoint address +// library-example.googleapis.com. +// # It also allows HTTP OPTIONS calls to be passed to the +// backend, for +// # it to decide whether the subsequent cross-origin request is +// # allowed to proceed. +// - name: library-example.googleapis.com +// allow_cors: true +type Endpoint struct { + // Aliases: DEPRECATED: This field is no longer supported. Instead of + // using aliases, + // please specify multiple google.api.Endpoint for each of the + // intented + // alias. + // + // Additional names that this endpoint will be hosted on. + Aliases []string `json:"aliases,omitempty"` + + // AllowCors: + // Allowing + // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sh + // aring), aka + // cross-domain traffic, would allow the backends served from this + // endpoint to + // receive and respond to HTTP OPTIONS requests. The response will be + // used by + // the browser to determine whether the subsequent cross-origin request + // is + // allowed to proceed. + AllowCors bool `json:"allowCors,omitempty"` + + // Apis: The list of APIs served by this endpoint. + Apis []string `json:"apis,omitempty"` + + // Features: The list of features enabled on this endpoint. 
+ Features []string `json:"features,omitempty"` + + // Name: The canonical name of this endpoint. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Aliases") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Aliases") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Endpoint) MarshalJSON() ([]byte, error) { + type noMethod Endpoint + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Enum: Enum type definition. +type Enum struct { + // Enumvalue: Enum value definitions. + Enumvalue []*EnumValue `json:"enumvalue,omitempty"` + + // Name: Enum type name. + Name string `json:"name,omitempty"` + + // Options: Protocol buffer options. + Options []*Option `json:"options,omitempty"` + + // SourceContext: The source context. + SourceContext *SourceContext `json:"sourceContext,omitempty"` + + // Syntax: The source syntax. + // + // Possible values: + // "SYNTAX_PROTO2" - Syntax `proto2`. + // "SYNTAX_PROTO3" - Syntax `proto3`. + Syntax string `json:"syntax,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enumvalue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enumvalue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Enum) MarshalJSON() ([]byte, error) { + type noMethod Enum + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// EnumValue: Enum value definition. +type EnumValue struct { + // Name: Enum value name. + Name string `json:"name,omitempty"` + + // Number: Enum value number. + Number int64 `json:"number,omitempty"` + + // Options: Protocol buffer options. + Options []*Option `json:"options,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *EnumValue) MarshalJSON() ([]byte, error) { + type noMethod EnumValue + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Experimental: Experimental service configuration. These configuration +// options can +// only be used by whitelisted users. +type Experimental struct { + // Authorization: Authorization configuration. + Authorization *AuthorizationConfig `json:"authorization,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Authorization") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Authorization") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Experimental) MarshalJSON() ([]byte, error) { + type noMethod Experimental + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Field: A single field of a message type. +type Field struct { + // Cardinality: The field cardinality. + // + // Possible values: + // "CARDINALITY_UNKNOWN" - For fields with unknown cardinality. + // "CARDINALITY_OPTIONAL" - For optional fields. + // "CARDINALITY_REQUIRED" - For required fields. Proto2 syntax only. + // "CARDINALITY_REPEATED" - For repeated fields. + Cardinality string `json:"cardinality,omitempty"` + + // DefaultValue: The string value of the default value of this field. + // Proto2 syntax only. + DefaultValue string `json:"defaultValue,omitempty"` + + // JsonName: The field JSON name. + JsonName string `json:"jsonName,omitempty"` + + // Kind: The field type. + // + // Possible values: + // "TYPE_UNKNOWN" - Field type unknown. + // "TYPE_DOUBLE" - Field type double. + // "TYPE_FLOAT" - Field type float. + // "TYPE_INT64" - Field type int64. + // "TYPE_UINT64" - Field type uint64. + // "TYPE_INT32" - Field type int32. + // "TYPE_FIXED64" - Field type fixed64. + // "TYPE_FIXED32" - Field type fixed32. + // "TYPE_BOOL" - Field type bool. + // "TYPE_STRING" - Field type string. + // "TYPE_GROUP" - Field type group. Proto2 syntax only, and + // deprecated. + // "TYPE_MESSAGE" - Field type message. + // "TYPE_BYTES" - Field type bytes. + // "TYPE_UINT32" - Field type uint32. + // "TYPE_ENUM" - Field type enum. + // "TYPE_SFIXED32" - Field type sfixed32. 
+ // "TYPE_SFIXED64" - Field type sfixed64. + // "TYPE_SINT32" - Field type sint32. + // "TYPE_SINT64" - Field type sint64. + Kind string `json:"kind,omitempty"` + + // Name: The field name. + Name string `json:"name,omitempty"` + + // Number: The field number. + Number int64 `json:"number,omitempty"` + + // OneofIndex: The index of the field type in `Type.oneofs`, for message + // or enumeration + // types. The first type has index 1; zero means the type is not in the + // list. + OneofIndex int64 `json:"oneofIndex,omitempty"` + + // Options: The protocol buffer options. + Options []*Option `json:"options,omitempty"` + + // Packed: Whether to use alternative packed wire representation. + Packed bool `json:"packed,omitempty"` + + // TypeUrl: The field type URL, without the scheme, for message or + // enumeration + // types. Example: "type.googleapis.com/google.protobuf.Timestamp". + TypeUrl string `json:"typeUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Cardinality") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Cardinality") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Field) MarshalJSON() ([]byte, error) { + type noMethod Field + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Http: Defines the HTTP configuration for a service. It contains a +// list of +// HttpRule, each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + // Rules: A list of HTTP configuration rules that apply to individual + // API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*HttpRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rules") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rules") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *Http) MarshalJSON() ([]byte, error) { + type noMethod Http + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpRule: `HttpRule` defines the mapping of an RPC method to one or +// more HTTP +// REST APIs. The mapping determines what portions of the +// request +// message are populated from the path, query parameters, or body of +// the HTTP request. The mapping is typically specified as +// an +// `google.api.http` annotation, see "google/api/annotations.proto" +// for details. +// +// The mapping consists of a field specifying the path template +// and +// method kind. The path template can refer to fields in the +// request +// message, as in the example below which describes a REST GET +// operation on a resource collection of messages: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = +// "/v1/messages/{message_id}/{sub.subfield}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// SubMessage sub = 2; // `sub.subfield` is url-mapped +// } +// message Message { +// string text = 1; // content of the resource +// } +// +// The same http annotation can alternatively be expressed inside +// the +// `GRPC API Configuration` YAML file. +// +// http: +// rules: +// - selector: .Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// This definition enables an automatic, bidrectional mapping of +// HTTP +// JSON to RPC. Example: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" +// sub: SubMessage(subfield: "foo"))` +// +// In general, not only fields but also field paths can be +// referenced +// from a path pattern. Fields mapped to the path pattern cannot +// be +// repeated and must have a primitive (non-message) type. +// +// Any fields in the request message which are not bound by the +// path +// pattern automatically become (optional) HTTP query +// parameters. Assume the following definition of the request +// message: +// +// +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// int64 revision = 2; // becomes a parameter +// SubMessage sub = 3; // `sub.subfield` becomes a parameter +// } +// +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to HTTP parameters must have +// a +// primitive type or a repeated primitive type. Message types are +// not +// allowed. In the case of a repeated type, the parameter can +// be +// repeated in the URL, as in `...?param=A¶m=B`. +// +// For HTTP method kinds which allow a request body, the `body` +// field +// specifies the mapping. 
Consider a REST update method on the +// message resource collection: +// +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// +// The following HTTP JSON to RPC mapping is enabled, where +// the +// representation of the JSON in the request body is determined +// by +// protos JSON encoding: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | +// `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define +// that +// every field not bound by the path template should be mapped to +// the +// request body. This enables the following alternative definition +// of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | +// `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible +// to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice +// of +// defining REST APIs. The common usage of `*` is in custom +// methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by +// using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// +// This enables the following two alternative HTTP JSON to +// RPC +// mappings: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" +// message_id: "123456")` +// +// # Rules for HTTP mapping +// +// The rules for mapping HTTP path, query parameters, and body fields +// to the request message are as follows: +// +// 1. The `body` field specifies either `*` or a field path, or is +// omitted. If omitted, it assumes there is no HTTP body. +// 2. Leaf fields (recursive expansion of nested messages in the +// request) can be classified into three types: +// (a) Matched in the URL template. +// (b) Covered by body (if body is `*`, everything except (a) +// fields; +// else everything under the body field) +// (c) All other fields. +// 3. URL query parameters found in the HTTP request are mapped to (c) +// fields. +// 4. Any body sent with an HTTP request can contain only (b) +// fields. 
+// +// The syntax of the path template is as follows: +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single path segment. It follows the +// semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple +// String +// Expansion. +// +// The syntax `**` matches zero or more path segments. It follows the +// semantics +// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 +// Reserved +// Expansion. NOTE: it must be the last segment in the path except the +// Verb. +// +// The syntax `LITERAL` matches literal text in the URL path. +// +// The syntax `Variable` matches the entire path as specified by its +// template; +// this nested template must not contain further variables. If a +// variable +// matches a single path segment, its template may be omitted, e.g. +// `{var}` +// is equivalent to `{var=*}`. +// +// NOTE: the field paths in variables and in the `body` must not refer +// to +// repeated fields or map fields. +// +// Use CustomHttpPattern to specify any HTTP method that is not included +// in the +// `pattern` field, such as HEAD, or "*" to leave the HTTP method +// unspecified for +// a given URL path rule. The wild-card rule is useful for services that +// provide +// content to Web (HTML) clients. +type HttpRule struct { + // AdditionalBindings: Additional HTTP bindings for the selector. Nested + // bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `json:"additionalBindings,omitempty"` + + // Body: The name of the request field whose value is mapped to the HTTP + // body, or + // `*` for mapping all fields not captured by the path pattern to the + // HTTP + // body. NOTE: the referred field must not be a repeated field and must + // be + // present at the top-level of request message type. + Body string `json:"body,omitempty"` + + // Custom: Custom pattern is used for defining custom verbs. + Custom *CustomHttpPattern `json:"custom,omitempty"` + + // Delete: Used for deleting a resource. + Delete string `json:"delete,omitempty"` + + // Get: Used for listing and getting information about resources. + Get string `json:"get,omitempty"` + + // MediaDownload: Do not use this. For media support, add + // instead + // [][google.bytestream.RestByteStream] as an API to your + // configuration. + MediaDownload *MediaDownload `json:"mediaDownload,omitempty"` + + // MediaUpload: Do not use this. For media support, add + // instead + // [][google.bytestream.RestByteStream] as an API to your + // configuration. + MediaUpload *MediaUpload `json:"mediaUpload,omitempty"` + + // Patch: Used for updating a resource. + Patch string `json:"patch,omitempty"` + + // Post: Used for creating a resource. + Post string `json:"post,omitempty"` + + // Put: Used for updating a resource. + Put string `json:"put,omitempty"` + + // ResponseBody: The name of the response field whose value is mapped to + // the HTTP body of + // response. Other response fields are ignored. This field is optional. + // When + // not set, the response message will be used as HTTP body of + // response. + // NOTE: the referred field must be not a repeated field and must be + // present + // at the top-level of response message type. 
+ ResponseBody string `json:"responseBody,omitempty"` + + // Selector: Selects methods to which this rule applies. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdditionalBindings") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdditionalBindings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *HttpRule) MarshalJSON() ([]byte, error) { + type noMethod HttpRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LabelDescriptor: A description of a label. +type LabelDescriptor struct { + // Description: A human-readable description for the label. + Description string `json:"description,omitempty"` + + // Key: The label key. + Key string `json:"key,omitempty"` + + // ValueType: The type of data that can be assigned to the label. + // + // Possible values: + // "STRING" - A variable-length string. This is the default. + // "BOOL" - Boolean; true or false. + // "INT64" - A 64-bit signed integer. + ValueType string `json:"valueType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { + type noMethod LabelDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListEnabledServicesResponse: Response message for +// `ListEnabledServices` method. +type ListEnabledServicesResponse struct { + // NextPageToken: Token that can be passed to `ListEnabledServices` to + // resume a paginated + // query. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Services: Services enabled for the specified parent. + Services []*EnabledService `json:"services,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListEnabledServicesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListEnabledServicesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LogDescriptor: A description of a log type. Example in YAML format: +// +// - name: library.googleapis.com/activity_history +// description: The history of borrowing and returning library +// items. +// display_name: Activity +// labels: +// - key: /customer_id +// description: Identifier of a library customer +type LogDescriptor struct { + // Description: A human-readable description of this log. This + // information appears in + // the documentation and can contain details. + Description string `json:"description,omitempty"` + + // DisplayName: The human-readable name for this log. This information + // appears on + // the user interface and should be concise. + DisplayName string `json:"displayName,omitempty"` + + // Labels: The set of labels that are available to describe a specific + // log entry. + // Runtime requests that contain labels not specified here + // are + // considered invalid. + Labels []*LabelDescriptor `json:"labels,omitempty"` + + // Name: The name of the log. It must be less than 512 characters long + // and can + // include the following characters: upper- and lower-case + // alphanumeric + // characters [A-Za-z0-9], and punctuation characters including + // slash, underscore, hyphen, period [/_-.]. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
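+	//
+	// Illustrative sketch, not part of the generated code: to explicitly
+	// clear Description in a patch request, either force-send the empty
+	// string or ask for an explicit JSON null; the literal below is an
+	// assumed usage pattern, not taken from this package's tests.
+	//
+	//	ld := &LogDescriptor{
+	//		Description:     "",
+	//		ForceSendFields: []string{"Description"},
+	//		// or: NullFields: []string{"Description"},
+	//	}
+	//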
+ NullFields []string `json:"-"` +} + +func (s *LogDescriptor) MarshalJSON() ([]byte, error) { + type noMethod LogDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Logging: Logging configuration of the service. +// +// The following example shows how to configure logs to be sent to +// the +// producer and consumer projects. In the example, the +// `activity_history` +// log is sent to both the producer and consumer projects, whereas +// the +// `purchase_history` log is only sent to the producer project. +// +// monitored_resources: +// - type: library.googleapis.com/branch +// labels: +// - key: /city +// description: The city where the library branch is located +// in. +// - key: /name +// description: The name of the branch. +// logs: +// - name: activity_history +// labels: +// - key: /customer_id +// - name: purchase_history +// logging: +// producer_destinations: +// - monitored_resource: library.googleapis.com/branch +// logs: +// - activity_history +// - purchase_history +// consumer_destinations: +// - monitored_resource: library.googleapis.com/branch +// logs: +// - activity_history +type Logging struct { + // ConsumerDestinations: Logging configurations for sending logs to the + // consumer project. + // There can be multiple consumer destinations, each one must have + // a + // different monitored resource type. A log can be used in at most + // one consumer destination. + ConsumerDestinations []*LoggingDestination `json:"consumerDestinations,omitempty"` + + // ProducerDestinations: Logging configurations for sending logs to the + // producer project. + // There can be multiple producer destinations, each one must have + // a + // different monitored resource type. A log can be used in at most + // one producer destination. + ProducerDestinations []*LoggingDestination `json:"producerDestinations,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ConsumerDestinations") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerDestinations") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Logging) MarshalJSON() ([]byte, error) { + type noMethod Logging + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LoggingDestination: Configuration of a specific logging destination +// (the producer project +// or the consumer project). +type LoggingDestination struct { + // Logs: Names of the logs to be sent to this destination. Each name + // must + // be defined in the Service.logs section. If the log name is + // not a domain scoped name, it will be automatically prefixed with + // the service name followed by "/". + Logs []string `json:"logs,omitempty"` + + // MonitoredResource: The monitored resource type. 
The type must be + // defined in the + // Service.monitored_resources section. + MonitoredResource string `json:"monitoredResource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Logs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Logs") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LoggingDestination) MarshalJSON() ([]byte, error) { + type noMethod LoggingDestination + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MediaDownload: Do not use this. For media support, add +// instead +// [][google.bytestream.RestByteStream] as an API to your +// configuration. +type MediaDownload struct { + // Enabled: Whether download is enabled. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MediaDownload) MarshalJSON() ([]byte, error) { + type noMethod MediaDownload + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MediaUpload: Do not use this. For media support, add +// instead +// [][google.bytestream.RestByteStream] as an API to your +// configuration. +type MediaUpload struct { + // Enabled: Whether upload is enabled. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MediaUpload) MarshalJSON() ([]byte, error) { + type noMethod MediaUpload + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Method: Method represents a method of an api. +type Method struct { + // Name: The simple name of this method. + Name string `json:"name,omitempty"` + + // Options: Any metadata attached to the method. + Options []*Option `json:"options,omitempty"` + + // RequestStreaming: If true, the request is streamed. + RequestStreaming bool `json:"requestStreaming,omitempty"` + + // RequestTypeUrl: A URL of the input message type. + RequestTypeUrl string `json:"requestTypeUrl,omitempty"` + + // ResponseStreaming: If true, the response is streamed. + ResponseStreaming bool `json:"responseStreaming,omitempty"` + + // ResponseTypeUrl: The URL of the output message type. + ResponseTypeUrl string `json:"responseTypeUrl,omitempty"` + + // Syntax: The source syntax of this method. + // + // Possible values: + // "SYNTAX_PROTO2" - Syntax `proto2`. + // "SYNTAX_PROTO3" - Syntax `proto3`. + Syntax string `json:"syntax,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Method) MarshalJSON() ([]byte, error) { + type noMethod Method + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MetricDescriptor: Defines a metric type and its schema. Once a metric +// descriptor is created, +// deleting or altering it stops data collection and makes the metric +// type's +// existing data unusable. +type MetricDescriptor struct { + // Description: A detailed description of the metric, which can be used + // in documentation. + Description string `json:"description,omitempty"` + + // DisplayName: A concise name for the metric, which can be displayed in + // user interfaces. + // Use sentence case without an ending period, for example "Request + // count". + DisplayName string `json:"displayName,omitempty"` + + // Labels: The set of labels that can be used to describe a + // specific + // instance of this metric type. For example, + // the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. 
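+	//
+	// Illustrative sketch, not part of the generated code: a descriptor
+	// carrying one such label; the metric type reuses the custom-metric
+	// example given under Type below, and the label key mirrors the prose
+	// above, so treat the concrete values as assumptions.
+	//
+	//	md := &MetricDescriptor{
+	//		Type:       "custom.googleapis.com/invoice/paid/amount",
+	//		MetricKind: "GAUGE",
+	//		ValueType:  "INT64",
+	//		Labels:     []*LabelDescriptor{{Key: "response_code", ValueType: "STRING"}},
+	//	}
+	//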
+ Labels []*LabelDescriptor `json:"labels,omitempty"` + + // MetricKind: Whether the metric records instantaneous values, changes + // to a value, etc. + // Some combinations of `metric_kind` and `value_type` might not be + // supported. + // + // Possible values: + // "METRIC_KIND_UNSPECIFIED" - Do not use this default value. + // "GAUGE" - An instantaneous measurement of a value. + // "DELTA" - The change in a value during a time interval. + // "CUMULATIVE" - A value accumulated over a time interval. + // Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. + MetricKind string `json:"metricKind,omitempty"` + + // Name: The resource name of the metric descriptor. Depending on + // the + // implementation, the name typically includes: (1) the parent resource + // name + // that defines the scope of the metric type or of its data; and (2) + // the + // metric's URL-encoded type, which also appears in the `type` field of + // this + // descriptor. For example, following is the resource name of a + // custom + // metric within the GCP project `my-project-id`: + // + // + // "projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvo + // ice%2Fpaid%2Famount" + Name string `json:"name,omitempty"` + + // Type: The metric type, including its DNS name prefix. The type is + // not + // URL-encoded. All user-defined custom metric types have the DNS + // name + // `custom.googleapis.com`. Metric types should use a natural + // hierarchical + // grouping. For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `json:"type,omitempty"` + + // Unit: The unit in which the metric value is reported. It is only + // applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. + // The + // supported units are a subset of [The Unified Code for Units + // of + // Measure](http://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10**3) + // * `M` mega (10**6) + // * `G` giga (10**9) + // * `T` tera (10**12) + // * `P` peta (10**15) + // * `E` exa (10**18) + // * `Z` zetta (10**21) + // * `Y` yotta (10**24) + // * `m` milli (10**-3) + // * `u` micro (10**-6) + // * `n` nano (10**-9) + // * `p` pico (10**-12) + // * `f` femto (10**-15) + // * `a` atto (10**-18) + // * `z` zepto (10**-21) + // * `y` yocto (10**-24) + // * `Ki` kibi (2**10) + // * `Mi` mebi (2**20) + // * `Gi` gibi (2**30) + // * `Ti` tebi (2**40) + // + // **Grammar** + // + // The grammar includes the dimensionless unit `1`, such as `1/s`. + // + // The grammar also includes these connectors: + // + // * `/` division (as an infix operator, e.g. `1/s`). + // * `.` multiplication (as an infix operator, e.g. `GBy.d`) + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." Component } { "/" Component } ; + // + // Component = [ PREFIX ] UNIT [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // * `Annotation` is just a comment if it follows a `UNIT` and is + // equivalent to `1` if it is used alone. For examples, + // `{requests}/s == 1/s`, `By{transmitted}/s == By/s`. 
+ // * `NAME` is a sequence of non-blank printable ASCII characters not + // containing '{' or '}'. + Unit string `json:"unit,omitempty"` + + // ValueType: Whether the measurement is an integer, a floating-point + // number, etc. + // Some combinations of `metric_kind` and `value_type` might not be + // supported. + // + // Possible values: + // "VALUE_TYPE_UNSPECIFIED" - Do not use this default value. + // "BOOL" - The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. + // "INT64" - The value is a signed 64-bit integer. + // "DOUBLE" - The value is a double precision floating point number. + // "STRING" - The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. + // "DISTRIBUTION" - The value is a `Distribution`. + // "MONEY" - The value is money. + ValueType string `json:"valueType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { + type noMethod MetricDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Mixin: Declares an API to be included in this API. The including API +// must +// redeclare all the methods from the included API, but +// documentation +// and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including API plus the root path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// // rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` +// are +// also declared with same name and request/response types in +// `Storage`. 
A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after +// inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to +// `v2`. +// +// If the `root` field in the mixin is specified, it should be +// a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = +// "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // Name: The fully qualified name of the API which is included. + Name string `json:"name,omitempty"` + + // Root: If non-empty specifies a path under which inherited HTTP + // paths + // are rooted. + Root string `json:"root,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Mixin) MarshalJSON() ([]byte, error) { + type noMethod Mixin + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MonitoredResourceDescriptor: An object that describes the schema of a +// MonitoredResource object using a +// type name and a set of labels. For example, the monitored +// resource +// descriptor for Google Compute Engine VM instances has a type +// of +// "gce_instance" and specifies the use of the labels "instance_id" +// and +// "zone" to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs +// generally +// provide a `list` method that returns the monitored resource +// descriptors used +// by the API. +type MonitoredResourceDescriptor struct { + // Description: Optional. A detailed description of the monitored + // resource type that might + // be used in documentation. + Description string `json:"description,omitempty"` + + // DisplayName: Optional. A concise name for the monitored resource type + // that might be + // displayed in user interfaces. It should be a Title Cased Noun + // Phrase, + // without any article or other determiners. For example, + // "Google Cloud SQL Database". + DisplayName string `json:"displayName,omitempty"` + + // Labels: Required. A set of labels used to describe instances of this + // monitored + // resource type. 
For example, an individual Google Cloud SQL database + // is + // identified by values for the labels "database_id" and "zone". + Labels []*LabelDescriptor `json:"labels,omitempty"` + + // Name: Optional. The resource name of the monitored resource + // descriptor: + // "projects/{project_id}/monitoredResourceDescriptors/{type + // }" where + // {type} is the value of the `type` field in this object + // and + // {project_id} is a project ID that provides API-specific context + // for + // accessing the type. APIs that do not use project information can use + // the + // resource name format "monitoredResourceDescriptors/{type}". + Name string `json:"name,omitempty"` + + // Type: Required. The monitored resource type. For example, the + // type + // "cloudsql_database" represents databases in Google Cloud SQL. + // The maximum length of this value is 256 characters. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { + type noMethod MonitoredResourceDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Monitoring: Monitoring configuration of the service. +// +// The example below shows how to configure monitored resources and +// metrics +// for monitoring. In the example, a monitored resource and two metrics +// are +// defined. The `library.googleapis.com/book/returned_count` metric is +// sent +// to both producer and consumer projects, whereas +// the +// `library.googleapis.com/book/overdue_count` metric is only sent to +// the +// consumer project. +// +// monitored_resources: +// - type: library.googleapis.com/branch +// labels: +// - key: /city +// description: The city where the library branch is located +// in. +// - key: /name +// description: The name of the branch. +// metrics: +// - name: library.googleapis.com/book/returned_count +// metric_kind: DELTA +// value_type: INT64 +// labels: +// - key: /customer_id +// - name: library.googleapis.com/book/overdue_count +// metric_kind: GAUGE +// value_type: INT64 +// labels: +// - key: /customer_id +// monitoring: +// producer_destinations: +// - monitored_resource: library.googleapis.com/branch +// metrics: +// - library.googleapis.com/book/returned_count +// consumer_destinations: +// - monitored_resource: library.googleapis.com/branch +// metrics: +// - library.googleapis.com/book/returned_count +// - library.googleapis.com/book/overdue_count +type Monitoring struct { + // ConsumerDestinations: Monitoring configurations for sending metrics + // to the consumer project. 
+ // There can be multiple consumer destinations, each one must have + // a + // different monitored resource type. A metric can be used in at + // most + // one consumer destination. + ConsumerDestinations []*MonitoringDestination `json:"consumerDestinations,omitempty"` + + // ProducerDestinations: Monitoring configurations for sending metrics + // to the producer project. + // There can be multiple producer destinations, each one must have + // a + // different monitored resource type. A metric can be used in at + // most + // one producer destination. + ProducerDestinations []*MonitoringDestination `json:"producerDestinations,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ConsumerDestinations") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerDestinations") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Monitoring) MarshalJSON() ([]byte, error) { + type noMethod Monitoring + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MonitoringDestination: Configuration of a specific monitoring +// destination (the producer project +// or the consumer project). +type MonitoringDestination struct { + // Metrics: Names of the metrics to report to this monitoring + // destination. + // Each name must be defined in Service.metrics section. + Metrics []string `json:"metrics,omitempty"` + + // MonitoredResource: The monitored resource type. The type must be + // defined in + // Service.monitored_resources section. + MonitoredResource string `json:"monitoredResource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Metrics") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Metrics") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { + type noMethod MonitoringDestination + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OAuthRequirements: OAuth scopes are a way to define data and +// permissions on data. 
For example, +// there are scopes defined for "Read-only access to Google Calendar" +// and +// "Access to Cloud Platform". Users can consent to a scope for an +// application, +// giving it permission to access that data on their behalf. +// +// OAuth scope specifications should be fairly coarse grained; a user +// will need +// to see and understand the text description of what your scope +// means. +// +// In most cases: use one or at most two OAuth scopes for an entire +// family of +// products. If your product has multiple APIs, you should probably be +// sharing +// the OAuth scope across all of those APIs. +// +// When you need finer grained OAuth consent screens: talk with your +// product +// management about how developers will use them in practice. +// +// Please note that even though each of the canonical scopes is enough +// for a +// request to be accepted and passed to the backend, a request can still +// fail +// due to the backend requiring additional scopes or permissions. +type OAuthRequirements struct { + // CanonicalScopes: The list of publicly documented OAuth scopes that + // are allowed access. An + // OAuth token containing any of these scopes will be + // accepted. + // + // Example: + // + // canonical_scopes: https://www.googleapis.com/auth/calendar, + // https://www.googleapis.com/auth/calendar.read + CanonicalScopes string `json:"canonicalScopes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanonicalScopes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CanonicalScopes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { + type noMethod OAuthRequirements + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Operation: This resource represents a long-running operation that is +// the result of a +// network API call. +type Operation struct { + // Done: If the value is `false`, it means the operation is still in + // progress. + // If true, the operation is completed, and either `error` or `response` + // is + // available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *Status `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. + // It typically + // contains progress information and common metadata such as create + // time. + // Some services might not provide such metadata. Any method that + // returns a + // long-running operation should document the metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that + // originally returns it. 
If you use the default HTTP mapping, + // the + // `name` should have the format of `operations/some/unique/name`. + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. + // If the original + // method returns no data on success, such as `Delete`, the response + // is + // `google.protobuf.Empty`. If the original method is + // standard + // `Get`/`Create`/`Update`, the response should be the resource. For + // other + // methods, the response should have the type `XxxResponse`, where + // `Xxx` + // is the original method name. For example, if the original method + // name + // is `TakeSnapshot()`, the inferred response type + // is + // `TakeSnapshotResponse`. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Option: A protocol buffer option, which can be attached to a message, +// field, +// enumeration, etc. +type Option struct { + // Name: The option's name. For protobuf built-in options (options + // defined in + // descriptor.proto), this is the short name. For example, + // "map_entry". + // For custom options, it should be the fully-qualified name. For + // example, + // "google.api.http". + Name string `json:"name,omitempty"` + + // Value: The option's value packed in an Any message. If the value is a + // primitive, + // the corresponding wrapper type defined in + // google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an + // int32 + // value using the google.protobuf.Int32Value type. + Value googleapi.RawMessage `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Option) MarshalJSON() ([]byte, error) { + type noMethod Option + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Page: Represents a documentation page. A page can contain subpages to +// represent +// nested documentation set structure. +type Page struct { + // Content: The Markdown content of the page. You can use (== + // include {path} ==) + // to include content from a Markdown file. + Content string `json:"content,omitempty"` + + // Name: The name of the page. It will be used as an identity of the + // page to + // generate URI of the page, text of the link to this page in + // navigation, + // etc. The full page name (start from the root page name to this + // page + // concatenated with `.`) can be used as reference to the page in + // your + // documentation. For example: + //
<pre><code>pages:
+	// - name: Tutorial
+	//   content: (== include tutorial.md ==)
+	//   subpages:
+	//   - name: Java
+	//     content: (== include tutorial_java.md
+	// ==)
+	// </code></pre>
      + // You can reference `Java` page using Markdown reference link + // syntax: + // `Java`. + Name string `json:"name,omitempty"` + + // Subpages: Subpages of this page. The order of subpages specified here + // will be + // honored in the generated docset. + Subpages []*Page `json:"subpages,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Content") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Content") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Page) MarshalJSON() ([]byte, error) { + type noMethod Page + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Service: `Service` is the root object of Google service configuration +// schema. It +// describes basic information about a service, such as the name and +// the +// title, and delegates other aspects to sub-sections. Each sub-section +// is +// either a proto message or a repeated proto message that configures +// a +// specific aspect, such as auth. See each proto message definition for +// details. +// +// Example: +// +// type: google.api.Service +// config_version: 3 +// name: calendar.googleapis.com +// title: Google Calendar API +// apis: +// - name: google.calendar.v3.Calendar +// authentication: +// providers: +// - id: google_calendar_auth +// jwks_uri: https://www.googleapis.com/oauth2/v1/certs +// issuer: https://securetoken.google.com +// rules: +// - selector: "*" +// requirements: +// provider_id: google_calendar_auth +type Service struct { + // Apis: A list of API interfaces exported by this service. Only the + // `name` field + // of the google.protobuf.Api needs to be provided by the + // configuration + // author, as the remaining fields will be derived from the IDL during + // the + // normalization process. It is an error to specify an API interface + // here + // which cannot be resolved against the associated IDL files. + Apis []*Api `json:"apis,omitempty"` + + // Authentication: Auth configuration. + Authentication *Authentication `json:"authentication,omitempty"` + + // Backend: API backend configuration. + Backend *Backend `json:"backend,omitempty"` + + // ConfigVersion: The version of the service configuration. The config + // version may + // influence interpretation of the configuration, for example, + // to + // determine defaults. This is documented together with + // applicable + // options. The current default for the config version itself is `3`. + ConfigVersion int64 `json:"configVersion,omitempty"` + + // Context: Context configuration. + Context *Context `json:"context,omitempty"` + + // Control: Configuration for the service control plane. + Control *Control `json:"control,omitempty"` + + // CustomError: Custom error configuration. 
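+	//
+	// Illustrative sketch, not part of the generated code: the YAML example
+	// in the type comment above corresponds roughly to this literal; only
+	// fields defined on Service are used, and the authentication block is
+	// omitted to keep the sketch minimal.
+	//
+	//	svc := &Service{
+	//		ConfigVersion: 3,
+	//		Name:          "calendar.googleapis.com",
+	//		Title:         "Google Calendar API",
+	//		Apis:          []*Api{{Name: "google.calendar.v3.Calendar"}},
+	//	}
+	//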
+ CustomError *CustomError `json:"customError,omitempty"` + + // Documentation: Additional API documentation. + Documentation *Documentation `json:"documentation,omitempty"` + + // Endpoints: Configuration for network endpoints. If this is empty, + // then an endpoint + // with the same name as the service is automatically generated to + // service all + // defined APIs. + Endpoints []*Endpoint `json:"endpoints,omitempty"` + + // Enums: A list of all enum types included in this API service. + // Enums + // referenced directly or indirectly by the `apis` are + // automatically + // included. Enums which are not referenced but shall be + // included + // should be listed here by name. Example: + // + // enums: + // - name: google.someapi.v1.SomeEnum + Enums []*Enum `json:"enums,omitempty"` + + // Experimental: Experimental configuration. + Experimental *Experimental `json:"experimental,omitempty"` + + // Http: HTTP configuration. + Http *Http `json:"http,omitempty"` + + // Id: A unique ID for a specific instance of this message, typically + // assigned + // by the client for tracking purpose. If empty, the server may choose + // to + // generate one instead. + Id string `json:"id,omitempty"` + + // Logging: Logging configuration. + Logging *Logging `json:"logging,omitempty"` + + // Logs: Defines the logs used by this service. + Logs []*LogDescriptor `json:"logs,omitempty"` + + // Metrics: Defines the metrics used by this service. + Metrics []*MetricDescriptor `json:"metrics,omitempty"` + + // MonitoredResources: Defines the monitored resources used by this + // service. This is required + // by the Service.monitoring and Service.logging configurations. + MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` + + // Monitoring: Monitoring configuration. + Monitoring *Monitoring `json:"monitoring,omitempty"` + + // Name: The DNS address at which this service is available, + // e.g. `calendar.googleapis.com`. + Name string `json:"name,omitempty"` + + // ProducerProjectId: The id of the Google developer project that owns + // the service. + // Members of this project can manage the service configuration, + // manage consumption of the service, etc. + ProducerProjectId string `json:"producerProjectId,omitempty"` + + // SystemParameters: System parameter configuration. + SystemParameters *SystemParameters `json:"systemParameters,omitempty"` + + // SystemTypes: A list of all proto message types included in this API + // service. + // It serves similar purpose as [google.api.Service.types], except + // that + // these types are not needed by user-defined APIs. Therefore, they will + // not + // show up in the generated discovery doc. This field should only be + // used + // to define system APIs in ESF. + SystemTypes []*Type `json:"systemTypes,omitempty"` + + // Title: The product title associated with this service. + Title string `json:"title,omitempty"` + + // Types: A list of all proto message types included in this API + // service. + // Types referenced directly or indirectly by the `apis` + // are + // automatically included. Messages which are not referenced but + // shall be included, such as types used by the `google.protobuf.Any` + // type, + // should be listed here by name. Example: + // + // types: + // - name: google.protobuf.Int32 + Types []*Type `json:"types,omitempty"` + + // Usage: Configuration controlling usage of this service. + Usage *Usage `json:"usage,omitempty"` + + // Visibility: API visibility configuration. 
+ Visibility *Visibility `json:"visibility,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Apis") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Apis") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Service) MarshalJSON() ([]byte, error) { + type noMethod Service + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SourceContext: `SourceContext` represents information about the +// source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // FileName: The path-qualified name of the .proto file that contained + // the associated + // protobuf element. For example: + // "google/protobuf/source_context.proto". + FileName string `json:"fileName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FileName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FileName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SourceContext) MarshalJSON() ([]byte, error) { + type noMethod SourceContext + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is +// suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of +// google.rpc.Code, but it may accept additional error codes if needed. +// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error +// details or +// localize it in the client. 
The optional error details may contain +// arbitrary +// information about the error. There is a predefined set of error +// detail types +// in the package `google.rpc` which can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it +// is not necessarily the actual wire format. When the `Status` message +// is +// exposed in different client libraries and different wire protocols, +// it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety +// of +// environments, either with or without APIs, to provide a +// consistent developer experience across different +// environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the +// client, +// it may embed the `Status` in the normal response to indicate the +// partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may +// have a `Status` message for error reporting purpose. +// +// - Batch operations. If a client uses batch request and batch +// response, the +// `Status` message should be used directly inside batch response, +// one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation +// results in its response, the status of those operations should +// be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could +// be used directly after any stripping needed for security/privacy +// reasons. +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There will + // be a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any + // user-facing error message should be localized and sent in + // the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
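+	//
+	// Illustrative sketch, not part of the generated code: inspecting the
+	// Status carried by a finished Operation; op is assumed to be an
+	// *Operation obtained from an earlier call in this package.
+	//
+	//	if op.Done && op.Error != nil {
+	//		return fmt.Errorf("operation failed: code=%d message=%q", op.Error.Code, op.Error.Message)
+	//	}
+	//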
+ NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type noMethod Status + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SystemParameter: Define a parameter's name and location. The +// parameter may be passed as either +// an HTTP header or a URL query parameter, and if both are passed the +// behavior +// is implementation-dependent. +type SystemParameter struct { + // HttpHeader: Define the HTTP header name to use for the parameter. It + // is case + // insensitive. + HttpHeader string `json:"httpHeader,omitempty"` + + // Name: Define the name of the parameter, such as "api_key" . It is + // case sensitive. + Name string `json:"name,omitempty"` + + // UrlQueryParameter: Define the URL query parameter name to use for the + // parameter. It is case + // sensitive. + UrlQueryParameter string `json:"urlQueryParameter,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HttpHeader") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HttpHeader") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SystemParameter) MarshalJSON() ([]byte, error) { + type noMethod SystemParameter + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SystemParameterRule: Define a system parameter rule mapping system +// parameter definitions to +// methods. +type SystemParameterRule struct { + // Parameters: Define parameters. Multiple names may be defined for a + // parameter. + // For a given method call, only one of them should be used. If + // multiple + // names are used the behavior is implementation-dependent. + // If none of the specified names are present the behavior + // is + // parameter-dependent. + Parameters []*SystemParameter `json:"parameters,omitempty"` + + // Selector: Selects the methods to which this rule applies. Use '*' to + // indicate all + // methods in all APIs. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Parameters") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Parameters") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SystemParameterRule) MarshalJSON() ([]byte, error) { + type noMethod SystemParameterRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SystemParameters: ### System parameter configuration +// +// A system parameter is a special kind of parameter defined by the +// API +// system, not by an individual API. It is typically mapped to an HTTP +// header +// and/or a URL query parameter. This configuration specifies which +// methods +// change the names of the system parameters. +type SystemParameters struct { + // Rules: Define system parameters. + // + // The parameters defined here will override the default + // parameters + // implemented by the system. If this field is missing from the + // service + // config, default system parameters will be used. Default system + // parameters + // and names is implementation-dependent. + // + // Example: define api key for all methods + // + // system_parameters + // rules: + // - selector: "*" + // parameters: + // - name: api_key + // url_query_parameter: api_key + // + // + // Example: define 2 api key names for a specific method. + // + // system_parameters + // rules: + // - selector: "/ListShelves" + // parameters: + // - name: api_key + // http_header: Api-Key1 + // - name: api_key + // http_header: Api-Key2 + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*SystemParameterRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rules") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rules") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SystemParameters) MarshalJSON() ([]byte, error) { + type noMethod SystemParameters + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Type: A protocol buffer message type. +type Type struct { + // Fields: The list of fields. + Fields []*Field `json:"fields,omitempty"` + + // Name: The fully qualified message name. + Name string `json:"name,omitempty"` + + // Oneofs: The list of types appearing in `oneof` definitions in this + // type. + Oneofs []string `json:"oneofs,omitempty"` + + // Options: The protocol buffer options. + Options []*Option `json:"options,omitempty"` + + // SourceContext: The source context. + SourceContext *SourceContext `json:"sourceContext,omitempty"` + + // Syntax: The source syntax. + // + // Possible values: + // "SYNTAX_PROTO2" - Syntax `proto2`. + // "SYNTAX_PROTO3" - Syntax `proto3`. + Syntax string `json:"syntax,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Fields") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fields") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Type) MarshalJSON() ([]byte, error) { + type noMethod Type + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Usage: Configuration controlling usage of a service. +type Usage struct { + // ProducerNotificationChannel: The full resource name of a channel used + // for sending notifications to the + // service producer. + // + // Google Service Management currently only supports + // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a + // notification + // channel. To use Google Cloud Pub/Sub as the channel, this must be the + // name + // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name + // format + // documented in https://cloud.google.com/pubsub/docs/overview. + ProducerNotificationChannel string `json:"producerNotificationChannel,omitempty"` + + // Requirements: Requirements that must be satisfied before a consumer + // project can use the + // service. Each requirement is of the form + // /; + // for example 'serviceusage.googleapis.com/billing-enabled'. + Requirements []string `json:"requirements,omitempty"` + + // Rules: A list of usage rules that apply to individual API + // methods. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*UsageRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ProducerNotificationChannel") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "ProducerNotificationChannel") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Usage) MarshalJSON() ([]byte, error) { + type noMethod Usage + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsageRule: Usage configuration rules for the service. +// +// NOTE: Under development. +// +// +// Use this rule to configure unregistered calls for the service. 
+// Unregistered +// calls are calls that do not contain consumer project +// identity. +// (Example: calls that do not contain an API key). +// By default, API methods do not allow unregistered calls, and each +// method call +// must be identified by a consumer project identity. Use this rule +// to +// allow/disallow unregistered calls. +// +// Example of an API that wants to allow unregistered calls for entire +// service. +// +// usage: +// rules: +// - selector: "*" +// allow_unregistered_calls: true +// +// Example of a method that wants to allow unregistered calls. +// +// usage: +// rules: +// - selector: +// "google.example.library.v1.LibraryService.CreateBook" +// allow_unregistered_calls: true +type UsageRule struct { + // AllowUnregisteredCalls: True, if the method allows unregistered + // calls; false otherwise. + AllowUnregisteredCalls bool `json:"allowUnregisteredCalls,omitempty"` + + // Selector: Selects the methods to which this rule applies. Use '*' to + // indicate all + // methods in all APIs. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AllowUnregisteredCalls") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowUnregisteredCalls") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UsageRule) MarshalJSON() ([]byte, error) { + type noMethod UsageRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Visibility: `Visibility` defines restrictions for the visibility of +// service +// elements. Restrictions are specified using visibility labels +// (e.g., TRUSTED_TESTER) that are elsewhere linked to users and +// projects. +// +// Users and projects can have access to more than one visibility label. +// The +// effective visibility for multiple labels is the union of each +// label's +// elements, plus any unrestricted elements. +// +// If an element and its parents have no restrictions, visibility +// is +// unconditionally granted. +// +// Example: +// +// visibility: +// rules: +// - selector: google.calendar.Calendar.EnhancedSearch +// restriction: TRUSTED_TESTER +// - selector: google.calendar.Calendar.Delegate +// restriction: GOOGLE_INTERNAL +// +// Here, all methods are publicly visible except for the restricted +// methods +// EnhancedSearch and Delegate. +type Visibility struct { + // Rules: A list of visibility rules that apply to individual API + // elements. + // + // **NOTE:** All service configuration rules follow "last one wins" + // order. + Rules []*VisibilityRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rules") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rules") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Visibility) MarshalJSON() ([]byte, error) { + type noMethod Visibility + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VisibilityRule: A visibility rule provides visibility configuration +// for an individual API +// element. +type VisibilityRule struct { + // Restriction: A comma-separated list of visibility labels that apply + // to the `selector`. + // Any of the listed labels can be used to grant the visibility. + // + // If a rule has multiple labels, removing one of the labels but not all + // of + // them can break clients. + // + // Example: + // + // visibility: + // rules: + // - selector: google.calendar.Calendar.EnhancedSearch + // restriction: GOOGLE_INTERNAL, TRUSTED_TESTER + // + // Removing GOOGLE_INTERNAL from this restriction will break clients + // that + // rely on this method and only had access to it through + // GOOGLE_INTERNAL. + Restriction string `json:"restriction,omitempty"` + + // Selector: Selects methods, messages, fields, enums, etc. to which + // this rule applies. + // + // Refer to selector for syntax details. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Restriction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Restriction") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VisibilityRule) MarshalJSON() ([]byte, error) { + type noMethod VisibilityRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "serviceuser.projects.services.disable": + +type ProjectsServicesDisableCall struct { + s *APIService + name string + disableservicerequest *DisableServiceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Disable: Disable a managed service for a +// consumer. 
+//
+// Operation
+func (r *ProjectsServicesService) Disable(name string, disableservicerequest *DisableServiceRequest) *ProjectsServicesDisableCall {
+	c := &ProjectsServicesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	c.disableservicerequest = disableservicerequest
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsServicesDisableCall) Fields(s ...googleapi.Field) *ProjectsServicesDisableCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsServicesDisableCall) Context(ctx context.Context) *ProjectsServicesDisableCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ProjectsServicesDisableCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsServicesDisableCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.disableservicerequest)
+	if err != nil {
+		return nil, err
+	}
+	reqHeaders.Set("Content-Type", "application/json")
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:disable")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("POST", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"name": c.name,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "serviceuser.projects.services.disable" call.
+// Exactly one of *Operation or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ProjectsServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Disable a managed service for a consumer.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + // "flatPath": "v1/projects/{projectsId}/services/{servicesId}:disable", + // "httpMethod": "POST", + // "id": "serviceuser.projects.services.disable", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Name of the consumer and the service to disable for that consumer.\n\nThe Service User implementation accepts the following forms for consumer:\n- \"project:\u003cproject_id\u003e\"\n\nA valid path would be:\n- /v1/projects/my-project/services/servicemanagement.googleapis.com:disable", + // "location": "path", + // "pattern": "^projects/[^/]+/services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:disable", + // "request": { + // "$ref": "DisableServiceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "serviceuser.projects.services.enable": + +type ProjectsServicesEnableCall struct { + s *APIService + name string + enableservicerequest *EnableServiceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Enable: Enable a managed service for a consumer with the default +// settings. +// +// Operation +// +// google.rpc.Status errors may contain a +// google.rpc.PreconditionFailure error detail. +func (r *ProjectsServicesService) Enable(name string, enableservicerequest *EnableServiceRequest) *ProjectsServicesEnableCall { + c := &ProjectsServicesEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.enableservicerequest = enableservicerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServicesEnableCall) Fields(s ...googleapi.Field) *ProjectsServicesEnableCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServicesEnableCall) Context(ctx context.Context) *ProjectsServicesEnableCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsServicesEnableCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServicesEnableCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.enableservicerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:enable") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "serviceuser.projects.services.enable" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enable a managed service for a consumer with the default settings.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e\n\ngoogle.rpc.Status errors may contain a\ngoogle.rpc.PreconditionFailure error detail.", + // "flatPath": "v1/projects/{projectsId}/services/{servicesId}:enable", + // "httpMethod": "POST", + // "id": "serviceuser.projects.services.enable", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Name of the consumer and the service to enable for that consumer.\n\nA valid path would be:\n- /v1/projects/my-project/services/servicemanagement.googleapis.com:enable", + // "location": "path", + // "pattern": "^projects/[^/]+/services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:enable", + // "request": { + // "$ref": "EnableServiceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "serviceuser.projects.services.list": + +type ProjectsServicesListCall struct { + s *APIService + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List enabled services for the 
specified consumer.
+func (r *ProjectsServicesService) List(parent string) *ProjectsServicesListCall {
+	c := &ProjectsServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.parent = parent
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Requested size of
+// the next page of data.
+func (c *ProjectsServicesListCall) PageSize(pageSize int64) *ProjectsServicesListCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Token identifying
+// which result to start with; returned by a previous list
+// call.
+func (c *ProjectsServicesListCall) PageToken(pageToken string) *ProjectsServicesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsServicesListCall) Fields(s ...googleapi.Field) *ProjectsServicesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of In-None-Match.
+func (c *ProjectsServicesListCall) IfNoneMatch(entityTag string) *ProjectsServicesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsServicesListCall) Context(ctx context.Context) *ProjectsServicesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ProjectsServicesListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsServicesListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/services")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "serviceuser.projects.services.list" call.
+// Exactly one of *ListEnabledServicesResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *ListEnabledServicesResponse.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsServicesListCall) Do(opts ...googleapi.CallOption) (*ListEnabledServicesResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListEnabledServicesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List enabled services for the specified consumer.", + // "flatPath": "v1/projects/{projectsId}/services", + // "httpMethod": "GET", + // "id": "serviceuser.projects.services.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Requested size of the next page of data.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token identifying which result to start with; returned by a previous list\ncall.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "List enabled services for the specified parent.\n\nAn example valid parent would be:\n- projects/my-project", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/services", + // "response": { + // "$ref": "ListEnabledServicesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsServicesListCall) Pages(ctx context.Context, f func(*ListEnabledServicesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/vendor/google.golang.org/api/sheets/v4/sheets-api.json b/vendor/google.golang.org/api/sheets/v4/sheets-api.json index 26c3aa99c..343a1820e 100644 --- a/vendor/google.golang.org/api/sheets/v4/sheets-api.json +++ b/vendor/google.golang.org/api/sheets/v4/sheets-api.json @@ -1,4172 +1,4193 @@ { - "ownerName": "Google", - "resources": { - "spreadsheets": { - "methods": { - "create": { - "httpMethod": "POST", - "parameterOrder": [], - "response": { - "$ref": "Spreadsheet" - }, - "parameters": {}, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "flatPath": "v4/spreadsheets", - "id": "sheets.spreadsheets.create", - "path": "v4/spreadsheets", - "request": { - "$ref": "Spreadsheet" - }, - "description": "Creates a spreadsheet, returning the newly created spreadsheet." 
+ "schemas": { + "BatchUpdateValuesResponse": { + "description": "The response when updating a range of values in a spreadsheet.", + "type": "object", + "properties": { + "totalUpdatedColumns": { + "description": "The total number of columns where at least one cell in the column was\nupdated.", + "format": "int32", + "type": "integer" }, - "batchUpdate": { - "path": "v4/spreadsheets/{spreadsheetId}:batchUpdate", - "id": "sheets.spreadsheets.batchUpdate", - "request": { - "$ref": "BatchUpdateSpreadsheetRequest" - }, - "description": "Applies one or more updates to the spreadsheet.\n\nEach request is validated before\nbeing applied. If any request is not valid then the entire request will\nfail and nothing will be applied.\n\nSome requests have replies to\ngive you some information about how\nthey are applied. The replies will mirror the requests. For example,\nif you applied 4 updates and the 3rd one had a reply, then the\nresponse will have 2 empty replies, the actual reply, and another empty\nreply, in that order.\n\nDue to the collaborative nature of spreadsheets, it is not guaranteed that\nthe spreadsheet will reflect exactly your changes after this completes,\nhowever it is guaranteed that the updates in the request will be\napplied together atomically. Your changes may be altered with respect to\ncollaborator changes. If there are no collaborators, the spreadsheet\nshould reflect your changes.", - "response": { - "$ref": "BatchUpdateSpreadsheetResponse" - }, - "parameterOrder": [ - "spreadsheetId" - ], - "httpMethod": "POST", - "parameters": { - "spreadsheetId": { - "location": "path", - "description": "The spreadsheet to apply the updates to.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "flatPath": "v4/spreadsheets/{spreadsheetId}:batchUpdate" + "spreadsheetId": { + "description": "The spreadsheet the updates were applied to.", + "type": "string" }, - "get": { - "response": { - "$ref": "Spreadsheet" - }, - "parameterOrder": [ - "spreadsheetId" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/spreadsheets", - "https://www.googleapis.com/auth/spreadsheets.readonly" - ], - "parameters": { - "spreadsheetId": { - "description": "The spreadsheet to request.", - "required": true, - "type": "string", - "location": "path" - }, - "ranges": { - "repeated": true, - "location": "query", - "description": "The ranges to retrieve from the spreadsheet.", - "type": "string" - }, - "includeGridData": { - "location": "query", - "description": "True if grid data should be returned.\nThis parameter is ignored if a field mask was set in the request.", - "type": "boolean" - } + "totalUpdatedRows": { + "description": "The total number of rows where at least one cell in the row was updated.", + "format": "int32", + "type": "integer" + }, + "responses": { + "type": "array", + "items": { + "$ref": "UpdateValuesResponse" }, - "flatPath": "v4/spreadsheets/{spreadsheetId}", - "path": "v4/spreadsheets/{spreadsheetId}", - "id": "sheets.spreadsheets.get", - "description": "Returns the spreadsheet at the given ID.\nThe caller must specify the spreadsheet ID.\n\nBy default, data within grids will not be returned.\nYou can include grid data one of two ways:\n\n* Specify a field mask listing your desired fields using the `fields` URL\nparameter in HTTP\n\n* Set the 
includeGridData\nURL parameter to true. If a field mask is set, the `includeGridData`\nparameter is ignored\n\nFor large spreadsheets, it is recommended to retrieve only the specific\nfields of the spreadsheet that you want.\n\nTo retrieve only subsets of the spreadsheet, use the\nranges URL parameter.\nMultiple ranges can be specified. Limiting the range will\nreturn only the portions of the spreadsheet that intersect the requested\nranges. Ranges are specified using A1 notation." + "description": "One UpdateValuesResponse per requested range, in the same order as\nthe requests appeared." + }, + "totalUpdatedSheets": { + "description": "The total number of sheets where at least one cell in the sheet was\nupdated.", + "format": "int32", + "type": "integer" + }, + "totalUpdatedCells": { + "description": "The total number of cells updated.", + "format": "int32", + "type": "integer" } }, - "resources": { - "sheets": { - "methods": { - "copyTo": { - "response": { - "$ref": "SheetProperties" - }, - "parameterOrder": [ - "spreadsheetId", - "sheetId" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "parameters": { - "sheetId": { - "location": "path", - "description": "The ID of the sheet to copy.", - "format": "int32", - "required": true, - "type": "integer" - }, - "spreadsheetId": { - "location": "path", - "description": "The ID of the spreadsheet containing the sheet to copy.", - "required": true, - "type": "string" - } - }, - "flatPath": "v4/spreadsheets/{spreadsheetId}/sheets/{sheetId}:copyTo", - "path": "v4/spreadsheets/{spreadsheetId}/sheets/{sheetId}:copyTo", - "id": "sheets.spreadsheets.sheets.copyTo", - "description": "Copies a single sheet from a spreadsheet to another spreadsheet.\nReturns the properties of the newly created sheet.", - "request": { - "$ref": "CopySheetToAnotherSpreadsheetRequest" - } - } + "id": "BatchUpdateValuesResponse" + }, + "SortRangeRequest": { + "properties": { + "sortSpecs": { + "description": "The sort order per column. 
Later specifications are used when values\nare equal in the earlier specifications.", + "type": "array", + "items": { + "$ref": "SortSpec" } }, - "values": { - "methods": { - "batchGet": { - "description": "Returns one or more ranges of values from a spreadsheet.\nThe caller must specify the spreadsheet ID and one or more ranges.", - "httpMethod": "GET", - "parameterOrder": [ - "spreadsheetId" - ], - "response": { - "$ref": "BatchGetValuesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/spreadsheets", - "https://www.googleapis.com/auth/spreadsheets.readonly" - ], - "parameters": { - "valueRenderOption": { - "enum": [ - "FORMATTED_VALUE", - "UNFORMATTED_VALUE", - "FORMULA" - ], - "description": "How values should be represented in the output.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", - "type": "string", - "location": "query" - }, - "dateTimeRenderOption": { - "location": "query", - "enum": [ - "SERIAL_NUMBER", - "FORMATTED_STRING" - ], - "description": "How dates, times, and durations should be represented in the output.\nThis is ignored if value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", - "type": "string" - }, - "ranges": { - "description": "The A1 notation of the values to retrieve.", - "type": "string", - "repeated": true, - "location": "query" - }, - "majorDimension": { - "location": "query", - "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" - ], - "description": "The major dimension that results should use.\n\nFor example, if the spreadsheet data is: `A1=1,B1=2,A2=3,B2=4`,\nthen requesting `range=A1:B2,majorDimension=ROWS` will return\n`[[1,2],[3,4]]`,\nwhereas requesting `range=A1:B2,majorDimension=COLUMNS` will return\n`[[1,3],[2,4]]`.", - "type": "string" - }, - "spreadsheetId": { - "location": "path", - "description": "The ID of the spreadsheet to retrieve data from.", - "required": true, - "type": "string" - } - }, - "flatPath": "v4/spreadsheets/{spreadsheetId}/values:batchGet", - "id": "sheets.spreadsheets.values.batchGet", - "path": "v4/spreadsheets/{spreadsheetId}/values:batchGet" - }, - "clear": { - "description": "Clears values from a spreadsheet.\nThe caller must specify the spreadsheet ID and range.\nOnly values are cleared -- all other properties of the cell (such as\nformatting, data validation, etc..) 
are kept.", - "request": { - "$ref": "ClearValuesRequest" - }, - "response": { - "$ref": "ClearValuesResponse" - }, - "parameterOrder": [ - "spreadsheetId", - "range" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "parameters": { - "spreadsheetId": { - "location": "path", - "description": "The ID of the spreadsheet to update.", - "required": true, - "type": "string" - }, - "range": { - "location": "path", - "description": "The A1 notation of the values to clear.", - "required": true, - "type": "string" - } - }, - "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}:clear", - "path": "v4/spreadsheets/{spreadsheetId}/values/{range}:clear", - "id": "sheets.spreadsheets.values.clear" - }, - "append": { - "response": { - "$ref": "AppendValuesResponse" - }, - "parameterOrder": [ - "spreadsheetId", - "range" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "parameters": { - "spreadsheetId": { - "description": "The ID of the spreadsheet to update.", - "required": true, - "type": "string", - "location": "path" - }, - "responseValueRenderOption": { - "location": "query", - "enum": [ - "FORMATTED_VALUE", - "UNFORMATTED_VALUE", - "FORMULA" - ], - "description": "Determines how values in the response should be rendered.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", - "type": "string" - }, - "insertDataOption": { - "enum": [ - "OVERWRITE", - "INSERT_ROWS" - ], - "description": "How the input data should be inserted.", - "type": "string", - "location": "query" - }, - "valueInputOption": { - "location": "query", - "enum": [ - "INPUT_VALUE_OPTION_UNSPECIFIED", - "RAW", - "USER_ENTERED" - ], - "description": "How the input data should be interpreted.", - "type": "string" - }, - "responseDateTimeRenderOption": { - "location": "query", - "enum": [ - "SERIAL_NUMBER", - "FORMATTED_STRING" - ], - "description": "Determines how dates, times, and durations in the response should be\nrendered. This is ignored if response_value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", - "type": "string" - }, - "includeValuesInResponse": { - "location": "query", - "description": "Determines if the update response should include the values\nof the cells that were appended. By default, responses\ndo not include the updated values.", - "type": "boolean" - }, - "range": { - "description": "The A1 notation of a range to search for a logical table of data.\nValues will be appended after the last row of the table.", - "required": true, - "type": "string", - "location": "path" - } - }, - "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}:append", - "path": "v4/spreadsheets/{spreadsheetId}/values/{range}:append", - "id": "sheets.spreadsheets.values.append", - "description": "Appends values to a spreadsheet. The input range is used to search for\nexisting data and find a \"table\" within that range. Values will be\nappended to the next row of the table, starting with the first column of\nthe table. See the\n[guide](/sheets/guides/values#appending_values)\nand\n[sample code](/sheets/samples/writing#append_values)\nfor specific details of how tables are detected and data is appended.\n\nThe caller must specify the spreadsheet ID, range, and\na valueInputOption. 
The `valueInputOption` only\ncontrols how the input data will be added to the sheet (column-wise or\nrow-wise), it does not influence what cell the data starts being written\nto.", - "request": { - "$ref": "ValueRange" - } - }, - "batchClear": { - "response": { - "$ref": "BatchClearValuesResponse" - }, - "parameterOrder": [ - "spreadsheetId" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "parameters": { - "spreadsheetId": { - "location": "path", - "description": "The ID of the spreadsheet to update.", - "required": true, - "type": "string" - } - }, - "flatPath": "v4/spreadsheets/{spreadsheetId}/values:batchClear", - "path": "v4/spreadsheets/{spreadsheetId}/values:batchClear", - "id": "sheets.spreadsheets.values.batchClear", - "description": "Clears one or more ranges of values from a spreadsheet.\nThe caller must specify the spreadsheet ID and one or more ranges.\nOnly values are cleared -- all other properties of the cell (such as\nformatting, data validation, etc..) are kept.", - "request": { - "$ref": "BatchClearValuesRequest" - } - }, - "get": { - "description": "Returns a range of values from a spreadsheet.\nThe caller must specify the spreadsheet ID and a range.", - "response": { - "$ref": "ValueRange" - }, - "parameterOrder": [ - "spreadsheetId", - "range" - ], - "httpMethod": "GET", - "parameters": { - "valueRenderOption": { - "enum": [ - "FORMATTED_VALUE", - "UNFORMATTED_VALUE", - "FORMULA" - ], - "description": "How values should be represented in the output.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", - "type": "string", - "location": "query" - }, - "dateTimeRenderOption": { - "enum": [ - "SERIAL_NUMBER", - "FORMATTED_STRING" - ], - "description": "How dates, times, and durations should be represented in the output.\nThis is ignored if value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", - "type": "string", - "location": "query" - }, - "majorDimension": { - "location": "query", - "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" - ], - "description": "The major dimension that results should use.\n\nFor example, if the spreadsheet data is: `A1=1,B1=2,A2=3,B2=4`,\nthen requesting `range=A1:B2,majorDimension=ROWS` will return\n`[[1,2],[3,4]]`,\nwhereas requesting `range=A1:B2,majorDimension=COLUMNS` will return\n`[[1,3],[2,4]]`.", - "type": "string" - }, - "spreadsheetId": { - "description": "The ID of the spreadsheet to retrieve data from.", - "required": true, - "type": "string", - "location": "path" - }, - "range": { - "description": "The A1 notation of the values to retrieve.", - "required": true, - "type": "string", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/spreadsheets", - "https://www.googleapis.com/auth/spreadsheets.readonly" - ], - "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}", - "path": "v4/spreadsheets/{spreadsheetId}/values/{range}", - "id": "sheets.spreadsheets.values.get" - }, - "update": { - "response": { - "$ref": "UpdateValuesResponse" - }, - "parameterOrder": [ - "spreadsheetId", - "range" - ], - "httpMethod": "PUT", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "parameters": { - "valueInputOption": { - "enum": [ - "INPUT_VALUE_OPTION_UNSPECIFIED", - "RAW", - 
"USER_ENTERED" - ], - "description": "How the input data should be interpreted.", - "type": "string", - "location": "query" - }, - "responseDateTimeRenderOption": { - "location": "query", - "enum": [ - "SERIAL_NUMBER", - "FORMATTED_STRING" - ], - "description": "Determines how dates, times, and durations in the response should be\nrendered. This is ignored if response_value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", - "type": "string" - }, - "range": { - "location": "path", - "description": "The A1 notation of the values to update.", - "required": true, - "type": "string" - }, - "includeValuesInResponse": { - "location": "query", - "description": "Determines if the update response should include the values\nof the cells that were updated. By default, responses\ndo not include the updated values.\nIf the range to write was larger than than the range actually written,\nthe response will include all values in the requested range (excluding\ntrailing empty rows and columns).", - "type": "boolean" - }, - "spreadsheetId": { - "description": "The ID of the spreadsheet to update.", - "required": true, - "type": "string", - "location": "path" - }, - "responseValueRenderOption": { - "enum": [ - "FORMATTED_VALUE", - "UNFORMATTED_VALUE", - "FORMULA" - ], - "description": "Determines how values in the response should be rendered.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", - "type": "string", - "location": "query" - } - }, - "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}", - "path": "v4/spreadsheets/{spreadsheetId}/values/{range}", - "id": "sheets.spreadsheets.values.update", - "description": "Sets values in a range of a spreadsheet.\nThe caller must specify the spreadsheet ID, range, and\na valueInputOption.", - "request": { - "$ref": "ValueRange" - } - }, - "batchUpdate": { - "response": { - "$ref": "BatchUpdateValuesResponse" - }, - "parameterOrder": [ - "spreadsheetId" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/spreadsheets" - ], - "parameters": { - "spreadsheetId": { - "description": "The ID of the spreadsheet to update.", - "required": true, - "type": "string", - "location": "path" - } - }, - "flatPath": "v4/spreadsheets/{spreadsheetId}/values:batchUpdate", - "path": "v4/spreadsheets/{spreadsheetId}/values:batchUpdate", - "id": "sheets.spreadsheets.values.batchUpdate", - "description": "Sets values in one or more ranges of a spreadsheet.\nThe caller must specify the spreadsheet ID,\na valueInputOption, and one or more\nValueRanges.", - "request": { - "$ref": "BatchUpdateValuesRequest" - } - } - } + "range": { + "$ref": "GridRange", + "description": "The range to sort." } - } - } - }, - "parameters": { - "fields": { - "location": "query", - "description": "Selector specifying which fields to include in a partial response.", - "type": "string" + }, + "id": "SortRangeRequest", + "description": "Sorts data in rows based on a sort order per column.", + "type": "object" }, - "uploadType": { - "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string" + "MergeCellsRequest": { + "id": "MergeCellsRequest", + "description": "Merges all cells in the range.", + "type": "object", + "properties": { + "range": { + "$ref": "GridRange", + "description": "The range of cells to merge." 
+ }, + "mergeType": { + "enum": [ + "MERGE_ALL", + "MERGE_COLUMNS", + "MERGE_ROWS" + ], + "description": "How the cells should be merged.", + "type": "string", + "enumDescriptions": [ + "Create a single merge from the range", + "Create a merge for each column in the range", + "Create a merge for each row in the range" + ] + } + } }, - "callback": { - "location": "query", - "description": "JSONP", - "type": "string" + "AddProtectedRangeRequest": { + "id": "AddProtectedRangeRequest", + "description": "Adds a new protected range.", + "type": "object", + "properties": { + "protectedRange": { + "description": "The protected range to be added. The\nprotectedRangeId field is optional; if\none is not set, an id will be randomly generated. (It is an error to\nspecify the ID of a range that already exists.)", + "$ref": "ProtectedRange" + } + } }, - "$.xgafv": { - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" + "BatchClearValuesRequest": { + "description": "The request for clearing more than one range of values in a spreadsheet.", + "type": "object", + "properties": { + "ranges": { + "description": "The ranges to clear, in A1 notation.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "BatchClearValuesRequest" }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" + "DuplicateFilterViewResponse": { + "properties": { + "filter": { + "$ref": "FilterView", + "description": "The newly created filter." + } + }, + "id": "DuplicateFilterViewResponse", + "description": "The result of a filter view being duplicated.", + "type": "object" }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" + "DuplicateSheetResponse": { + "id": "DuplicateSheetResponse", + "description": "The result of duplicating a sheet.", + "type": "object", + "properties": { + "properties": { + "$ref": "SheetProperties", + "description": "The properties of the duplicate sheet." + } + } }, - "access_token": { - "location": "query", - "description": "OAuth access token.", - "type": "string" + "ClearBasicFilterRequest": { + "description": "Clears the basic filter, if any exists on the sheet.", + "type": "object", + "properties": { + "sheetId": { + "description": "The sheet ID on which the basic filter should be cleared.", + "format": "int32", + "type": "integer" + } + }, + "id": "ClearBasicFilterRequest" }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" + "TextToColumnsRequest": { + "type": "object", + "properties": { + "delimiter": { + "description": "The delimiter to use. Used only if delimiterType is\nCUSTOM.", + "type": "string" + }, + "source": { + "$ref": "GridRange", + "description": "The source data range. This must span exactly one column." 
+ }, + "delimiterType": { + "enum": [ + "DELIMITER_TYPE_UNSPECIFIED", + "COMMA", + "SEMICOLON", + "PERIOD", + "SPACE", + "CUSTOM" + ], + "description": "The delimiter type to use.", + "type": "string", + "enumDescriptions": [ + "Default value. This value must not be used.", + "\",\"", + "\";\"", + "\".\"", + "\" \"", + "A custom value as defined in delimiter." + ] + } + }, + "id": "TextToColumnsRequest", + "description": "Splits a column of text into multiple columns,\nbased on a delimiter in each cell." }, - "pp": { - "location": "query", - "description": "Pretty-print response.", - "type": "boolean", - "default": "true" + "DeleteBandingRequest": { + "description": "Removes the banded range with the given ID from the spreadsheet.", + "type": "object", + "properties": { + "bandedRangeId": { + "description": "The ID of the banded range to delete.", + "format": "int32", + "type": "integer" + } + }, + "id": "DeleteBandingRequest" }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" + "BatchUpdateSpreadsheetResponse": { + "description": "The reply for batch updating a spreadsheet.", + "type": "object", + "properties": { + "replies": { + "description": "The reply of the updates. This maps 1:1 with the updates, although\nreplies to some requests may be empty.", + "type": "array", + "items": { + "$ref": "Response" + } + }, + "updatedSpreadsheet": { + "$ref": "Spreadsheet", + "description": "The spreadsheet after updates were applied. This is only set if\n[BatchUpdateSpreadsheetRequest.include_spreadsheet_in_response] is `true`." + }, + "spreadsheetId": { + "description": "The spreadsheet the updates were applied to.", + "type": "string" + } + }, + "id": "BatchUpdateSpreadsheetResponse" }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" + "AppendValuesResponse": { + "id": "AppendValuesResponse", + "description": "The response when updating a range of values in a spreadsheet.", + "type": "object", + "properties": { + "updates": { + "$ref": "UpdateValuesResponse", + "description": "Information about the updates that were applied." + }, + "tableRange": { + "description": "The range (in A1 notation) of the table that values are being appended to\n(before the values were appended).\nEmpty if no table was found.", + "type": "string" + }, + "spreadsheetId": { + "description": "The spreadsheet the updates were applied to.", + "type": "string" + } + } }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "AddFilterViewRequest": { + "description": "Adds a filter view.", + "type": "object", + "properties": { + "filter": { + "description": "The filter to add. The filterViewId\nfield is optional; if one is not set, an id will be randomly generated. 
(It\nis an error to specify the ID of a filter that already exists.)", + "$ref": "FilterView" + } + }, + "id": "AddFilterViewRequest" }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true", - "location": "query" - } - }, - "version": "v4", - "baseUrl": "https://sheets.googleapis.com/", - "kind": "discovery#restDescription", - "description": "Reads and writes Google Sheets.", - "servicePath": "", - "basePath": "", - "id": "sheets:v4", - "revision": "20170117", - "documentationLink": "https://developers.google.com/sheets/", - "discoveryVersion": "v1", - "version_module": "True", - "schemas": { - "OverlayPosition": { - "description": "The location an object is overlaid on top of a grid.", + "PivotFilterCriteria": { + "description": "Criteria for showing/hiding rows in a pivot table.", + "type": "object", + "properties": { + "visibleValues": { + "description": "Values that should be included. Values not listed here are excluded.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "PivotFilterCriteria" + }, + "MoveDimensionRequest": { + "description": "Moves one or more rows or columns.", "type": "object", "properties": { - "offsetXPixels": { - "description": "The horizontal offset, in pixels, that the object is offset\nfrom the anchor cell.", + "destinationIndex": { + "description": "The zero-based start index of where to move the source data to,\nbased on the coordinates *before* the source data is removed\nfrom the grid. Existing data will be shifted down or right\n(depending on the dimension) to make room for the moved dimensions.\nThe source dimensions are removed from the grid, so the\nthe data may end up in a different index than specified.\n\nFor example, given `A1..A5` of `0, 1, 2, 3, 4` and wanting to move\n`\"1\"` and `\"2\"` to between `\"3\"` and `\"4\"`, the source would be\n`ROWS [1..3)`,and the destination index would be `\"4\"`\n(the zero-based index of row 5).\nThe end result would be `A1..A5` of `0, 3, 1, 2, 4`.", "format": "int32", "type": "integer" }, - "anchorCell": { - "$ref": "GridCoordinate", - "description": "The cell the object is anchored to." + "source": { + "$ref": "DimensionRange", + "description": "The source dimensions to move." + } + }, + "id": "MoveDimensionRequest" + }, + "AddConditionalFormatRuleRequest": { + "description": "Adds a new conditional format rule at the given index.\nAll subsequent rules' indexes are incremented.", + "type": "object", + "properties": { + "rule": { + "description": "The rule to add.", + "$ref": "ConditionalFormatRule" }, - "offsetYPixels": { - "description": "The vertical offset, in pixels, that the object is offset\nfrom the anchor cell.", + "index": { + "description": "The zero-based index where the rule should be inserted.", "format": "int32", "type": "integer" + } + }, + "id": "AddConditionalFormatRuleRequest" + }, + "ChartSpec": { + "type": "object", + "properties": { + "pieChart": { + "$ref": "PieChartSpec", + "description": "A pie chart specification." }, - "heightPixels": { - "description": "The height of the object, in pixels. Defaults to 371.", - "format": "int32", - "type": "integer" + "basicChart": { + "$ref": "BasicChartSpec", + "description": "A basic chart specification, can be one of many kinds of charts.\nSee BasicChartType for the list of all\ncharts this supports." }, - "widthPixels": { - "description": "The width of the object, in pixels. 
Defaults to 600.", - "format": "int32", - "type": "integer" + "hiddenDimensionStrategy": { + "description": "Determines how the charts will use hidden rows or columns.", + "type": "string", + "enumDescriptions": [ + "Default value, do not use.", + "Charts will skip hidden rows and columns.", + "Charts will skip hidden rows only.", + "Charts will skip hidden columns only.", + "Charts will not skip any hidden rows or columns." + ], + "enum": [ + "CHART_HIDDEN_DIMENSION_STRATEGY_UNSPECIFIED", + "SKIP_HIDDEN_ROWS_AND_COLUMNS", + "SKIP_HIDDEN_ROWS", + "SKIP_HIDDEN_COLUMNS", + "SHOW_ALL" + ] + }, + "title": { + "type": "string", + "description": "The title of the chart." } }, - "id": "OverlayPosition" + "id": "ChartSpec", + "description": "The specifications of a chart." }, - "SpreadsheetProperties": { + "NumberFormat": { "properties": { - "autoRecalc": { + "pattern": { + "description": "Pattern string used for formatting. If not set, a default pattern based on\nthe user's locale will be used if necessary for the given type.\nSee the [Date and Number Formats guide](/sheets/guides/formats) for more\ninformation about the supported patterns.", + "type": "string" + }, + "type": { + "description": "The type of the number format.\nWhen writing, this field must be set.", + "type": "string", "enumDescriptions": [ - "Default value. This value must not be used.", - "Volatile functions are updated on every change.", - "Volatile functions are updated on every change and every minute.", - "Volatile functions are updated on every change and hourly." + "The number format is not specified\nand is based on the contents of the cell.\nDo not explicitly use this.", + "Text formatting, e.g `1000.12`", + "Number formatting, e.g, `1,000.12`", + "Percent formatting, e.g `10.12%`", + "Currency formatting, e.g `$1,000.12`", + "Date formatting, e.g `9/26/2008`", + "Time formatting, e.g `3:59:00 PM`", + "Date+Time formatting, e.g `9/26/08 15:59:00`", + "Scientific number formatting, e.g `1.01E+03`" ], "enum": [ - "RECALCULATION_INTERVAL_UNSPECIFIED", - "ON_CHANGE", - "MINUTE", - "HOUR" - ], - "description": "The amount of time to wait before volatile functions are recalculated.", + "NUMBER_FORMAT_TYPE_UNSPECIFIED", + "TEXT", + "NUMBER", + "PERCENT", + "CURRENCY", + "DATE", + "TIME", + "DATE_TIME", + "SCIENTIFIC" + ] + } + }, + "id": "NumberFormat", + "description": "The number format of a cell.", + "type": "object" + }, + "SheetProperties": { + "type": "object", + "properties": { + "title": { + "description": "The name of the sheet.", "type": "string" }, - "defaultFormat": { - "description": "The default format of all cells in the spreadsheet.\nCellData.effectiveFormat will not be set if the\ncell's format is equal to this default format.\nThis field is read-only.", - "$ref": "CellFormat" + "tabColor": { + "$ref": "Color", + "description": "The color of the tab in the UI." }, - "title": { - "description": "The title of the spreadsheet.", - "type": "string" + "index": { + "description": "The index of the sheet within the spreadsheet.\nWhen adding or updating sheet properties, if this field\nis excluded then the sheet will be added or moved to the end\nof the sheet list. When updating sheet indices or inserting\nsheets, movement is considered in \"before the move\" indexes.\nFor example, if there were 3 sheets (S1, S2, S3) in order to\nmove S1 ahead of S2 the index would have to be set to 2. 
A sheet\nindex update request will be ignored if the requested index is\nidentical to the sheets current index or if the requested new\nindex is equal to the current sheet index + 1.", + "format": "int32", + "type": "integer" }, - "timeZone": { - "description": "The time zone of the spreadsheet, in CLDR format such as\n`America/New_York`. If the time zone isn't recognized, this may\nbe a custom time zone such as `GMT-07:00`.", - "type": "string" + "sheetId": { + "description": "The ID of the sheet. Must be non-negative.\nThis field cannot be changed once set.", + "format": "int32", + "type": "integer" }, - "locale": { - "description": "The locale of the spreadsheet in one of the following formats:\n\n* an ISO 639-1 language code such as `en`\n\n* an ISO 639-2 language code such as `fil`, if no 639-1 code exists\n\n* a combination of the ISO language code and country code, such as `en_US`\n\nNote: when updating this field, not all locales/languages are supported.", - "type": "string" + "rightToLeft": { + "description": "True if the sheet is an RTL sheet instead of an LTR sheet.", + "type": "boolean" + }, + "hidden": { + "description": "True if the sheet is hidden in the UI, false if it's visible.", + "type": "boolean" + }, + "sheetType": { + "type": "string", + "enumDescriptions": [ + "Default value, do not use.", + "The sheet is a grid.", + "The sheet has no grid and instead has an object like a chart or image." + ], + "enum": [ + "SHEET_TYPE_UNSPECIFIED", + "GRID", + "OBJECT" + ], + "description": "The type of sheet. Defaults to GRID.\nThis field cannot be changed once set." + }, + "gridProperties": { + "description": "Additional properties of the sheet if this sheet is a grid.\n(If the sheet is an object sheet, containing a chart or image, then\nthis field will be absent.)\nWhen writing it is an error to set any grid properties on non-grid sheets.", + "$ref": "GridProperties" } }, - "id": "SpreadsheetProperties", - "description": "Properties of a spreadsheet.", - "type": "object" + "id": "SheetProperties", + "description": "Properties of a sheet." }, - "RepeatCellRequest": { - "description": "Updates all cells in the range to the values in the given Cell object.\nOnly the fields listed in the fields field are updated; others are\nunchanged.\n\nIf writing a cell with a formula, the formula's ranges will automatically\nincrement for each field in the range.\nFor example, if writing a cell with formula `=A1` into range B2:C4,\nB2 would be `=A1`, B3 would be `=A2`, B4 would be `=A3`,\nC2 would be `=B1`, C3 would be `=B2`, C4 would be `=B3`.\n\nTo keep the formula's ranges static, use the `$` indicator.\nFor example, use the formula `=$A$1` to prevent both the row and the\ncolumn from incrementing.", + "UpdateDimensionPropertiesRequest": { + "description": "Updates properties of dimensions within the specified range.", "type": "object", "properties": { + "range": { + "$ref": "DimensionRange", + "description": "The rows or columns to update." + }, "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root `cell` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "description": "The fields that should be updated. 
At least one field must be specified.\nThe root `properties` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", "format": "google-fieldmask", "type": "string" }, - "cell": { - "description": "The data to write.", - "$ref": "CellData" - }, - "range": { - "$ref": "GridRange", - "description": "The range to repeat the cell in." + "properties": { + "$ref": "DimensionProperties", + "description": "Properties to update." } }, - "id": "RepeatCellRequest" + "id": "UpdateDimensionPropertiesRequest" }, - "AddChartResponse": { - "description": "The result of adding a chart to a spreadsheet.", + "SourceAndDestination": { + "id": "SourceAndDestination", + "description": "A combination of a source range and how to extend that source.", "type": "object", "properties": { - "chart": { - "$ref": "EmbeddedChart", - "description": "The newly added chart." + "fillLength": { + "description": "The number of rows or columns that data should be filled into.\nPositive numbers expand beyond the last row or last column\nof the source. Negative numbers expand before the first row\nor first column of the source.", + "format": "int32", + "type": "integer" + }, + "source": { + "description": "The location of the data to use as the source of the autofill.", + "$ref": "GridRange" + }, + "dimension": { + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ], + "description": "The dimension that data should be filled into.", + "type": "string", + "enumDescriptions": [ + "The default value, do not use.", + "Operates on the rows of a sheet.", + "Operates on the columns of a sheet." + ] } - }, - "id": "AddChartResponse" + } }, - "InsertDimensionRequest": { + "FilterView": { + "description": "A filter view.", + "type": "object", "properties": { - "inheritFromBefore": { - "description": "Whether dimension properties should be extended from the dimensions\nbefore or after the newly inserted dimensions.\nTrue to inherit from the dimensions before (in which case the start\nindex must be greater than 0), and false to inherit from the dimensions\nafter.\n\nFor example, if row index 0 has red background and row index 1\nhas a green background, then inserting 2 rows at index 1 can inherit\neither the green or red background. If `inheritFromBefore` is true,\nthe two new rows will be red (because the row before the insertion point\nwas red), whereas if `inheritFromBefore` is false, the two new rows will\nbe green (because the row after the insertion point was green).", - "type": "boolean" + "namedRangeId": { + "description": "The named range this filter view is backed by, if any.\n\nWhen writing, only one of range or named_range_id\nmay be set.", + "type": "string" + }, + "filterViewId": { + "description": "The ID of the filter view.", + "format": "int32", + "type": "integer" + }, + "criteria": { + "description": "The criteria for showing/hiding values per column.\nThe map's key is the column index, and the value is the criteria for\nthat column.", + "type": "object", + "additionalProperties": { + "$ref": "FilterCriteria" + } + }, + "title": { + "description": "The name of the filter view.", + "type": "string" }, "range": { - "$ref": "DimensionRange", - "description": "The dimensions to insert. Both the start and end indexes must be bounded." + "$ref": "GridRange", + "description": "The range this filter view covers.\n\nWhen writing, only one of range or named_range_id\nmay be set." 
+ }, + "sortSpecs": { + "type": "array", + "items": { + "$ref": "SortSpec" + }, + "description": "The sort order per column. Later specifications are used when values\nare equal in the earlier specifications." } }, - "id": "InsertDimensionRequest", - "description": "Inserts rows or columns in a sheet at a particular index.", - "type": "object" + "id": "FilterView" }, - "UpdateSpreadsheetPropertiesRequest": { + "BandingProperties": { "properties": { - "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root 'properties' is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", - "type": "string" + "firstBandColor": { + "$ref": "Color", + "description": "The first color that is alternating. (Required)" }, - "properties": { - "description": "The properties to update.", - "$ref": "SpreadsheetProperties" + "secondBandColor": { + "$ref": "Color", + "description": "The second color that is alternating. (Required)" + }, + "footerColor": { + "$ref": "Color", + "description": "The color of the last row or column. If this field is not set, the last\nrow or column will be filled with either first_band_color or\nsecond_band_color, depending on the color of the previous row or\ncolumn." + }, + "headerColor": { + "description": "The color of the first row or column. If this field is set, the first\nrow or column will be filled with this color and the colors will\nalternate between first_band_color and second_band_color starting\nfrom the second row or column. Otherwise, the first row or column will be\nfilled with first_band_color and the colors will proceed to alternate\nas they normally would.", + "$ref": "Color" } }, - "id": "UpdateSpreadsheetPropertiesRequest", - "description": "Updates properties of a spreadsheet.", + "id": "BandingProperties", + "description": "Properties referring a single dimension (either row or column). If both\nBandedRange.row_properties and BandedRange.column_properties are\nset, the fill colors are applied to cells according to the following rules:\n\n* header_color and footer_color take priority over band colors.\n* first_band_color takes priority over second_band_color.\n* row_properties takes priority over column_properties.\n\nFor example, the first row color takes priority over the first column\ncolor, but the first column color takes priority over the second row color.\nSimilarly, the row header takes priority over the column header in the\ntop left cell, but the column header takes priority over the first row\ncolor if the row header is not set.", "type": "object" }, - "BatchUpdateValuesRequest": { + "BasicFilter": { + "description": "The default filter associated with a sheet.", + "type": "object", "properties": { - "valueInputOption": { - "enum": [ - "INPUT_VALUE_OPTION_UNSPECIFIED", - "RAW", - "USER_ENTERED" - ], - "description": "How the input data should be interpreted.", - "type": "string", - "enumDescriptions": [ - "Default input value. This value must not be used.", - "The values the user has entered will not be parsed and will be stored\nas-is.", - "The values will be parsed as if the user typed them into the UI.\nNumbers will stay as numbers, but strings may be converted to numbers,\ndates, etc. following the same rules that are applied when entering\ntext into a cell via the Google Sheets UI." 
- ] + "range": { + "description": "The range the filter covers.", + "$ref": "GridRange" }, - "data": { - "description": "The new values to apply to the spreadsheet.", + "criteria": { + "type": "object", + "additionalProperties": { + "$ref": "FilterCriteria" + }, + "description": "The criteria for showing/hiding values per column.\nThe map's key is the column index, and the value is the criteria for\nthat column." + }, + "sortSpecs": { + "description": "The sort order per column. Later specifications are used when values\nare equal in the earlier specifications.", "type": "array", "items": { - "$ref": "ValueRange" + "$ref": "SortSpec" } - }, - "responseDateTimeRenderOption": { - "enumDescriptions": [ - "Instructs date, time, datetime, and duration fields to be output\nas doubles in \"serial number\" format, as popularized by Lotus 1-2-3.\nDays are counted from December 31st 1899 and are incremented by 1,\nand times are fractions of a day. For example, January 1st 1900 at noon\nwould be 1.5, 1 because it's 1 day offset from December 31st 1899,\nand .5 because noon is half a day. February 1st 1900 at 3pm would\nbe 32.625. This correctly treats the year 1900 as not a leap year.", - "Instructs date, time, datetime, and duration fields to be output\nas strings in their given number format (which is dependent\non the spreadsheet locale)." - ], - "enum": [ - "SERIAL_NUMBER", - "FORMATTED_STRING" - ], - "description": "Determines how dates, times, and durations in the response should be\nrendered. This is ignored if response_value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", - "type": "string" - }, - "responseValueRenderOption": { - "enumDescriptions": [ - "Values will be calculated & formatted in the reply according to the\ncell's formatting. Formatting is based on the spreadsheet's locale,\nnot the requesting user's locale.\nFor example, if `A1` is `1.23` and `A2` is `=A1` and formatted as currency,\nthen `A2` would return `\"$1.23\"`.", - "Values will be calculated, but not formatted in the reply.\nFor example, if `A1` is `1.23` and `A2` is `=A1` and formatted as currency,\nthen `A2` would return the number `1.23`.", - "Values will not be calculated. The reply will include the formulas.\nFor example, if `A1` is `1.23` and `A2` is `=A1` and formatted as currency,\nthen A2 would return `\"=A1\"`." - ], - "enum": [ - "FORMATTED_VALUE", - "UNFORMATTED_VALUE", - "FORMULA" - ], - "description": "Determines how values in the response should be rendered.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", - "type": "string" - }, - "includeValuesInResponse": { - "description": "Determines if the update response should include the values\nof the cells that were updated. By default, responses\ndo not include the updated values. The `updatedData` field within\neach of the BatchUpdateValuesResponse.responses will contain\nthe updated values. 
If the range to write was larger than than the range\nactually written, the response will include all values in the requested\nrange (excluding trailing empty rows and columns).", - "type": "boolean" } }, - "id": "BatchUpdateValuesRequest", - "description": "The request for updating more than one range of values in a spreadsheet.", + "id": "BasicFilter" + }, + "AddProtectedRangeResponse": { + "properties": { + "protectedRange": { + "description": "The newly added protected range.", + "$ref": "ProtectedRange" + } + }, + "id": "AddProtectedRangeResponse", + "description": "The result of adding a new protected range.", "type": "object" }, - "ProtectedRange": { + "UpdateValuesResponse": { + "description": "The response when updating a range of values in a spreadsheet.", + "type": "object", "properties": { - "description": { - "description": "The description of this protected range.", - "type": "string" - }, - "unprotectedRanges": { - "description": "The list of unprotected ranges within a protected sheet.\nUnprotected ranges are only supported on protected sheets.", - "type": "array", - "items": { - "$ref": "GridRange" - } - }, - "namedRangeId": { - "description": "The named range this protected range is backed by, if any.\n\nWhen writing, only one of range or named_range_id\nmay be set.", - "type": "string" + "updatedData": { + "$ref": "ValueRange", + "description": "The values of the cells after updates were applied.\nThis is only included if the request's `includeValuesInResponse` field\nwas `true`." }, - "protectedRangeId": { - "description": "The ID of the protected range.\nThis field is read-only.", + "updatedRows": { + "description": "The number of rows where at least one cell in the row was updated.", "format": "int32", "type": "integer" }, - "warningOnly": { - "description": "True if this protected range will show a warning when editing.\nWarning-based protection means that every user can edit data in the\nprotected range, except editing will prompt a warning asking the user\nto confirm the edit.\n\nWhen writing: if this field is true, then editors is ignored.\nAdditionally, if this field is changed from true to false and the\n`editors` field is not set (nor included in the field mask), then\nthe editors will be set to all the editors in the document.", - "type": "boolean" + "updatedColumns": { + "description": "The number of columns where at least one cell in the column was updated.", + "format": "int32", + "type": "integer" }, - "requestingUserCanEdit": { - "description": "True if the user who requested this protected range can edit the\nprotected area.\nThis field is read-only.", - "type": "boolean" + "spreadsheetId": { + "description": "The spreadsheet the updates were applied to.", + "type": "string" }, - "editors": { - "$ref": "Editors", - "description": "The users and groups with edit access to the protected range.\nThis field is only visible to users with edit access to the protected\nrange and the document.\nEditors are not supported with warning_only protection." + "updatedRange": { + "description": "The range (in A1 notation) that updates were applied to.", + "type": "string" }, - "range": { - "$ref": "GridRange", - "description": "The range that is being protected.\nThe range may be fully unbounded, in which case this is considered\na protected sheet.\n\nWhen writing, only one of range or named_range_id\nmay be set." 
- } - }, - "id": "ProtectedRange", - "description": "A protected range.", - "type": "object" - }, - "DimensionProperties": { - "properties": { - "pixelSize": { - "description": "The height (if a row) or width (if a column) of the dimension in pixels.", + "updatedCells": { + "description": "The number of cells updated.", "format": "int32", "type": "integer" - }, - "hiddenByFilter": { - "description": "True if this dimension is being filtered.\nThis field is read-only.", - "type": "boolean" - }, - "hiddenByUser": { - "description": "True if this dimension is explicitly hidden.", - "type": "boolean" } }, - "id": "DimensionProperties", - "description": "Properties about a dimension.", - "type": "object" + "id": "UpdateValuesResponse" }, - "NamedRange": { - "description": "A named range.", + "PivotValue": { + "description": "The definition of how a value in a pivot table should be calculated.", "type": "object", "properties": { - "namedRangeId": { - "description": "The ID of the named range.", + "formula": { + "description": "A custom formula to calculate the value. The formula must start\nwith an `=` character.", "type": "string" }, - "range": { - "$ref": "GridRange", - "description": "The range this represents." + "summarizeFunction": { + "description": "A function to summarize the value.\nIf formula is set, the only supported values are\nSUM and\nCUSTOM.\nIf sourceColumnOffset is set, then `CUSTOM`\nis not supported.", + "type": "string", + "enumDescriptions": [ + "The default, do not use.", + "Corresponds to the `SUM` function.", + "Corresponds to the `COUNTA` function.", + "Corresponds to the `COUNT` function.", + "Corresponds to the `COUNTUNIQUE` function.", + "Corresponds to the `AVERAGE` function.", + "Corresponds to the `MAX` function.", + "Corresponds to the `MIN` function.", + "Corresponds to the `MEDIAN` function.", + "Corresponds to the `PRODUCT` function.", + "Corresponds to the `STDEV` function.", + "Corresponds to the `STDEVP` function.", + "Corresponds to the `VAR` function.", + "Corresponds to the `VARP` function.", + "Indicates the formula should be used as-is.\nOnly valid if PivotValue.formula was set." + ], + "enum": [ + "PIVOT_STANDARD_VALUE_FUNCTION_UNSPECIFIED", + "SUM", + "COUNTA", + "COUNT", + "COUNTUNIQUE", + "AVERAGE", + "MAX", + "MIN", + "MEDIAN", + "PRODUCT", + "STDEV", + "STDEVP", + "VAR", + "VARP", + "CUSTOM" + ] + }, + "sourceColumnOffset": { + "type": "integer", + "description": "The column offset of the source range that this value reads from.\n\nFor example, if the source was `C10:E15`, a `sourceColumnOffset` of `0`\nmeans this value refers to column `C`, whereas the offset `1` would\nrefer to column `D`.", + "format": "int32" }, "name": { - "description": "The name of the named range.", + "description": "A name to use for the value. This is only used if formula was set.\nOtherwise, the column name is used.", "type": "string" } }, - "id": "NamedRange" + "id": "PivotValue" }, - "DimensionRange": { + "ErrorValue": { + "description": "An error in a cell.", + "type": "object", "properties": { - "sheetId": { - "description": "The sheet this span is on.", - "format": "int32", - "type": "integer" - }, - "dimension": { + "type": { + "description": "The type of error.", + "type": "string", "enumDescriptions": [ - "The default value, do not use.", - "Operates on the rows of a sheet.", - "Operates on the columns of a sheet." 
+ "The default error type, do not use this.", + "Corresponds to the `#ERROR!` error.", + "Corresponds to the `#NULL!` error.", + "Corresponds to the `#DIV/0` error.", + "Corresponds to the `#VALUE!` error.", + "Corresponds to the `#REF!` error.", + "Corresponds to the `#NAME?` error.", + "Corresponds to the `#NUM`! error.", + "Corresponds to the `#N/A` error.", + "Corresponds to the `Loading...` state." ], "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" - ], - "description": "The dimension of the span.", - "type": "string" - }, - "startIndex": { - "description": "The start (inclusive) of the span, or not set if unbounded.", - "format": "int32", - "type": "integer" + "ERROR_TYPE_UNSPECIFIED", + "ERROR", + "NULL_VALUE", + "DIVIDE_BY_ZERO", + "VALUE", + "REF", + "NAME", + "NUM", + "N_A", + "LOADING" + ] }, - "endIndex": { - "description": "The end (exclusive) of the span, or not set if unbounded.", - "format": "int32", - "type": "integer" + "message": { + "description": "A message with more information about the error\n(in the spreadsheet's locale).", + "type": "string" } }, - "id": "DimensionRange", - "description": "A range along a single dimension on a sheet.\nAll indexes are zero-based.\nIndexes are half open: the start index is inclusive\nand the end index is exclusive.\nMissing indexes indicate the range is unbounded on that side.", - "type": "object" + "id": "ErrorValue" }, - "CutPasteRequest": { - "description": "Moves data from the source to the destination.", + "CopySheetToAnotherSpreadsheetRequest": { + "description": "The request to copy a sheet across spreadsheets.", "type": "object", "properties": { - "source": { - "$ref": "GridRange", - "description": "The source data to cut." - }, - "pasteType": { - "enum": [ - "PASTE_NORMAL", - "PASTE_VALUES", - "PASTE_FORMAT", - "PASTE_NO_BORDERS", - "PASTE_FORMULA", - "PASTE_DATA_VALIDATION", - "PASTE_CONDITIONAL_FORMATTING" - ], - "description": "What kind of data to paste. All the source data will be cut, regardless\nof what is pasted.", - "type": "string", - "enumDescriptions": [ - "Paste values, formulas, formats, and merges.", - "Paste the values ONLY without formats, formulas, or merges.", - "Paste the format and data validation only.", - "Like PASTE_NORMAL but without borders.", - "Paste the formulas only.", - "Paste the data validation only.", - "Paste the conditional formatting rules only." - ] - }, - "destination": { - "description": "The top-left coordinate where the data should be pasted.", - "$ref": "GridCoordinate" + "destinationSpreadsheetId": { + "description": "The ID of the spreadsheet to copy the sheet to.", + "type": "string" } }, - "id": "CutPasteRequest" + "id": "CopySheetToAnotherSpreadsheetRequest" }, - "Borders": { - "description": "The borders of the cell.", - "type": "object", + "PivotGroupSortValueBucket": { "properties": { - "right": { - "$ref": "Border", - "description": "The right border of the cell." - }, - "bottom": { - "description": "The bottom border of the cell.", - "$ref": "Border" - }, - "top": { - "$ref": "Border", - "description": "The top border of the cell." + "buckets": { + "description": "Determines the bucket from which values are chosen to sort.\n\nFor example, in a pivot table with one row group & two column groups,\nthe row group can list up to two values. The first value corresponds\nto a value within the first column group, and the second value\ncorresponds to a value in the second column group. 
If no values\nare listed, this would indicate that the row should be sorted according\nto the \"Grand Total\" over the column groups. If a single value is listed,\nthis would correspond to using the \"Total\" of that bucket.", + "type": "array", + "items": { + "$ref": "ExtendedValue" + } }, - "left": { - "$ref": "Border", - "description": "The left border of the cell." + "valuesIndex": { + "description": "The offset in the PivotTable.values list which the values in this\ngrouping should be sorted by.", + "format": "int32", + "type": "integer" } }, - "id": "Borders" + "id": "PivotGroupSortValueBucket", + "description": "Information about which values in a pivot group should be used for sorting.", + "type": "object" }, - "BasicChartSeries": { - "description": "A single series of data in a chart.\nFor example, if charting stock prices over time, multiple series may exist,\none for the \"Open Price\", \"High Price\", \"Low Price\" and \"Close Price\".", + "EmbeddedObjectPosition": { + "description": "The position of an embedded object such as a chart.", "type": "object", "properties": { - "series": { - "description": "The data being visualized in this chart series.", - "$ref": "ChartData" + "newSheet": { + "description": "If true, the embedded object will be put on a new sheet whose ID\nis chosen for you. Used only when writing.", + "type": "boolean" }, - "type": { - "enumDescriptions": [ - "Default value, do not use.", - "A \u003ca href=\"/chart/interactive/docs/gallery/barchart\"\u003ebar chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/linechart\"\u003eline chart\u003c/a\u003e.", - "An \u003ca href=\"/chart/interactive/docs/gallery/areachart\"\u003earea chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/columnchart\"\u003ecolumn chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/scatterchart\"\u003escatter chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/combochart\"\u003ecombo chart\u003c/a\u003e." - ], - "enum": [ - "BASIC_CHART_TYPE_UNSPECIFIED", - "BAR", - "LINE", - "AREA", - "COLUMN", - "SCATTER", - "COMBO" - ], - "description": "The type of this series. Valid only if the\nchartType is\nCOMBO.\nDifferent types will change the way the series is visualized.\nOnly LINE, AREA,\nand COLUMN are supported.", - "type": "string" + "sheetId": { + "description": "The sheet this is on. Set only if the embedded object\nis on its own sheet. Must be non-negative.", + "format": "int32", + "type": "integer" }, - "targetAxis": { - "enumDescriptions": [ - "Default value, do not use.", - "The axis rendered at the bottom of a chart.\nFor most charts, this is the standard major axis.\nFor bar charts, this is a minor axis.", - "The axis rendered at the left of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is the standard major axis.", - "The axis rendered at the right of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is an unusual major axis." 
- ], - "enum": [ - "BASIC_CHART_AXIS_POSITION_UNSPECIFIED", - "BOTTOM_AXIS", - "LEFT_AXIS", - "RIGHT_AXIS" - ], - "description": "The minor axis that will specify the range of values for this series.\nFor example, if charting stocks over time, the \"Volume\" series\nmay want to be pinned to the right with the prices pinned to the left,\nbecause the scale of trading volume is different than the scale of\nprices.\nIt is an error to specify an axis that isn't a valid minor axis\nfor the chart's type.", - "type": "string" + "overlayPosition": { + "$ref": "OverlayPosition", + "description": "The position at which the object is overlaid on top of a grid." } }, - "id": "BasicChartSeries" + "id": "EmbeddedObjectPosition" }, - "AutoResizeDimensionsRequest": { + "DeleteProtectedRangeRequest": { + "description": "Deletes the protected range with the given ID.", + "type": "object", "properties": { - "dimensions": { - "description": "The dimensions to automatically resize.\nOnly COLUMNS are supported.", - "$ref": "DimensionRange" + "protectedRangeId": { + "description": "The ID of the protected range to delete.", + "format": "int32", + "type": "integer" } }, - "id": "AutoResizeDimensionsRequest", - "description": "Automatically resizes one or more dimensions based on the contents\nof the cells in that dimension.", - "type": "object" + "id": "DeleteProtectedRangeRequest" }, - "UpdateBordersRequest": { + "AutoFillRequest": { + "description": "Fills in more data based on existing data.", + "type": "object", "properties": { - "bottom": { - "$ref": "Border", - "description": "The border to put at the bottom of the range." - }, - "innerVertical": { - "description": "The vertical border to put within the range.", - "$ref": "Border" + "useAlternateSeries": { + "description": "True if we should generate data with the \"alternate\" series.\nThis differs based on the type and amount of source data.", + "type": "boolean" }, - "right": { - "$ref": "Border", - "description": "The border to put at the right of the range." + "sourceAndDestination": { + "$ref": "SourceAndDestination", + "description": "The source and destination areas to autofill.\nThis explicitly lists the source of the autofill and where to\nextend that data." }, "range": { "$ref": "GridRange", - "description": "The range whose borders should be updated." - }, - "innerHorizontal": { - "description": "The horizontal border to put within the range.", - "$ref": "Border" - }, - "top": { - "$ref": "Border", - "description": "The border to put at the top of the range." - }, - "left": { - "description": "The border to put at the left of the range.", - "$ref": "Border" + "description": "The range to autofill. This will examine the range and detect\nthe location that has data and automatically fill that data\nin to the rest of the range." } }, - "id": "UpdateBordersRequest", - "description": "Updates the borders of a range.\nIf a field is not set in the request, that means the border remains as-is.\nFor example, with two subsequent UpdateBordersRequest:\n\n 1. range: A1:A5 `{ top: RED, bottom: WHITE }`\n 2. range: A1:A5 `{ left: BLUE }`\n\nThat would result in A1:A5 having a borders of\n`{ top: RED, bottom: WHITE, left: BLUE }`.\nIf you want to clear a border, explicitly set the style to\nNONE.", - "type": "object" + "id": "AutoFillRequest" }, - "CellFormat": { + "GradientRule": { + "id": "GradientRule", + "description": "A rule that applies a gradient color scale format, based on\nthe interpolation points listed. 
The format of a cell will vary\nbased on its contents as compared to the values of the interpolation\npoints.", + "type": "object", "properties": { - "wrapStrategy": { - "enumDescriptions": [ - "The default value, do not use.", - "Lines that are longer than the cell width will be written in the next\ncell over, so long as that cell is empty. If the next cell over is\nnon-empty, this behaves the same as CLIP. The text will never wrap\nto the next line unless the user manually inserts a new line.\nExample:\n\n | First sentence. |\n | Manual newline that is very long. \u003c- Text continues into next cell\n | Next newline. |", - "This wrap strategy represents the old Google Sheets wrap strategy where\nwords that are longer than a line are clipped rather than broken. This\nstrategy is not supported on all platforms and is being phased out.\nExample:\n\n | Cell has a |\n | loooooooooo| \u003c- Word is clipped.\n | word. |", - "Lines that are longer than the cell width will be clipped.\nThe text will never wrap to the next line unless the user manually\ninserts a new line.\nExample:\n\n | First sentence. |\n | Manual newline t| \u003c- Text is clipped\n | Next newline. |", - "Words that are longer than a line are wrapped at the character level\nrather than clipped.\nExample:\n\n | Cell has a |\n | loooooooooo| \u003c- Word is broken.\n | ong word. |" - ], - "enum": [ - "WRAP_STRATEGY_UNSPECIFIED", - "OVERFLOW_CELL", - "LEGACY_WRAP", - "CLIP", - "WRAP" - ], - "description": "The wrap strategy for the value in the cell.", - "type": "string" - }, - "numberFormat": { - "$ref": "NumberFormat", - "description": "A format describing how number values should be represented to the user." - }, - "hyperlinkDisplayType": { - "enum": [ - "HYPERLINK_DISPLAY_TYPE_UNSPECIFIED", - "LINKED", - "PLAIN_TEXT" - ], - "description": "How a hyperlink, if it exists, should be displayed in the cell.", - "type": "string", - "enumDescriptions": [ - "The default value: the hyperlink is rendered. Do not use this.", - "A hyperlink should be explicitly rendered.", - "A hyperlink should not be rendered." - ] - }, - "horizontalAlignment": { - "enumDescriptions": [ - "The horizontal alignment is not specified. Do not use this.", - "The text is explicitly aligned to the left of the cell.", - "The text is explicitly aligned to the center of the cell.", - "The text is explicitly aligned to the right of the cell." - ], - "enum": [ - "HORIZONTAL_ALIGN_UNSPECIFIED", - "LEFT", - "CENTER", - "RIGHT" - ], - "description": "The horizontal alignment of the value in the cell.", - "type": "string" - }, - "textFormat": { - "$ref": "TextFormat", - "description": "The format of the text in the cell (unless overridden by a format run)." - }, - "backgroundColor": { - "$ref": "Color", - "description": "The background color of the cell." - }, - "verticalAlignment": { - "enumDescriptions": [ - "The vertical alignment is not specified. Do not use this.", - "The text is explicitly aligned to the top of the cell.", - "The text is explicitly aligned to the middle of the cell.", - "The text is explicitly aligned to the bottom of the cell." - ], - "enum": [ - "VERTICAL_ALIGN_UNSPECIFIED", - "TOP", - "MIDDLE", - "BOTTOM" - ], - "description": "The vertical alignment of the value in the cell.", - "type": "string" + "midpoint": { + "$ref": "InterpolationPoint", + "description": "An optional midway interpolation point." 
}, - "padding": { - "description": "The padding of the cell.", - "$ref": "Padding" + "minpoint": { + "$ref": "InterpolationPoint", + "description": "The starting interpolation point." }, - "borders": { - "description": "The borders of the cell.", - "$ref": "Borders" + "maxpoint": { + "$ref": "InterpolationPoint", + "description": "The final interpolation point." + } + } + }, + "SetBasicFilterRequest": { + "id": "SetBasicFilterRequest", + "description": "Sets the basic filter associated with a sheet.", + "type": "object", + "properties": { + "filter": { + "$ref": "BasicFilter", + "description": "The filter to set." + } + } + }, + "ClearValuesRequest": { + "type": "object", + "properties": {}, + "id": "ClearValuesRequest", + "description": "The request for clearing a range of values in a spreadsheet." + }, + "InterpolationPoint": { + "description": "A single interpolation point on a gradient conditional format.\nThese pin the gradient color scale according to the color,\ntype and value chosen.", + "type": "object", + "properties": { + "color": { + "$ref": "Color", + "description": "The color this interpolation point should use." }, - "textDirection": { + "type": { "enum": [ - "TEXT_DIRECTION_UNSPECIFIED", - "LEFT_TO_RIGHT", - "RIGHT_TO_LEFT" + "INTERPOLATION_POINT_TYPE_UNSPECIFIED", + "MIN", + "MAX", + "NUMBER", + "PERCENT", + "PERCENTILE" ], - "description": "The direction of the text in the cell.", + "description": "How the value should be interpreted.", "type": "string", "enumDescriptions": [ - "The text direction is not specified. Do not use this.", - "The text direction of left-to-right was set by the user.", - "The text direction of right-to-left was set by the user." + "The default value, do not use.", + "The interpolation point will use the minimum value in the\ncells over the range of the conditional format.", + "The interpolation point will use the maximum value in the\ncells over the range of the conditional format.", + "The interpolation point will use exactly the value in\nInterpolationPoint.value.", + "The interpolation point will be the given percentage over\nall the cells in the range of the conditional format.\nThis is equivalent to NUMBER if the value was:\n`=(MAX(FLATTEN(range)) * (value / 100))\n + (MIN(FLATTEN(range)) * (1 - (value / 100)))`\n(where errors in the range are ignored when flattening).", + "The interpolation point will be the given percentile\nover all the cells in the range of the conditional format.\nThis is equivalent to NUMBER if the value was:\n`=PERCENTILE(FLATTEN(range), value / 100)`\n(where errors in the range are ignored when flattening)." ] - } - }, - "id": "CellFormat", - "description": "The format of a cell.", - "type": "object" - }, - "ClearValuesResponse": { - "properties": { - "spreadsheetId": { - "description": "The spreadsheet the updates were applied to.", - "type": "string" }, - "clearedRange": { - "description": "The range (in A1 notation) that was cleared.\n(If the request was for an unbounded range or a ranger larger\n than the bounds of the sheet, this will be the actual range\n that was cleared, bounded to the sheet's limits.)", + "value": { + "description": "The value this interpolation point uses. 
May be a formula.\nUnused if type is MIN or\nMAX.", "type": "string" } }, - "id": "ClearValuesResponse", - "description": "The response when clearing a range of values in a spreadsheet.", - "type": "object" + "id": "InterpolationPoint" }, - "DeleteConditionalFormatRuleRequest": { + "FindReplaceResponse": { + "description": "The result of the find/replace.", + "type": "object", "properties": { - "index": { - "description": "The zero-based index of the rule to be deleted.", + "formulasChanged": { + "description": "The number of formula cells changed.", "format": "int32", "type": "integer" }, - "sheetId": { - "description": "The sheet the rule is being deleted from.", + "valuesChanged": { + "type": "integer", + "description": "The number of non-formula cells changed.", + "format": "int32" + }, + "occurrencesChanged": { + "description": "The number of occurrences (possibly multiple within a cell) changed.\nFor example, if replacing `\"e\"` with `\"o\"` in `\"Google Sheets\"`, this would\nbe `\"3\"` because `\"Google Sheets\"` -\u003e `\"Googlo Shoots\"`.", + "format": "int32", + "type": "integer" + }, + "rowsChanged": { + "description": "The number of rows changed.", + "format": "int32", + "type": "integer" + }, + "sheetsChanged": { + "description": "The number of sheets changed.", "format": "int32", "type": "integer" } }, - "id": "DeleteConditionalFormatRuleRequest", - "description": "Deletes a conditional format rule at the given index.\nAll subsequent rules' indexes are decremented.", - "type": "object" + "id": "FindReplaceResponse" }, - "DeleteNamedRangeRequest": { - "description": "Removes the named range with the given ID from the spreadsheet.", + "DeleteEmbeddedObjectRequest": { + "id": "DeleteEmbeddedObjectRequest", + "description": "Deletes the embedded object with the given ID.", "type": "object", "properties": { - "namedRangeId": { - "description": "The ID of the named range to delete.", - "type": "string" + "objectId": { + "description": "The ID of the embedded object to delete.", + "format": "int32", + "type": "integer" } - }, - "id": "DeleteNamedRangeRequest" + } }, - "AddBandingResponse": { + "DuplicateFilterViewRequest": { + "description": "Duplicates a particular filter view.", + "type": "object", "properties": { - "bandedRange": { - "$ref": "BandedRange", - "description": "The banded range that was added." + "filterId": { + "description": "The ID of the filter being duplicated.", + "format": "int32", + "type": "integer" } }, - "id": "AddBandingResponse", - "description": "The result of adding a banded range.", - "type": "object" + "id": "DuplicateFilterViewRequest" }, - "ChartData": { + "DeleteSheetRequest": { + "description": "Deletes the requested sheet.", + "type": "object", "properties": { - "sourceRange": { - "description": "The source ranges of the data.", - "$ref": "ChartSourceRange" + "sheetId": { + "description": "The ID of the sheet to delete.", + "format": "int32", + "type": "integer" } }, - "id": "ChartData", - "description": "The data included in a domain or series.", - "type": "object" + "id": "DeleteSheetRequest" }, - "BatchGetValuesResponse": { + "UpdateConditionalFormatRuleResponse": { + "description": "The result of updating a conditional format rule.", + "type": "object", "properties": { - "valueRanges": { - "description": "The requested values. The order of the ValueRanges is the same as the\norder of the requested ranges.", - "type": "array", - "items": { - "$ref": "ValueRange" - } + "oldRule": { + "description": "The old (deleted) rule. 
Not set if a rule was moved\n(because it is the same as new_rule).", + "$ref": "ConditionalFormatRule" }, - "spreadsheetId": { - "description": "The ID of the spreadsheet the data was retrieved from.", - "type": "string" + "newIndex": { + "description": "The index of the new rule.", + "format": "int32", + "type": "integer" + }, + "oldIndex": { + "description": "The old index of the rule. Not set if a rule was replaced\n(because it is the same as new_index).", + "format": "int32", + "type": "integer" + }, + "newRule": { + "$ref": "ConditionalFormatRule", + "description": "The new rule that replaced the old rule (if replacing),\nor the rule that was moved (if moved)" } }, - "id": "BatchGetValuesResponse", - "description": "The response when retrieving more than one range of values in a spreadsheet.", - "type": "object" + "id": "UpdateConditionalFormatRuleResponse" }, - "UpdateBandingRequest": { + "DuplicateSheetRequest": { + "type": "object", "properties": { - "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root `bandedRange` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", + "newSheetName": { + "description": "The name of the new sheet. If empty, a new name is chosen for you.", "type": "string" }, - "bandedRange": { - "description": "The banded range to update with the new properties.", - "$ref": "BandedRange" + "sourceSheetId": { + "type": "integer", + "description": "The sheet to duplicate.", + "format": "int32" + }, + "newSheetId": { + "description": "If set, the ID of the new sheet. If not set, an ID is chosen.\nIf set, the ID must not conflict with any existing sheet ID.\nIf set, it must be non-negative.", + "format": "int32", + "type": "integer" + }, + "insertSheetIndex": { + "description": "The zero-based index where the new sheet should be inserted.\nThe index of all sheets after this are incremented.", + "format": "int32", + "type": "integer" } }, - "id": "UpdateBandingRequest", - "description": "Updates properties of the supplied banded range.", - "type": "object" + "id": "DuplicateSheetRequest", + "description": "Duplicates the contents of a sheet." }, - "Color": { + "ConditionValue": { + "id": "ConditionValue", + "description": "The value of the condition.", + "type": "object", "properties": { - "red": { - "description": "The amount of red in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - }, - "green": { - "description": "The amount of green in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - }, - "blue": { - "description": "The amount of blue in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" + "relativeDate": { + "type": "string", + "enumDescriptions": [ + "Default value, do not use.", + "The value is one year before today.", + "The value is one month before today.", + "The value is one week before today.", + "The value is yesterday.", + "The value is today.", + "The value is tomorrow." 
+ ], + "enum": [ + "RELATIVE_DATE_UNSPECIFIED", + "PAST_YEAR", + "PAST_MONTH", + "PAST_WEEK", + "YESTERDAY", + "TODAY", + "TOMORROW" + ], + "description": "A relative date (based on the current date).\nValid only if the type is\nDATE_BEFORE,\nDATE_AFTER,\nDATE_ON_OR_BEFORE or\nDATE_ON_OR_AFTER.\n\nRelative dates are not supported in data validation.\nThey are supported only in conditional formatting and\nconditional filters." }, - "alpha": { - "description": "The fraction of this color that should be applied to the pixel. That is,\nthe final pixel color is defined by the equation:\n\n pixel color = alpha * (this color) + (1.0 - alpha) * (background color)\n\nThis means that a value of 1.0 corresponds to a solid color, whereas\na value of 0.0 corresponds to a completely transparent color. This\nuses a wrapper message rather than a simple float scalar so that it is\npossible to distinguish between a default value and the value being unset.\nIf omitted, this color object is to be rendered as a solid color\n(as if the alpha value had been explicitly given with a value of 1.0).", - "format": "float", - "type": "number" + "userEnteredValue": { + "description": "A value the condition is based on.\nThe value will be parsed as if the user typed into a cell.\nFormulas are supported (and must begin with an `=`).", + "type": "string" } - }, - "id": "Color", - "description": "Represents a color in the RGBA color space. This representation is designed\nfor simplicity of conversion to/from color representations in various\nlanguages over compactness; for example, the fields of this representation\ncan be trivially provided to the constructor of \"java.awt.Color\" in Java; it\ncan also be trivially provided to UIColor's \"+colorWithRed:green:blue:alpha\"\nmethod in iOS; and, with just a little work, it can be easily formatted into\na CSS \"rgba()\" string in JavaScript, as well. Here are some examples:\n\nExample (Java):\n\n import com.google.type.Color;\n\n // ...\n public static java.awt.Color fromProto(Color protocolor) {\n float alpha = protocolor.hasAlpha()\n ? 
protocolor.getAlpha().getValue()\n : 1.0;\n\n return new java.awt.Color(\n protocolor.getRed(),\n protocolor.getGreen(),\n protocolor.getBlue(),\n alpha);\n }\n\n public static Color toProto(java.awt.Color color) {\n float red = (float) color.getRed();\n float green = (float) color.getGreen();\n float blue = (float) color.getBlue();\n float denominator = 255.0;\n Color.Builder resultBuilder =\n Color\n .newBuilder()\n .setRed(red / denominator)\n .setGreen(green / denominator)\n .setBlue(blue / denominator);\n int alpha = color.getAlpha();\n if (alpha != 255) {\n result.setAlpha(\n FloatValue\n .newBuilder()\n .setValue(((float) alpha) / denominator)\n .build());\n }\n return resultBuilder.build();\n }\n // ...\n\nExample (iOS / Obj-C):\n\n // ...\n static UIColor* fromProto(Color* protocolor) {\n float red = [protocolor red];\n float green = [protocolor green];\n float blue = [protocolor blue];\n FloatValue* alpha_wrapper = [protocolor alpha];\n float alpha = 1.0;\n if (alpha_wrapper != nil) {\n alpha = [alpha_wrapper value];\n }\n return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];\n }\n\n static Color* toProto(UIColor* color) {\n CGFloat red, green, blue, alpha;\n if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {\n return nil;\n }\n Color* result = [Color alloc] init];\n [result setRed:red];\n [result setGreen:green];\n [result setBlue:blue];\n if (alpha \u003c= 0.9999) {\n [result setAlpha:floatWrapperWithValue(alpha)];\n }\n [result autorelease];\n return result;\n }\n // ...\n\n Example (JavaScript):\n\n // ...\n\n var protoToCssColor = function(rgb_color) {\n var redFrac = rgb_color.red || 0.0;\n var greenFrac = rgb_color.green || 0.0;\n var blueFrac = rgb_color.blue || 0.0;\n var red = Math.floor(redFrac * 255);\n var green = Math.floor(greenFrac * 255);\n var blue = Math.floor(blueFrac * 255);\n\n if (!('alpha' in rgb_color)) {\n return rgbToCssColor_(red, green, blue);\n }\n\n var alphaFrac = rgb_color.alpha.value || 0.0;\n var rgbParams = [red, green, blue].join(',');\n return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');\n };\n\n var rgbToCssColor_ = function(red, green, blue) {\n var rgbNumber = new Number((red \u003c\u003c 16) | (green \u003c\u003c 8) | blue);\n var hexString = rgbNumber.toString(16);\n var missingZeros = 6 - hexString.length;\n var resultBuilder = ['#'];\n for (var i = 0; i \u003c missingZeros; i++) {\n resultBuilder.push('0');\n }\n resultBuilder.push(hexString);\n return resultBuilder.join('');\n };\n\n // ...", - "type": "object" + } }, - "PivotGroup": { + "ExtendedValue": { + "type": "object", "properties": { - "sourceColumnOffset": { - "description": "The column offset of the source range that this grouping is based on.\n\nFor example, if the source was `C10:E15`, a `sourceColumnOffset` of `0`\nmeans this group refers to column `C`, whereas the offset `1` would refer\nto column `D`.", - "format": "int32", - "type": "integer" + "stringValue": { + "description": "Represents a string value.\nLeading single quotes are not included. 
For example, if the user typed\n`'123` into the UI, this would be represented as a `stringValue` of\n`\"123\"`.", + "type": "string" }, - "showTotals": { - "description": "True if the pivot table should include the totals for this grouping.", + "boolValue": { + "description": "Represents a boolean value.", "type": "boolean" }, - "valueMetadata": { - "description": "Metadata about values in the grouping.", - "type": "array", - "items": { - "$ref": "PivotGroupValueMetadata" - } + "formulaValue": { + "description": "Represents a formula.", + "type": "string" }, - "sortOrder": { - "enum": [ - "SORT_ORDER_UNSPECIFIED", - "ASCENDING", - "DESCENDING" - ], - "description": "The order the values in this group should be sorted.", - "type": "string", - "enumDescriptions": [ - "Default value, do not use this.", - "Sort ascending.", - "Sort descending." - ] + "numberValue": { + "description": "Represents a double value.\nNote: Dates, Times and DateTimes are represented as doubles in\n\"serial number\" format.", + "format": "double", + "type": "number" }, - "valueBucket": { - "$ref": "PivotGroupSortValueBucket", - "description": "The bucket of the opposite pivot group to sort by.\nIf not specified, sorting is alphabetical by this group's values." + "errorValue": { + "description": "Represents an error.\nThis field is read-only.", + "$ref": "ErrorValue" } }, - "id": "PivotGroup", - "description": "A single grouping (either row or column) in a pivot table.", - "type": "object" + "id": "ExtendedValue", + "description": "The kinds of value that a cell in a spreadsheet can have." }, - "PivotTable": { + "BandedRange": { + "description": "A banded (alternating colors) range in a sheet.", + "type": "object", "properties": { - "rows": { - "description": "Each row grouping in the pivot table.", - "type": "array", - "items": { - "$ref": "PivotGroup" - } - }, - "valueLayout": { - "enum": [ - "HORIZONTAL", - "VERTICAL" - ], - "description": "Whether values should be listed horizontally (as columns)\nor vertically (as rows).", - "type": "string", - "enumDescriptions": [ - "Values are laid out horizontally (as columns).", - "Values are laid out vertically (as rows)." - ] - }, - "columns": { - "description": "Each column grouping in the pivot table.", - "type": "array", - "items": { - "$ref": "PivotGroup" - } + "range": { + "$ref": "GridRange", + "description": "The range over which these properties are applied." }, - "values": { - "description": "A list of values to include in the pivot table.", - "type": "array", - "items": { - "$ref": "PivotValue" - } + "bandedRangeId": { + "description": "The id of the banded range.", + "format": "int32", + "type": "integer" }, - "source": { - "$ref": "GridRange", - "description": "The range the pivot table is reading data from." + "rowProperties": { + "description": "Properties for row bands. These properties will be applied on a row-by-row\nbasis throughout all the rows in the range. 
At least one of\nrow_properties or column_properties must be specified.", + "$ref": "BandingProperties" }, - "criteria": { - "additionalProperties": { - "$ref": "PivotFilterCriteria" - }, - "description": "An optional mapping of filters per source column offset.\n\nThe filters will be applied before aggregating data into the pivot table.\nThe map's key is the column offset of the source range that you want to\nfilter, and the value is the criteria for that column.\n\nFor example, if the source was `C10:E15`, a key of `0` will have the filter\nfor column `C`, whereas the key `1` is for column `D`.", - "type": "object" + "columnProperties": { + "$ref": "BandingProperties", + "description": "Properties for column bands. These properties will be applied on a column-\nby-column basis throughout all the columns in the range. At least one of\nrow_properties or column_properties must be specified." } }, - "id": "PivotTable", - "description": "A pivot table.", - "type": "object" + "id": "BandedRange" }, - "ChartSourceRange": { + "BatchClearValuesResponse": { + "description": "The response when updating a range of values in a spreadsheet.", + "type": "object", "properties": { - "sources": { - "description": "The ranges of data for a series or domain.\nExactly one dimension must have a length of 1,\nand all sources in the list must have the same dimension\nwith length 1.\nThe domain (if it exists) & all series must have the same number\nof source ranges. If using more than one source range, then the source\nrange at a given offset must be contiguous across the domain and series.\n\nFor example, these are valid configurations:\n\n domain sources: A1:A5\n series1 sources: B1:B5\n series2 sources: D6:D10\n\n domain sources: A1:A5, C10:C12\n series1 sources: B1:B5, D10:D12\n series2 sources: C1:C5, E10:E12", + "spreadsheetId": { + "type": "string", + "description": "The spreadsheet the updates were applied to." + }, + "clearedRanges": { + "description": "The ranges that were cleared, in A1 notation.\n(If the requests were for an unbounded range or a ranger larger\n than the bounds of the sheet, this will be the actual ranges\n that were cleared, bounded to the sheet's limits.)", "type": "array", "items": { - "$ref": "GridRange" + "type": "string" } } }, - "id": "ChartSourceRange", - "description": "Source ranges for a chart.", - "type": "object" + "id": "BatchClearValuesResponse" }, - "ValueRange": { + "Spreadsheet": { + "description": "Resource that represents a spreadsheet.", + "type": "object", "properties": { - "range": { - "description": "The range the values cover, in A1 notation.\nFor output, this range indicates the entire requested range,\neven though the values will exclude trailing rows and columns.\nWhen appending values, this field represents the range to search for a\ntable, after which values will be appended.", - "type": "string" + "properties": { + "$ref": "SpreadsheetProperties", + "description": "Overall properties of a spreadsheet." }, - "majorDimension": { - "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" - ], - "description": "The major dimension of the values.\n\nFor output, if the spreadsheet data is: `A1=1,B1=2,A2=3,B2=4`,\nthen requesting `range=A1:B2,majorDimension=ROWS` will return\n`[[1,2],[3,4]]`,\nwhereas requesting `range=A1:B2,majorDimension=COLUMNS` will return\n`[[1,3],[2,4]]`.\n\nFor input, with `range=A1:B2,majorDimension=ROWS` then `[[1,2],[3,4]]`\nwill set `A1=1,B1=2,A2=3,B2=4`. 
With `range=A1:B2,majorDimension=COLUMNS`\nthen `[[1,2],[3,4]]` will set `A1=1,B1=3,A2=2,B2=4`.\n\nWhen writing, if this field is not set, it defaults to ROWS.", - "type": "string", - "enumDescriptions": [ - "The default value, do not use.", - "Operates on the rows of a sheet.", - "Operates on the columns of a sheet." - ] + "spreadsheetId": { + "description": "The ID of the spreadsheet.\nThis field is read-only.", + "type": "string" }, - "values": { - "description": "The data that was read or to be written. This is an array of arrays,\nthe outer array representing all the data and each inner array\nrepresenting a major dimension. Each item in the inner array\ncorresponds with one cell.\n\nFor output, empty trailing rows and columns will not be included.\n\nFor input, supported value types are: bool, string, and double.\nNull values will be skipped.\nTo set a cell to an empty value, set the string value to an empty string.", + "sheets": { + "description": "The sheets that are part of a spreadsheet.", "type": "array", "items": { - "type": "array", - "items": { - "type": "any" - } + "$ref": "Sheet" } - } - }, - "id": "ValueRange", - "description": "Data within a range of the spreadsheet.", - "type": "object" - }, - "AppendCellsRequest": { - "properties": { - "rows": { - "description": "The data to append.", + }, + "namedRanges": { + "description": "The named ranges defined in a spreadsheet.", "type": "array", "items": { - "$ref": "RowData" + "$ref": "NamedRange" } }, - "fields": { - "description": "The fields of CellData that should be updated.\nAt least one field must be specified.\nThe root is the CellData; 'row.values.' should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", - "type": "string" - }, - "sheetId": { - "description": "The sheet ID to append the data to.", - "format": "int32", - "type": "integer" + "spreadsheetUrl": { + "type": "string", + "description": "The url of the spreadsheet.\nThis field is read-only." } }, - "id": "AppendCellsRequest", - "description": "Adds new cells after the last row with data in a sheet,\ninserting new rows into the sheet if necessary.", - "type": "object" + "id": "Spreadsheet" }, - "AddBandingRequest": { - "description": "Adds a new banded range to the spreadsheet.", + "AddChartRequest": { + "description": "Adds a chart to a sheet in the spreadsheet.", "type": "object", "properties": { - "bandedRange": { - "$ref": "BandedRange", - "description": "The banded range to add. The bandedRangeId\nfield is optional; if one is not set, an id will be randomly generated. (It\nis an error to specify the ID of a range that already exists.)" + "chart": { + "description": "The chart that should be added to the spreadsheet, including the position\nwhere it should be placed. The chartId\nfield is optional; if one is not set, an id will be randomly generated. (It\nis an error to specify the ID of a chart that already exists.)", + "$ref": "EmbeddedChart" } }, - "id": "AddBandingRequest" + "id": "AddChartRequest" }, - "Response": { - "description": "A single response from an update.", + "UpdateProtectedRangeRequest": { + "description": "Updates an existing protected range with the specified\nprotectedRangeId.", "type": "object", "properties": { - "addFilterView": { - "description": "A reply from adding a filter view.", - "$ref": "AddFilterViewResponse" - }, - "addBanding": { - "$ref": "AddBandingResponse", - "description": "A reply from adding a banded range." 
- }, - "addProtectedRange": { - "description": "A reply from adding a protected range.", - "$ref": "AddProtectedRangeResponse" - }, - "duplicateSheet": { - "$ref": "DuplicateSheetResponse", - "description": "A reply from duplicating a sheet." - }, - "deleteConditionalFormatRule": { - "description": "A reply from deleting a conditional format rule.", - "$ref": "DeleteConditionalFormatRuleResponse" + "protectedRange": { + "description": "The protected range to update with the new properties.", + "$ref": "ProtectedRange" }, - "updateEmbeddedObjectPosition": { - "description": "A reply from updating an embedded object's position.", - "$ref": "UpdateEmbeddedObjectPositionResponse" + "fields": { + "description": "The fields that should be updated. At least one field must be specified.\nThe root `protectedRange` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", + "type": "string" + } + }, + "id": "UpdateProtectedRangeRequest" + }, + "TextFormat": { + "description": "The format of a run of text in a cell.\nAbsent values indicate that the field isn't specified.", + "type": "object", + "properties": { + "underline": { + "description": "True if the text is underlined.", + "type": "boolean" }, - "duplicateFilterView": { - "$ref": "DuplicateFilterViewResponse", - "description": "A reply from duplicating a filter view." + "foregroundColor": { + "$ref": "Color", + "description": "The foreground color of the text." }, - "addChart": { - "description": "A reply from adding a chart.", - "$ref": "AddChartResponse" + "bold": { + "description": "True if the text is bold.", + "type": "boolean" }, - "findReplace": { - "description": "A reply from doing a find/replace.", - "$ref": "FindReplaceResponse" + "fontFamily": { + "description": "The font family.", + "type": "string" }, - "addSheet": { - "description": "A reply from adding a sheet.", - "$ref": "AddSheetResponse" + "italic": { + "description": "True if the text is italicized.", + "type": "boolean" }, - "updateConditionalFormatRule": { - "$ref": "UpdateConditionalFormatRuleResponse", - "description": "A reply from updating a conditional format rule." + "strikethrough": { + "description": "True if the text has a strikethrough.", + "type": "boolean" }, - "addNamedRange": { - "$ref": "AddNamedRangeResponse", - "description": "A reply from adding a named range." + "fontSize": { + "description": "The size of the font.", + "format": "int32", + "type": "integer" + } + }, + "id": "TextFormat" + }, + "AddSheetResponse": { + "id": "AddSheetResponse", + "description": "The result of adding a sheet.", + "type": "object", + "properties": { + "properties": { + "$ref": "SheetProperties", + "description": "The properties of the newly added sheet." + } + } + }, + "AddFilterViewResponse": { + "properties": { + "filter": { + "$ref": "FilterView", + "description": "The newly added filter view." 
} }, - "id": "Response" + "id": "AddFilterViewResponse", + "description": "The result of adding a filter view.", + "type": "object" }, - "TextFormatRun": { + "IterativeCalculationSettings": { + "description": "Settings to control how circular dependencies are resolved with iterative\ncalculation.", + "type": "object", "properties": { - "startIndex": { - "description": "The character index where this run starts.", + "convergenceThreshold": { + "description": "When iterative calculation is enabled, the threshold value such that\ncalculation rounds stop when succesive results differ by less.", + "format": "double", + "type": "number" + }, + "maxIterations": { + "description": "When iterative calculation is enabled, the maximum number of calculation\nrounds to perform during iterative calculation.", "format": "int32", "type": "integer" - }, - "format": { - "description": "The format of this run. Absent values inherit the cell's format.", - "$ref": "TextFormat" } }, - "id": "TextFormatRun", - "description": "A run of a text format. The format of this run continues until the start\nindex of the next run.\nWhen updating, all fields must be set.", - "type": "object" + "id": "IterativeCalculationSettings" }, - "EmbeddedChart": { + "OverlayPosition": { + "description": "The location an object is overlaid on top of a grid.", + "type": "object", "properties": { - "spec": { - "description": "The specification of the chart.", - "$ref": "ChartSpec" + "widthPixels": { + "description": "The width of the object, in pixels. Defaults to 600.", + "format": "int32", + "type": "integer" }, - "chartId": { - "description": "The ID of the chart.", + "offsetXPixels": { + "description": "The horizontal offset, in pixels, that the object is offset\nfrom the anchor cell.", "format": "int32", "type": "integer" }, - "position": { - "$ref": "EmbeddedObjectPosition", - "description": "The position of the chart." + "anchorCell": { + "$ref": "GridCoordinate", + "description": "The cell the object is anchored to." + }, + "offsetYPixels": { + "description": "The vertical offset, in pixels, that the object is offset\nfrom the anchor cell.", + "format": "int32", + "type": "integer" + }, + "heightPixels": { + "description": "The height of the object, in pixels. Defaults to 371.", + "format": "int32", + "type": "integer" } }, - "id": "EmbeddedChart", - "description": "A chart embedded in a sheet.", - "type": "object" + "id": "OverlayPosition" }, - "InsertRangeRequest": { - "description": "Inserts cells into a range, shifting the existing cells over or down.", + "SpreadsheetProperties": { + "description": "Properties of a spreadsheet.", "type": "object", "properties": { - "shiftDimension": { + "title": { + "description": "The title of the spreadsheet.", + "type": "string" + }, + "timeZone": { + "description": "The time zone of the spreadsheet, in CLDR format such as\n`America/New_York`. If the time zone isn't recognized, this may\nbe a custom time zone such as `GMT-07:00`.", + "type": "string" + }, + "locale": { + "description": "The locale of the spreadsheet in one of the following formats:\n\n* an ISO 639-1 language code such as `en`\n\n* an ISO 639-2 language code such as `fil`, if no 639-1 code exists\n\n* a combination of the ISO language code and country code, such as `en_US`\n\nNote: when updating this field, not all locales/languages are supported.", + "type": "string" + }, + "iterativeCalculationSettings": { + "description": "Determines whether and how circular references are resolved with iterative\ncalculation. 
Absence of this field means that circular references will\nresult in calculation errors.", + "$ref": "IterativeCalculationSettings" + }, + "autoRecalc": { "enumDescriptions": [ - "The default value, do not use.", - "Operates on the rows of a sheet.", - "Operates on the columns of a sheet." + "Default value. This value must not be used.", + "Volatile functions are updated on every change.", + "Volatile functions are updated on every change and every minute.", + "Volatile functions are updated on every change and hourly." ], "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" + "RECALCULATION_INTERVAL_UNSPECIFIED", + "ON_CHANGE", + "MINUTE", + "HOUR" ], - "description": "The dimension which will be shifted when inserting cells.\nIf ROWS, existing cells will be shifted down.\nIf COLUMNS, existing cells will be shifted right.", + "description": "The amount of time to wait before volatile functions are recalculated.", "type": "string" }, + "defaultFormat": { + "$ref": "CellFormat", + "description": "The default format of all cells in the spreadsheet.\nCellData.effectiveFormat will not be set if the\ncell's format is equal to this default format.\nThis field is read-only." + } + }, + "id": "SpreadsheetProperties" + }, + "RepeatCellRequest": { + "id": "RepeatCellRequest", + "description": "Updates all cells in the range to the values in the given Cell object.\nOnly the fields listed in the fields field are updated; others are\nunchanged.\n\nIf writing a cell with a formula, the formula's ranges will automatically\nincrement for each field in the range.\nFor example, if writing a cell with formula `=A1` into range B2:C4,\nB2 would be `=A1`, B3 would be `=A2`, B4 would be `=A3`,\nC2 would be `=B1`, C3 would be `=B2`, C4 would be `=B3`.\n\nTo keep the formula's ranges static, use the `$` indicator.\nFor example, use the formula `=$A$1` to prevent both the row and the\ncolumn from incrementing.", + "type": "object", + "properties": { "range": { "$ref": "GridRange", - "description": "The range to insert new cells into." + "description": "The range to repeat the cell in." + }, + "fields": { + "description": "The fields that should be updated. At least one field must be specified.\nThe root `cell` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", + "type": "string" + }, + "cell": { + "$ref": "CellData", + "description": "The data to write." } - }, - "id": "InsertRangeRequest" + } }, - "AddNamedRangeResponse": { + "AddChartResponse": { + "type": "object", "properties": { - "namedRange": { - "description": "The named range to add.", - "$ref": "NamedRange" + "chart": { + "description": "The newly added chart.", + "$ref": "EmbeddedChart" } }, - "id": "AddNamedRangeResponse", - "description": "The result of adding a named range.", - "type": "object" + "id": "AddChartResponse", + "description": "The result of adding a chart to a spreadsheet." 
}, - "RowData": { - "description": "Data about each cell in a row.", + "InsertDimensionRequest": { + "description": "Inserts rows or columns in a sheet at a particular index.", "type": "object", "properties": { - "values": { - "description": "The values in the row, one per column.", - "type": "array", - "items": { - "$ref": "CellData" - } + "inheritFromBefore": { + "description": "Whether dimension properties should be extended from the dimensions\nbefore or after the newly inserted dimensions.\nTrue to inherit from the dimensions before (in which case the start\nindex must be greater than 0), and false to inherit from the dimensions\nafter.\n\nFor example, if row index 0 has red background and row index 1\nhas a green background, then inserting 2 rows at index 1 can inherit\neither the green or red background. If `inheritFromBefore` is true,\nthe two new rows will be red (because the row before the insertion point\nwas red), whereas if `inheritFromBefore` is false, the two new rows will\nbe green (because the row after the insertion point was green).", + "type": "boolean" + }, + "range": { + "$ref": "DimensionRange", + "description": "The dimensions to insert. Both the start and end indexes must be bounded." } }, - "id": "RowData" + "id": "InsertDimensionRequest" }, - "Border": { + "UpdateSpreadsheetPropertiesRequest": { + "description": "Updates properties of a spreadsheet.", + "type": "object", "properties": { - "color": { - "description": "The color of the border.", - "$ref": "Color" - }, - "width": { - "description": "The width of the border, in pixels.\nDeprecated; the width is determined by the \"style\" field.", - "format": "int32", - "type": "integer" + "fields": { + "description": "The fields that should be updated. At least one field must be specified.\nThe root 'properties' is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", + "type": "string" }, - "style": { - "enum": [ - "STYLE_UNSPECIFIED", - "DOTTED", - "DASHED", - "SOLID", - "SOLID_MEDIUM", - "SOLID_THICK", - "NONE", - "DOUBLE" - ], - "description": "The style of the border.", - "type": "string", - "enumDescriptions": [ - "The style is not specified. Do not use this.", - "The border is dotted.", - "The border is dashed.", - "The border is a thin solid line.", - "The border is a medium solid line.", - "The border is a thick solid line.", - "No border.\nUsed only when updating a border in order to erase it.", - "The border is two solid lines." - ] + "properties": { + "$ref": "SpreadsheetProperties", + "description": "The properties to update." 
} }, - "id": "Border", - "description": "A border along a cell.", - "type": "object" + "id": "UpdateSpreadsheetPropertiesRequest" }, - "GridData": { + "ProtectedRange": { "properties": { - "rowData": { - "description": "The data in the grid, one entry per row,\nstarting with the row in startRow.\nThe values in RowData will correspond to columns starting\nat start_column.", + "unprotectedRanges": { + "description": "The list of unprotected ranges within a protected sheet.\nUnprotected ranges are only supported on protected sheets.", "type": "array", "items": { - "$ref": "RowData" + "$ref": "GridRange" } }, - "startRow": { - "description": "The first row this GridData refers to, zero-based.", - "format": "int32", - "type": "integer" - }, - "columnMetadata": { - "description": "Metadata about the requested columns in the grid, starting with the column\nin start_column.", - "type": "array", - "items": { - "$ref": "DimensionProperties" - } + "namedRangeId": { + "description": "The named range this protected range is backed by, if any.\n\nWhen writing, only one of range or named_range_id\nmay be set.", + "type": "string" }, - "startColumn": { - "description": "The first column this GridData refers to, zero-based.", + "protectedRangeId": { + "description": "The ID of the protected range.\nThis field is read-only.", "format": "int32", "type": "integer" }, - "rowMetadata": { - "description": "Metadata about the requested rows in the grid, starting with the row\nin start_row.", - "type": "array", - "items": { - "$ref": "DimensionProperties" - } - } - }, - "id": "GridData", - "description": "Data in the grid, as well as metadata about the dimensions.", - "type": "object" - }, - "FindReplaceRequest": { - "description": "Finds and replaces data in cells over a range, sheet, or all sheets.", - "type": "object", - "properties": { - "matchEntireCell": { - "description": "True if the find value should match the entire cell.", - "type": "boolean" - }, - "searchByRegex": { - "description": "True if the find value is a regex.\nThe regular expression and replacement should follow Java regex rules\nat https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html.\nThe replacement string is allowed to refer to capturing groups.\nFor example, if one cell has the contents `\"Google Sheets\"` and another\nhas `\"Google Docs\"`, then searching for `\"o.* (.*)\"` with a replacement of\n`\"$1 Rocks\"` would change the contents of the cells to\n`\"GSheets Rocks\"` and `\"GDocs Rocks\"` respectively.", + "warningOnly": { + "description": "True if this protected range will show a warning when editing.\nWarning-based protection means that every user can edit data in the\nprotected range, except editing will prompt a warning asking the user\nto confirm the edit.\n\nWhen writing: if this field is true, then editors is ignored.\nAdditionally, if this field is changed from true to false and the\n`editors` field is not set (nor included in the field mask), then\nthe editors will be set to all the editors in the document.", "type": "boolean" }, - "find": { - "description": "The value to search.", - "type": "string" - }, - "replacement": { - "description": "The value to use as the replacement.", - "type": "string" + "requestingUserCanEdit": { + "type": "boolean", + "description": "True if the user who requested this protected range can edit the\nprotected area.\nThis field is read-only." }, "range": { "$ref": "GridRange", - "description": "The range to find/replace over." 
- }, - "sheetId": { - "description": "The sheet to find/replace over.", - "format": "int32", - "type": "integer" - }, - "allSheets": { - "description": "True to find/replace over all sheets.", - "type": "boolean" - }, - "matchCase": { - "description": "True if the search is case sensitive.", - "type": "boolean" + "description": "The range that is being protected.\nThe range may be fully unbounded, in which case this is considered\na protected sheet.\n\nWhen writing, only one of range or named_range_id\nmay be set." }, - "includeFormulas": { - "description": "True if the search should include cells with formulas.\nFalse to skip cells with formulas.", - "type": "boolean" - } - }, - "id": "FindReplaceRequest" - }, - "UpdateNamedRangeRequest": { - "description": "Updates properties of the named range with the specified\nnamedRangeId.", - "type": "object", - "properties": { - "namedRange": { - "description": "The named range to update with the new properties.", - "$ref": "NamedRange" + "editors": { + "$ref": "Editors", + "description": "The users and groups with edit access to the protected range.\nThis field is only visible to users with edit access to the protected\nrange and the document.\nEditors are not supported with warning_only protection." }, - "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root `namedRange` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", + "description": { + "description": "The description of this protected range.", "type": "string" } }, - "id": "UpdateNamedRangeRequest" - }, - "AddSheetRequest": { - "properties": { - "properties": { - "$ref": "SheetProperties", - "description": "The properties the new sheet should have.\nAll properties are optional.\nThe sheetId field is optional; if one is not\nset, an id will be randomly generated. (It is an error to specify the ID\nof a sheet that already exists.)" - } - }, - "id": "AddSheetRequest", - "description": "Adds a new sheet.\nWhen a sheet is added at a given index,\nall subsequent sheets' indexes are incremented.\nTo add an object sheet, use AddChartRequest instead and specify\nEmbeddedObjectPosition.sheetId or\nEmbeddedObjectPosition.newSheet.", + "id": "ProtectedRange", + "description": "A protected range.", "type": "object" }, - "UpdateCellsRequest": { - "description": "Updates all cells in a range with new data.", + "BatchUpdateValuesRequest": { + "description": "The request for updating more than one range of values in a spreadsheet.", "type": "object", "properties": { - "start": { - "description": "The coordinate to start writing data at.\nAny number of rows and columns (including a different number of\ncolumns per row) may be written.", - "$ref": "GridCoordinate" + "responseValueRenderOption": { + "description": "Determines how values in the response should be rendered.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", + "type": "string", + "enumDescriptions": [ + "Values will be calculated & formatted in the reply according to the\ncell's formatting. 
Formatting is based on the spreadsheet's locale,\nnot the requesting user's locale.\nFor example, if `A1` is `1.23` and `A2` is `=A1` and formatted as currency,\nthen `A2` would return `\"$1.23\"`.", + "Values will be calculated, but not formatted in the reply.\nFor example, if `A1` is `1.23` and `A2` is `=A1` and formatted as currency,\nthen `A2` would return the number `1.23`.", + "Values will not be calculated. The reply will include the formulas.\nFor example, if `A1` is `1.23` and `A2` is `=A1` and formatted as currency,\nthen A2 would return `\"=A1\"`." + ], + "enum": [ + "FORMATTED_VALUE", + "UNFORMATTED_VALUE", + "FORMULA" + ] }, - "range": { - "description": "The range to write data to.\n\nIf the data in rows does not cover the entire requested range,\nthe fields matching those set in fields will be cleared.", - "$ref": "GridRange" + "includeValuesInResponse": { + "type": "boolean", + "description": "Determines if the update response should include the values\nof the cells that were updated. By default, responses\ndo not include the updated values. The `updatedData` field within\neach of the BatchUpdateValuesResponse.responses will contain\nthe updated values. If the range to write was larger than than the range\nactually written, the response will include all values in the requested\nrange (excluding trailing empty rows and columns)." }, - "rows": { - "description": "The data to write.", + "valueInputOption": { + "enumDescriptions": [ + "Default input value. This value must not be used.", + "The values the user has entered will not be parsed and will be stored\nas-is.", + "The values will be parsed as if the user typed them into the UI.\nNumbers will stay as numbers, but strings may be converted to numbers,\ndates, etc. following the same rules that are applied when entering\ntext into a cell via the Google Sheets UI." + ], + "enum": [ + "INPUT_VALUE_OPTION_UNSPECIFIED", + "RAW", + "USER_ENTERED" + ], + "description": "How the input data should be interpreted.", + "type": "string" + }, + "data": { + "description": "The new values to apply to the spreadsheet.", "type": "array", "items": { - "$ref": "RowData" + "$ref": "ValueRange" } }, - "fields": { - "description": "The fields of CellData that should be updated.\nAt least one field must be specified.\nThe root is the CellData; 'row.values.' should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", - "type": "string" - } - }, - "id": "UpdateCellsRequest" - }, - "DeleteConditionalFormatRuleResponse": { - "description": "The result of deleting a conditional format rule.", - "type": "object", - "properties": { - "rule": { - "$ref": "ConditionalFormatRule", - "description": "The rule that was deleted." - } - }, - "id": "DeleteConditionalFormatRuleResponse" - }, - "DeleteRangeRequest": { - "properties": { - "shiftDimension": { - "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" - ], - "description": "The dimension from which deleted cells will be replaced with.\nIf ROWS, existing cells will be shifted upward to\nreplace the deleted cells. If COLUMNS, existing cells\nwill be shifted left to replace the deleted cells.", + "responseDateTimeRenderOption": { + "description": "Determines how dates, times, and durations in the response should be\nrendered. 
This is ignored if response_value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", "type": "string", "enumDescriptions": [ - "The default value, do not use.", - "Operates on the rows of a sheet.", - "Operates on the columns of a sheet." + "Instructs date, time, datetime, and duration fields to be output\nas doubles in \"serial number\" format, as popularized by Lotus 1-2-3.\nDays are counted from December 31st 1899 and are incremented by 1,\nand times are fractions of a day. For example, January 1st 1900 at noon\nwould be 1.5, 1 because it's 1 day offset from December 31st 1899,\nand .5 because noon is half a day. February 1st 1900 at 3pm would\nbe 32.625. This correctly treats the year 1900 as not a leap year.", + "Instructs date, time, datetime, and duration fields to be output\nas strings in their given number format (which is dependent\non the spreadsheet locale)." + ], + "enum": [ + "SERIAL_NUMBER", + "FORMATTED_STRING" ] - }, - "range": { - "$ref": "GridRange", - "description": "The range of cells to delete." } }, - "id": "DeleteRangeRequest", - "description": "Deletes a range of cells, shifting other cells into the deleted area.", - "type": "object" + "id": "BatchUpdateValuesRequest" }, - "GridCoordinate": { + "DimensionProperties": { + "description": "Properties about a dimension.", + "type": "object", "properties": { - "sheetId": { - "description": "The sheet this coordinate is on.", + "pixelSize": { + "description": "The height (if a row) or width (if a column) of the dimension in pixels.", "format": "int32", "type": "integer" }, - "rowIndex": { - "description": "The row index of the coordinate.", - "format": "int32", - "type": "integer" + "hiddenByFilter": { + "description": "True if this dimension is being filtered.\nThis field is read-only.", + "type": "boolean" }, - "columnIndex": { - "description": "The column index of the coordinate.", - "format": "int32", - "type": "integer" + "hiddenByUser": { + "description": "True if this dimension is explicitly hidden.", + "type": "boolean" } }, - "id": "GridCoordinate", - "description": "A coordinate in a sheet.\nAll indexes are zero-based.", - "type": "object" + "id": "DimensionProperties" }, - "UpdateSheetPropertiesRequest": { + "NamedRange": { + "description": "A named range.", + "type": "object", "properties": { - "properties": { - "$ref": "SheetProperties", - "description": "The properties to update." - }, - "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root `properties` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", + "namedRangeId": { + "description": "The ID of the named range.", "type": "string" - } - }, - "id": "UpdateSheetPropertiesRequest", - "description": "Updates properties of the sheet with the specified\nsheetId.", - "type": "object" - }, - "UnmergeCellsRequest": { - "properties": { + }, "range": { "$ref": "GridRange", - "description": "The range within which all cells should be unmerged.\nIf the range spans multiple merges, all will be unmerged.\nThe range must not partially span any merge." + "description": "The range this represents." 
+ }, + "name": { + "description": "The name of the named range.", + "type": "string" } }, - "id": "UnmergeCellsRequest", - "description": "Unmerges cells in the given range.", - "type": "object" + "id": "NamedRange" }, - "GridProperties": { + "DimensionRange": { "properties": { - "rowCount": { - "description": "The number of rows in the grid.", - "format": "int32", - "type": "integer" + "startIndex": { + "type": "integer", + "description": "The start (inclusive) of the span, or not set if unbounded.", + "format": "int32" }, - "frozenRowCount": { - "description": "The number of rows that are frozen in the grid.", + "endIndex": { + "description": "The end (exclusive) of the span, or not set if unbounded.", "format": "int32", "type": "integer" }, - "hideGridlines": { - "description": "True if the grid isn't showing gridlines in the UI.", - "type": "boolean" - }, - "columnCount": { - "description": "The number of columns in the grid.", - "format": "int32", - "type": "integer" + "sheetId": { + "type": "integer", + "description": "The sheet this span is on.", + "format": "int32" }, - "frozenColumnCount": { - "description": "The number of columns that are frozen in the grid.", - "format": "int32", - "type": "integer" + "dimension": { + "enumDescriptions": [ + "The default value, do not use.", + "Operates on the rows of a sheet.", + "Operates on the columns of a sheet." + ], + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ], + "description": "The dimension of the span.", + "type": "string" } }, - "id": "GridProperties", - "description": "Properties of a grid.", + "id": "DimensionRange", + "description": "A range along a single dimension on a sheet.\nAll indexes are zero-based.\nIndexes are half open: the start index is inclusive\nand the end index is exclusive.\nMissing indexes indicate the range is unbounded on that side.", "type": "object" }, - "UpdateEmbeddedObjectPositionResponse": { + "CutPasteRequest": { "properties": { - "position": { - "$ref": "EmbeddedObjectPosition", - "description": "The new position of the embedded object." + "destination": { + "description": "The top-left coordinate where the data should be pasted.", + "$ref": "GridCoordinate" + }, + "source": { + "$ref": "GridRange", + "description": "The source data to cut." + }, + "pasteType": { + "enumDescriptions": [ + "Paste values, formulas, formats, and merges.", + "Paste the values ONLY without formats, formulas, or merges.", + "Paste the format and data validation only.", + "Like PASTE_NORMAL but without borders.", + "Paste the formulas only.", + "Paste the data validation only.", + "Paste the conditional formatting rules only." + ], + "enum": [ + "PASTE_NORMAL", + "PASTE_VALUES", + "PASTE_FORMAT", + "PASTE_NO_BORDERS", + "PASTE_FORMULA", + "PASTE_DATA_VALIDATION", + "PASTE_CONDITIONAL_FORMATTING" + ], + "description": "What kind of data to paste. All the source data will be cut, regardless\nof what is pasted.", + "type": "string" } }, - "id": "UpdateEmbeddedObjectPositionResponse", - "description": "The result of updating an embedded object's position.", + "id": "CutPasteRequest", + "description": "Moves data from the source to the destination.", "type": "object" }, - "Sheet": { - "description": "A sheet in a spreadsheet.", + "Borders": { + "description": "The borders of the cell.", "type": "object", "properties": { - "basicFilter": { - "$ref": "BasicFilter", - "description": "The filter on this sheet, if any." 
- }, - "merges": { - "description": "The ranges that are merged together.", - "type": "array", - "items": { - "$ref": "GridRange" - } - }, - "data": { - "description": "Data in the grid, if this is a grid sheet.\nThe number of GridData objects returned is dependent on the number of\nranges requested on this sheet. For example, if this is representing\n`Sheet1`, and the spreadsheet was requested with ranges\n`Sheet1!A1:C10` and `Sheet1!D15:E20`, then the first GridData will have a\nstartRow/startColumn of `0`,\nwhile the second one will have `startRow 14` (zero-based row 15),\nand `startColumn 3` (zero-based column D).", - "type": "array", - "items": { - "$ref": "GridData" - } - }, - "bandedRanges": { - "description": "The banded (i.e. alternating colors) ranges on this sheet.", - "type": "array", - "items": { - "$ref": "BandedRange" - } - }, - "charts": { - "description": "The specifications of every chart on this sheet.", - "type": "array", - "items": { - "$ref": "EmbeddedChart" - } - }, - "properties": { - "description": "The properties of the sheet.", - "$ref": "SheetProperties" + "right": { + "$ref": "Border", + "description": "The right border of the cell." }, - "filterViews": { - "description": "The filter views in this sheet.", - "type": "array", - "items": { - "$ref": "FilterView" - } + "bottom": { + "description": "The bottom border of the cell.", + "$ref": "Border" }, - "conditionalFormats": { - "description": "The conditional format rules in this sheet.", - "type": "array", - "items": { - "$ref": "ConditionalFormatRule" - } + "top": { + "$ref": "Border", + "description": "The top border of the cell." }, - "protectedRanges": { - "description": "The protected ranges in this sheet.", - "type": "array", - "items": { - "$ref": "ProtectedRange" - } + "left": { + "description": "The left border of the cell.", + "$ref": "Border" } }, - "id": "Sheet" + "id": "Borders" }, - "SortSpec": { + "BasicChartSeries": { "properties": { - "sortOrder": { + "series": { + "$ref": "ChartData", + "description": "The data being visualized in this chart series." + }, + "type": { "enumDescriptions": [ - "Default value, do not use this.", - "Sort ascending.", - "Sort descending." + "Default value, do not use.", + "A \u003ca href=\"/chart/interactive/docs/gallery/barchart\"\u003ebar chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/linechart\"\u003eline chart\u003c/a\u003e.", + "An \u003ca href=\"/chart/interactive/docs/gallery/areachart\"\u003earea chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/columnchart\"\u003ecolumn chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/scatterchart\"\u003escatter chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/combochart\"\u003ecombo chart\u003c/a\u003e." ], "enum": [ - "SORT_ORDER_UNSPECIFIED", - "ASCENDING", - "DESCENDING" + "BASIC_CHART_TYPE_UNSPECIFIED", + "BAR", + "LINE", + "AREA", + "COLUMN", + "SCATTER", + "COMBO" ], - "description": "The order data should be sorted.", + "description": "The type of this series. 
Valid only if the\nchartType is\nCOMBO.\nDifferent types will change the way the series is visualized.\nOnly LINE, AREA,\nand COLUMN are supported.", "type": "string" }, - "dimensionIndex": { - "description": "The dimension the sort should be applied to.", - "format": "int32", - "type": "integer" + "targetAxis": { + "enum": [ + "BASIC_CHART_AXIS_POSITION_UNSPECIFIED", + "BOTTOM_AXIS", + "LEFT_AXIS", + "RIGHT_AXIS" + ], + "description": "The minor axis that will specify the range of values for this series.\nFor example, if charting stocks over time, the \"Volume\" series\nmay want to be pinned to the right with the prices pinned to the left,\nbecause the scale of trading volume is different than the scale of\nprices.\nIt is an error to specify an axis that isn't a valid minor axis\nfor the chart's type.", + "type": "string", + "enumDescriptions": [ + "Default value, do not use.", + "The axis rendered at the bottom of a chart.\nFor most charts, this is the standard major axis.\nFor bar charts, this is a minor axis.", + "The axis rendered at the left of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is the standard major axis.", + "The axis rendered at the right of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is an unusual major axis." + ] } }, - "id": "SortSpec", - "description": "A sort order associated with a specific column or row.", + "id": "BasicChartSeries", + "description": "A single series of data in a chart.\nFor example, if charting stock prices over time, multiple series may exist,\none for the \"Open Price\", \"High Price\", \"Low Price\" and \"Close Price\".", "type": "object" }, - "BooleanRule": { - "description": "A rule that may or may not match, depending on the condition.", + "AutoResizeDimensionsRequest": { + "description": "Automatically resizes one or more dimensions based on the contents\nof the cells in that dimension.", "type": "object", "properties": { - "condition": { - "description": "The condition of the rule. If the condition evaluates to true,\nthe format will be applied.", - "$ref": "BooleanCondition" - }, - "format": { - "description": "The format to apply.\nConditional formatting can only apply a subset of formatting:\nbold, italic,\nstrikethrough,\nforeground color &\nbackground color.", - "$ref": "CellFormat" + "dimensions": { + "$ref": "DimensionRange", + "description": "The dimensions to automatically resize.\nOnly COLUMNS are supported." } }, - "id": "BooleanRule" + "id": "AutoResizeDimensionsRequest" }, - "FilterCriteria": { - "description": "Criteria for showing/hiding rows in a filter or filter view.", + "UpdateBordersRequest": { + "id": "UpdateBordersRequest", + "description": "Updates the borders of a range.\nIf a field is not set in the request, that means the border remains as-is.\nFor example, with two subsequent UpdateBordersRequest:\n\n 1. range: A1:A5 `{ top: RED, bottom: WHITE }`\n 2. range: A1:A5 `{ left: BLUE }`\n\nThat would result in A1:A5 having a borders of\n`{ top: RED, bottom: WHITE, left: BLUE }`.\nIf you want to clear a border, explicitly set the style to\nNONE.", "type": "object", "properties": { - "hiddenValues": { - "description": "Values that should be hidden.", - "type": "array", - "items": { - "type": "string" - } + "bottom": { + "$ref": "Border", + "description": "The border to put at the bottom of the range." 
}, - "condition": { - "$ref": "BooleanCondition", - "description": "A condition that must be true for values to be shown.\n(This does not override hiddenValues -- if a value is listed there,\n it will still be hidden.)" + "innerVertical": { + "$ref": "Border", + "description": "The vertical border to put within the range." + }, + "right": { + "$ref": "Border", + "description": "The border to put at the right of the range." + }, + "range": { + "$ref": "GridRange", + "description": "The range whose borders should be updated." + }, + "innerHorizontal": { + "$ref": "Border", + "description": "The horizontal border to put within the range." + }, + "top": { + "description": "The border to put at the top of the range.", + "$ref": "Border" + }, + "left": { + "$ref": "Border", + "description": "The border to put at the left of the range." } - }, - "id": "FilterCriteria" + } }, - "PivotGroupValueMetadata": { + "CellFormat": { + "type": "object", "properties": { - "value": { - "description": "The calculated value the metadata corresponds to.\n(Note that formulaValue is not valid,\n because the values will be calculated.)", - "$ref": "ExtendedValue" + "numberFormat": { + "description": "A format describing how number values should be represented to the user.", + "$ref": "NumberFormat" }, - "collapsed": { - "description": "True if the data corresponding to the value is collapsed.", - "type": "boolean" + "hyperlinkDisplayType": { + "enumDescriptions": [ + "The default value: the hyperlink is rendered. Do not use this.", + "A hyperlink should be explicitly rendered.", + "A hyperlink should not be rendered." + ], + "enum": [ + "HYPERLINK_DISPLAY_TYPE_UNSPECIFIED", + "LINKED", + "PLAIN_TEXT" + ], + "description": "How a hyperlink, if it exists, should be displayed in the cell.", + "type": "string" + }, + "horizontalAlignment": { + "description": "The horizontal alignment of the value in the cell.", + "type": "string", + "enumDescriptions": [ + "The horizontal alignment is not specified. Do not use this.", + "The text is explicitly aligned to the left of the cell.", + "The text is explicitly aligned to the center of the cell.", + "The text is explicitly aligned to the right of the cell." + ], + "enum": [ + "HORIZONTAL_ALIGN_UNSPECIFIED", + "LEFT", + "CENTER", + "RIGHT" + ] + }, + "textFormat": { + "description": "The format of the text in the cell (unless overridden by a format run).", + "$ref": "TextFormat" + }, + "backgroundColor": { + "$ref": "Color", + "description": "The background color of the cell." + }, + "verticalAlignment": { + "type": "string", + "enumDescriptions": [ + "The vertical alignment is not specified. Do not use this.", + "The text is explicitly aligned to the top of the cell.", + "The text is explicitly aligned to the middle of the cell.", + "The text is explicitly aligned to the bottom of the cell." + ], + "enum": [ + "VERTICAL_ALIGN_UNSPECIFIED", + "TOP", + "MIDDLE", + "BOTTOM" + ], + "description": "The vertical alignment of the value in the cell." + }, + "padding": { + "description": "The padding of the cell.", + "$ref": "Padding" + }, + "textDirection": { + "enumDescriptions": [ + "The text direction is not specified. Do not use this.", + "The text direction of left-to-right was set by the user.", + "The text direction of right-to-left was set by the user." 
+ ], + "enum": [ + "TEXT_DIRECTION_UNSPECIFIED", + "LEFT_TO_RIGHT", + "RIGHT_TO_LEFT" + ], + "description": "The direction of the text in the cell.", + "type": "string" + }, + "borders": { + "$ref": "Borders", + "description": "The borders of the cell." + }, + "wrapStrategy": { + "enum": [ + "WRAP_STRATEGY_UNSPECIFIED", + "OVERFLOW_CELL", + "LEGACY_WRAP", + "CLIP", + "WRAP" + ], + "description": "The wrap strategy for the value in the cell.", + "type": "string", + "enumDescriptions": [ + "The default value, do not use.", + "Lines that are longer than the cell width will be written in the next\ncell over, so long as that cell is empty. If the next cell over is\nnon-empty, this behaves the same as CLIP. The text will never wrap\nto the next line unless the user manually inserts a new line.\nExample:\n\n | First sentence. |\n | Manual newline that is very long. \u003c- Text continues into next cell\n | Next newline. |", + "This wrap strategy represents the old Google Sheets wrap strategy where\nwords that are longer than a line are clipped rather than broken. This\nstrategy is not supported on all platforms and is being phased out.\nExample:\n\n | Cell has a |\n | loooooooooo| \u003c- Word is clipped.\n | word. |", + "Lines that are longer than the cell width will be clipped.\nThe text will never wrap to the next line unless the user manually\ninserts a new line.\nExample:\n\n | First sentence. |\n | Manual newline t| \u003c- Text is clipped\n | Next newline. |", + "Words that are longer than a line are wrapped at the character level\nrather than clipped.\nExample:\n\n | Cell has a |\n | loooooooooo| \u003c- Word is broken.\n | ong word. |" + ] } }, - "id": "PivotGroupValueMetadata", - "description": "Metadata about a value in a pivot grouping.", - "type": "object" + "id": "CellFormat", + "description": "The format of a cell." }, - "Editors": { + "ClearValuesResponse": { + "description": "The response when clearing a range of values in a spreadsheet.", + "type": "object", "properties": { - "users": { - "description": "The email addresses of users with edit access to the protected range.", - "type": "array", - "items": { - "type": "string" - } - }, - "groups": { - "description": "The email addresses of groups with edit access to the protected range.", - "type": "array", - "items": { - "type": "string" - } + "spreadsheetId": { + "description": "The spreadsheet the updates were applied to.", + "type": "string" }, - "domainUsersCanEdit": { - "description": "True if anyone in the document's domain has edit access to the protected\nrange. 
Domain protection is only supported on documents within a domain.", - "type": "boolean" + "clearedRange": { + "description": "The range (in A1 notation) that was cleared.\n(If the request was for an unbounded range or a ranger larger\n than the bounds of the sheet, this will be the actual range\n that was cleared, bounded to the sheet's limits.)", + "type": "string" } }, - "id": "Editors", - "description": "The editors of a protected range.", - "type": "object" + "id": "ClearValuesResponse" }, - "UpdateConditionalFormatRuleRequest": { + "DeleteConditionalFormatRuleRequest": { + "description": "Deletes a conditional format rule at the given index.\nAll subsequent rules' indexes are decremented.", + "type": "object", "properties": { - "rule": { - "description": "The rule that should replace the rule at the given index.", - "$ref": "ConditionalFormatRule" - }, "index": { - "description": "The zero-based index of the rule that should be replaced or moved.", + "description": "The zero-based index of the rule to be deleted.", "format": "int32", "type": "integer" }, "sheetId": { - "description": "The sheet of the rule to move. Required if new_index is set,\nunused otherwise.", - "format": "int32", - "type": "integer" - }, - "newIndex": { - "description": "The zero-based new index the rule should end up at.", + "description": "The sheet the rule is being deleted from.", "format": "int32", "type": "integer" } }, - "id": "UpdateConditionalFormatRuleRequest", - "description": "Updates a conditional format rule at the given index,\nor moves a conditional format rule to another index.", - "type": "object" + "id": "DeleteConditionalFormatRuleRequest" }, - "BasicChartDomain": { - "description": "The domain of a chart.\nFor example, if charting stock prices over time, this would be the date.", + "AddBandingResponse": { + "description": "The result of adding a banded range.", "type": "object", "properties": { - "domain": { - "description": "The data of the domain. For example, if charting stock prices over time,\nthis is the data representing the dates.", - "$ref": "ChartData" + "bandedRange": { + "$ref": "BandedRange", + "description": "The banded range that was added." } }, - "id": "BasicChartDomain" + "id": "AddBandingResponse" }, - "DataValidationRule": { + "DeleteNamedRangeRequest": { + "type": "object", "properties": { - "condition": { - "$ref": "BooleanCondition", - "description": "The condition that data in the cell must match." - }, - "showCustomUi": { - "description": "True if the UI should be customized based on the kind of condition.\nIf true, \"List\" conditions will show a dropdown.", - "type": "boolean" - }, - "strict": { - "description": "True if invalid data should be rejected.", - "type": "boolean" - }, - "inputMessage": { - "description": "A message to show the user when adding data to the cell.", + "namedRangeId": { + "description": "The ID of the named range to delete.", "type": "string" } }, - "id": "DataValidationRule", - "description": "A data validation rule.", - "type": "object" + "id": "DeleteNamedRangeRequest", + "description": "Removes the named range with the given ID from the spreadsheet." 
}, - "PasteDataRequest": { + "ChartData": { "properties": { - "data": { - "description": "The data to insert.", - "type": "string" - }, - "delimiter": { - "description": "The delimiter in the data.", - "type": "string" - }, - "type": { - "enum": [ - "PASTE_NORMAL", - "PASTE_VALUES", - "PASTE_FORMAT", - "PASTE_NO_BORDERS", - "PASTE_FORMULA", - "PASTE_DATA_VALIDATION", - "PASTE_CONDITIONAL_FORMATTING" - ], - "description": "How the data should be pasted.", - "type": "string", - "enumDescriptions": [ - "Paste values, formulas, formats, and merges.", - "Paste the values ONLY without formats, formulas, or merges.", - "Paste the format and data validation only.", - "Like PASTE_NORMAL but without borders.", - "Paste the formulas only.", - "Paste the data validation only.", - "Paste the conditional formatting rules only." - ] - }, - "html": { - "description": "True if the data is HTML.", - "type": "boolean" - }, - "coordinate": { - "description": "The coordinate at which the data should start being inserted.", - "$ref": "GridCoordinate" + "sourceRange": { + "$ref": "ChartSourceRange", + "description": "The source ranges of the data." } }, - "id": "PasteDataRequest", - "description": "Inserts data into the spreadsheet starting at the specified coordinate.", + "id": "ChartData", + "description": "The data included in a domain or series.", "type": "object" }, - "AppendDimensionRequest": { - "description": "Appends rows or columns to the end of a sheet.", + "BatchGetValuesResponse": { + "description": "The response when retrieving more than one range of values in a spreadsheet.", "type": "object", "properties": { - "dimension": { - "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" - ], - "description": "Whether rows or columns should be appended.", - "type": "string", - "enumDescriptions": [ - "The default value, do not use.", - "Operates on the rows of a sheet.", - "Operates on the columns of a sheet." - ] - }, - "length": { - "description": "The number of rows or columns to append.", - "format": "int32", - "type": "integer" + "valueRanges": { + "description": "The requested values. The order of the ValueRanges is the same as the\norder of the requested ranges.", + "type": "array", + "items": { + "$ref": "ValueRange" + } }, - "sheetId": { - "description": "The sheet to append rows or columns to.", - "format": "int32", - "type": "integer" - } - }, - "id": "AppendDimensionRequest" - }, - "AddNamedRangeRequest": { - "properties": { - "namedRange": { - "$ref": "NamedRange", - "description": "The named range to add. The namedRangeId\nfield is optional; if one is not set, an id will be randomly generated. (It\nis an error to specify the ID of a range that already exists.)" + "spreadsheetId": { + "description": "The ID of the spreadsheet the data was retrieved from.", + "type": "string" } }, - "id": "AddNamedRangeRequest", - "description": "Adds a named range to the spreadsheet.", - "type": "object" + "id": "BatchGetValuesResponse" }, - "UpdateEmbeddedObjectPositionRequest": { + "UpdateBandingRequest": { "properties": { "fields": { - "description": "The fields of OverlayPosition\nthat should be updated when setting a new position. Used only if\nnewPosition.overlayPosition\nis set, in which case at least one field must\nbe specified. The root `newPosition.overlayPosition` is implied and\nshould not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "description": "The fields that should be updated. 
At least one field must be specified.\nThe root `bandedRange` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", "format": "google-fieldmask", "type": "string" }, - "objectId": { - "description": "The ID of the object to moved.", - "format": "int32", - "type": "integer" - }, - "newPosition": { - "description": "An explicit position to move the embedded object to.\nIf newPosition.sheetId is set,\na new sheet with that ID will be created.\nIf newPosition.newSheet is set to true,\na new sheet will be created with an ID that will be chosen for you.", - "$ref": "EmbeddedObjectPosition" + "bandedRange": { + "$ref": "BandedRange", + "description": "The banded range to update with the new properties." } }, - "id": "UpdateEmbeddedObjectPositionRequest", - "description": "Update an embedded object's position (such as a moving or resizing a\nchart or image).", + "id": "UpdateBandingRequest", + "description": "Updates properties of the supplied banded range.", "type": "object" }, - "PieChartSpec": { - "description": "A \u003ca href=\"/chart/interactive/docs/gallery/piechart\"\u003epie chart\u003c/a\u003e.", + "Color": { + "description": "Represents a color in the RGBA color space. This representation is designed\nfor simplicity of conversion to/from color representations in various\nlanguages over compactness; for example, the fields of this representation\ncan be trivially provided to the constructor of \"java.awt.Color\" in Java; it\ncan also be trivially provided to UIColor's \"+colorWithRed:green:blue:alpha\"\nmethod in iOS; and, with just a little work, it can be easily formatted into\na CSS \"rgba()\" string in JavaScript, as well. Here are some examples:\n\nExample (Java):\n\n import com.google.type.Color;\n\n // ...\n public static java.awt.Color fromProto(Color protocolor) {\n float alpha = protocolor.hasAlpha()\n ? 
protocolor.getAlpha().getValue()\n : 1.0;\n\n return new java.awt.Color(\n protocolor.getRed(),\n protocolor.getGreen(),\n protocolor.getBlue(),\n alpha);\n }\n\n public static Color toProto(java.awt.Color color) {\n float red = (float) color.getRed();\n float green = (float) color.getGreen();\n float blue = (float) color.getBlue();\n float denominator = 255.0;\n Color.Builder resultBuilder =\n Color\n .newBuilder()\n .setRed(red / denominator)\n .setGreen(green / denominator)\n .setBlue(blue / denominator);\n int alpha = color.getAlpha();\n if (alpha != 255) {\n result.setAlpha(\n FloatValue\n .newBuilder()\n .setValue(((float) alpha) / denominator)\n .build());\n }\n return resultBuilder.build();\n }\n // ...\n\nExample (iOS / Obj-C):\n\n // ...\n static UIColor* fromProto(Color* protocolor) {\n float red = [protocolor red];\n float green = [protocolor green];\n float blue = [protocolor blue];\n FloatValue* alpha_wrapper = [protocolor alpha];\n float alpha = 1.0;\n if (alpha_wrapper != nil) {\n alpha = [alpha_wrapper value];\n }\n return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];\n }\n\n static Color* toProto(UIColor* color) {\n CGFloat red, green, blue, alpha;\n if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {\n return nil;\n }\n Color* result = [Color alloc] init];\n [result setRed:red];\n [result setGreen:green];\n [result setBlue:blue];\n if (alpha \u003c= 0.9999) {\n [result setAlpha:floatWrapperWithValue(alpha)];\n }\n [result autorelease];\n return result;\n }\n // ...\n\n Example (JavaScript):\n\n // ...\n\n var protoToCssColor = function(rgb_color) {\n var redFrac = rgb_color.red || 0.0;\n var greenFrac = rgb_color.green || 0.0;\n var blueFrac = rgb_color.blue || 0.0;\n var red = Math.floor(redFrac * 255);\n var green = Math.floor(greenFrac * 255);\n var blue = Math.floor(blueFrac * 255);\n\n if (!('alpha' in rgb_color)) {\n return rgbToCssColor_(red, green, blue);\n }\n\n var alphaFrac = rgb_color.alpha.value || 0.0;\n var rgbParams = [red, green, blue].join(',');\n return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');\n };\n\n var rgbToCssColor_ = function(red, green, blue) {\n var rgbNumber = new Number((red \u003c\u003c 16) | (green \u003c\u003c 8) | blue);\n var hexString = rgbNumber.toString(16);\n var missingZeros = 6 - hexString.length;\n var resultBuilder = ['#'];\n for (var i = 0; i \u003c missingZeros; i++) {\n resultBuilder.push('0');\n }\n resultBuilder.push(hexString);\n return resultBuilder.join('');\n };\n\n // ...", "type": "object", "properties": { - "domain": { - "description": "The data that covers the domain of the pie chart.", - "$ref": "ChartData" - }, - "threeDimensional": { - "description": "True if the pie is three dimensional.", - "type": "boolean" + "green": { + "description": "The amount of green in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" }, - "series": { - "$ref": "ChartData", - "description": "The data that covers the one and only series of the pie chart." + "blue": { + "description": "The amount of blue in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" }, - "legendPosition": { - "enumDescriptions": [ - "Default value, do not use.", - "The legend is rendered on the bottom of the chart.", - "The legend is rendered on the left of the chart.", - "The legend is rendered on the right of the chart.", - "The legend is rendered on the top of the chart.", - "No legend is rendered.", - "Each pie slice has a label attached to it." 
- ], - "enum": [ - "PIE_CHART_LEGEND_POSITION_UNSPECIFIED", - "BOTTOM_LEGEND", - "LEFT_LEGEND", - "RIGHT_LEGEND", - "TOP_LEGEND", - "NO_LEGEND", - "LABELED_LEGEND" - ], - "description": "Where the legend of the pie chart should be drawn.", - "type": "string" + "alpha": { + "type": "number", + "description": "The fraction of this color that should be applied to the pixel. That is,\nthe final pixel color is defined by the equation:\n\n pixel color = alpha * (this color) + (1.0 - alpha) * (background color)\n\nThis means that a value of 1.0 corresponds to a solid color, whereas\na value of 0.0 corresponds to a completely transparent color. This\nuses a wrapper message rather than a simple float scalar so that it is\npossible to distinguish between a default value and the value being unset.\nIf omitted, this color object is to be rendered as a solid color\n(as if the alpha value had been explicitly given with a value of 1.0).", + "format": "float" }, - "pieHole": { - "description": "The size of the hole in the pie chart.", - "format": "double", + "red": { + "description": "The amount of red in the color as a value in the interval [0, 1].", + "format": "float", "type": "number" } }, - "id": "PieChartSpec" - }, - "UpdateFilterViewRequest": { - "description": "Updates properties of the filter view.", - "type": "object", - "properties": { - "filter": { - "$ref": "FilterView", - "description": "The new properties of the filter view." - }, - "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root `filter` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", - "type": "string" - } - }, - "id": "UpdateFilterViewRequest" + "id": "Color" }, - "ConditionalFormatRule": { - "description": "A rule describing a conditional format.", + "PivotGroup": { "type": "object", "properties": { - "ranges": { - "description": "The ranges that will be formatted if the condition is true.\nAll the ranges must be on the same grid.", + "sortOrder": { + "type": "string", + "enumDescriptions": [ + "Default value, do not use this.", + "Sort ascending.", + "Sort descending." + ], + "enum": [ + "SORT_ORDER_UNSPECIFIED", + "ASCENDING", + "DESCENDING" + ], + "description": "The order the values in this group should be sorted." + }, + "valueBucket": { + "$ref": "PivotGroupSortValueBucket", + "description": "The bucket of the opposite pivot group to sort by.\nIf not specified, sorting is alphabetical by this group's values." + }, + "sourceColumnOffset": { + "description": "The column offset of the source range that this grouping is based on.\n\nFor example, if the source was `C10:E15`, a `sourceColumnOffset` of `0`\nmeans this group refers to column `C`, whereas the offset `1` would refer\nto column `D`.", + "format": "int32", + "type": "integer" + }, + "showTotals": { + "description": "True if the pivot table should include the totals for this grouping.", + "type": "boolean" + }, + "valueMetadata": { + "description": "Metadata about values in the grouping.", "type": "array", "items": { - "$ref": "GridRange" + "$ref": "PivotGroupValueMetadata" } - }, - "gradientRule": { - "$ref": "GradientRule", - "description": "The formatting will vary based on the gradients in the rule." 
- }, - "booleanRule": { - "description": "The formatting is either \"on\" or \"off\" according to the rule.", - "$ref": "BooleanRule" } }, - "id": "ConditionalFormatRule" + "id": "PivotGroup", + "description": "A single grouping (either row or column) in a pivot table." }, - "CopyPasteRequest": { - "description": "Copies data from the source to the destination.", + "PivotTable": { + "description": "A pivot table.", "type": "object", "properties": { - "destination": { - "$ref": "GridRange", - "description": "The location to paste to. If the range covers a span that's\na multiple of the source's height or width, then the\ndata will be repeated to fill in the destination range.\nIf the range is smaller than the source range, the entire\nsource data will still be copied (beyond the end of the destination range)." + "criteria": { + "description": "An optional mapping of filters per source column offset.\n\nThe filters will be applied before aggregating data into the pivot table.\nThe map's key is the column offset of the source range that you want to\nfilter, and the value is the criteria for that column.\n\nFor example, if the source was `C10:E15`, a key of `0` will have the filter\nfor column `C`, whereas the key `1` is for column `D`.", + "type": "object", + "additionalProperties": { + "$ref": "PivotFilterCriteria" + } }, - "pasteOrientation": { + "rows": { + "description": "Each row grouping in the pivot table.", + "type": "array", + "items": { + "$ref": "PivotGroup" + } + }, + "valueLayout": { "enumDescriptions": [ - "Paste normally.", - "Paste transposed, where all rows become columns and vice versa." + "Values are laid out horizontally (as columns).", + "Values are laid out vertically (as rows)." ], "enum": [ - "NORMAL", - "TRANSPOSE" + "HORIZONTAL", + "VERTICAL" ], - "description": "How that data should be oriented when pasting.", + "description": "Whether values should be listed horizontally (as columns)\nor vertically (as rows).", "type": "string" }, "source": { - "description": "The source range to copy.", + "description": "The range the pivot table is reading data from.", "$ref": "GridRange" }, - "pasteType": { - "enum": [ - "PASTE_NORMAL", - "PASTE_VALUES", - "PASTE_FORMAT", - "PASTE_NO_BORDERS", - "PASTE_FORMULA", - "PASTE_DATA_VALIDATION", - "PASTE_CONDITIONAL_FORMATTING" - ], - "description": "What kind of data to paste.", - "type": "string", - "enumDescriptions": [ - "Paste values, formulas, formats, and merges.", - "Paste the values ONLY without formats, formulas, or merges.", - "Paste the format and data validation only.", - "Like PASTE_NORMAL but without borders.", - "Paste the formulas only.", - "Paste the data validation only.", - "Paste the conditional formatting rules only." 
- ] + "columns": { + "description": "Each column grouping in the pivot table.", + "type": "array", + "items": { + "$ref": "PivotGroup" + } + }, + "values": { + "description": "A list of values to include in the pivot table.", + "type": "array", + "items": { + "$ref": "PivotValue" + } } }, - "id": "CopyPasteRequest" + "id": "PivotTable" }, - "BooleanCondition": { - "description": "A condition that can evaluate to true or false.\nBooleanConditions are used by conditional formatting,\ndata validation, and the criteria in filters.", + "ChartSourceRange": { + "description": "Source ranges for a chart.", "type": "object", "properties": { - "type": { - "enum": [ - "CONDITION_TYPE_UNSPECIFIED", - "NUMBER_GREATER", - "NUMBER_GREATER_THAN_EQ", - "NUMBER_LESS", - "NUMBER_LESS_THAN_EQ", - "NUMBER_EQ", - "NUMBER_NOT_EQ", - "NUMBER_BETWEEN", - "NUMBER_NOT_BETWEEN", - "TEXT_CONTAINS", - "TEXT_NOT_CONTAINS", - "TEXT_STARTS_WITH", - "TEXT_ENDS_WITH", - "TEXT_EQ", - "TEXT_IS_EMAIL", - "TEXT_IS_URL", - "DATE_EQ", - "DATE_BEFORE", - "DATE_AFTER", - "DATE_ON_OR_BEFORE", - "DATE_ON_OR_AFTER", - "DATE_BETWEEN", - "DATE_NOT_BETWEEN", - "DATE_IS_VALID", - "ONE_OF_RANGE", - "ONE_OF_LIST", - "BLANK", - "NOT_BLANK", - "CUSTOM_FORMULA" - ], - "description": "The type of condition.", - "type": "string", - "enumDescriptions": [ - "The default value, do not use.", - "The cell's value must be greater than the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be greater than or equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be less than the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be less than or equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be not equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be between the two condition values.\nSupported by data validation, conditional formatting and filters.\nRequires exactly two ConditionValues.", - "The cell's value must not be between the two condition values.\nSupported by data validation, conditional formatting and filters.\nRequires exactly two ConditionValues.", - "The cell's value must contain the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must not contain the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must start with the condition's value.\nSupported by conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must end with the condition's value.\nSupported by conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be exactly the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be a valid email 
address.\nSupported by data validation.\nRequires no ConditionValues.", - "The cell's value must be a valid URL.\nSupported by data validation.\nRequires no ConditionValues.", - "The cell's value must be the same date as the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", - "The cell's value must be before the date of the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue\nthat may be a relative date.", - "The cell's value must be after the date of the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue\nthat may be a relative date.", - "The cell's value must be on or before the date of the condition's value.\nSupported by data validation.\nRequires a single ConditionValue\nthat may be a relative date.", - "The cell's value must be on or after the date of the condition's value.\nSupported by data validation.\nRequires a single ConditionValue\nthat may be a relative date.", - "The cell's value must be between the dates of the two condition values.\nSupported by data validation.\nRequires exactly two ConditionValues.", - "The cell's value must be outside the dates of the two condition values.\nSupported by data validation.\nRequires exactly two ConditionValues.", - "The cell's value must be a date.\nSupported by data validation.\nRequires no ConditionValues.", - "The cell's value must be listed in the grid in condition value's range.\nSupported by data validation.\nRequires a single ConditionValue,\nand the value must be a valid range in A1 notation.", - "The cell's value must in the list of condition values.\nSupported by data validation.\nSupports any number of condition values,\none per item in the list.\nFormulas are not supported in the values.", - "The cell's value must be empty.\nSupported by conditional formatting and filters.\nRequires no ConditionValues.", - "The cell's value must not be empty.\nSupported by conditional formatting and filters.\nRequires no ConditionValues.", - "The condition's formula must evaluate to true.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue." - ] - }, - "values": { - "description": "The values of the condition. The number of supported values depends\non the condition type. Some support zero values,\nothers one or two values,\nand ConditionType.ONE_OF_LIST supports an arbitrary number of values.", + "sources": { + "description": "The ranges of data for a series or domain.\nExactly one dimension must have a length of 1,\nand all sources in the list must have the same dimension\nwith length 1.\nThe domain (if it exists) & all series must have the same number\nof source ranges. 
If using more than one source range, then the source\nrange at a given offset must be contiguous across the domain and series.\n\nFor example, these are valid configurations:\n\n domain sources: A1:A5\n series1 sources: B1:B5\n series2 sources: D6:D10\n\n domain sources: A1:A5, C10:C12\n series1 sources: B1:B5, D10:D12\n series2 sources: C1:C5, E10:E12", "type": "array", "items": { - "$ref": "ConditionValue" + "$ref": "GridRange" } } }, - "id": "BooleanCondition" + "id": "ChartSourceRange" }, - "Request": { - "description": "A single kind of update to apply to a spreadsheet.", + "ValueRange": { + "id": "ValueRange", + "description": "Data within a range of the spreadsheet.", "type": "object", "properties": { - "sortRange": { - "$ref": "SortRangeRequest", - "description": "Sorts data in a range." - }, - "deleteProtectedRange": { - "description": "Deletes a protected range.", - "$ref": "DeleteProtectedRangeRequest" - }, - "duplicateFilterView": { - "$ref": "DuplicateFilterViewRequest", - "description": "Duplicates a filter view." - }, - "addChart": { - "$ref": "AddChartRequest", - "description": "Adds a chart." - }, - "findReplace": { - "description": "Finds and replaces occurrences of some text with other text.", - "$ref": "FindReplaceRequest" - }, - "textToColumns": { - "$ref": "TextToColumnsRequest", - "description": "Converts a column of text into many columns of text." - }, - "updateChartSpec": { - "description": "Updates a chart's specifications.", - "$ref": "UpdateChartSpecRequest" - }, - "updateProtectedRange": { - "description": "Updates a protected range.", - "$ref": "UpdateProtectedRangeRequest" - }, - "addSheet": { - "description": "Adds a sheet.", - "$ref": "AddSheetRequest" - }, - "deleteFilterView": { - "$ref": "DeleteFilterViewRequest", - "description": "Deletes a filter view from a sheet." - }, - "copyPaste": { - "description": "Copies data from one area and pastes it to another.", - "$ref": "CopyPasteRequest" - }, - "insertDimension": { - "description": "Inserts new rows or columns in a sheet.", - "$ref": "InsertDimensionRequest" - }, - "deleteRange": { - "description": "Deletes a range of cells from a sheet, shifting the remaining cells.", - "$ref": "DeleteRangeRequest" - }, - "deleteBanding": { - "description": "Removes a banded range", - "$ref": "DeleteBandingRequest" - }, - "addFilterView": { - "description": "Adds a filter view.", - "$ref": "AddFilterViewRequest" - }, - "updateBorders": { - "description": "Updates the borders in a range of cells.", - "$ref": "UpdateBordersRequest" - }, - "setDataValidation": { - "$ref": "SetDataValidationRequest", - "description": "Sets data validation for one or more cells." - }, - "deleteConditionalFormatRule": { - "$ref": "DeleteConditionalFormatRuleRequest", - "description": "Deletes an existing conditional format rule." - }, - "clearBasicFilter": { - "description": "Clears the basic filter on a sheet.", - "$ref": "ClearBasicFilterRequest" - }, - "repeatCell": { - "$ref": "RepeatCellRequest", - "description": "Repeats a single cell across a range." + "majorDimension": { + "enumDescriptions": [ + "The default value, do not use.", + "Operates on the rows of a sheet.", + "Operates on the columns of a sheet." 
+ ], + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ], + "description": "The major dimension of the values.\n\nFor output, if the spreadsheet data is: `A1=1,B1=2,A2=3,B2=4`,\nthen requesting `range=A1:B2,majorDimension=ROWS` will return\n`[[1,2],[3,4]]`,\nwhereas requesting `range=A1:B2,majorDimension=COLUMNS` will return\n`[[1,3],[2,4]]`.\n\nFor input, with `range=A1:B2,majorDimension=ROWS` then `[[1,2],[3,4]]`\nwill set `A1=1,B1=2,A2=3,B2=4`. With `range=A1:B2,majorDimension=COLUMNS`\nthen `[[1,2],[3,4]]` will set `A1=1,B1=3,A2=2,B2=4`.\n\nWhen writing, if this field is not set, it defaults to ROWS.", + "type": "string" }, - "appendDimension": { - "description": "Appends dimensions to the end of a sheet.", - "$ref": "AppendDimensionRequest" + "values": { + "description": "The data that was read or to be written. This is an array of arrays,\nthe outer array representing all the data and each inner array\nrepresenting a major dimension. Each item in the inner array\ncorresponds with one cell.\n\nFor output, empty trailing rows and columns will not be included.\n\nFor input, supported value types are: bool, string, and double.\nNull values will be skipped.\nTo set a cell to an empty value, set the string value to an empty string.", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "any" + } + } }, - "updateConditionalFormatRule": { - "description": "Updates an existing conditional format rule.", - "$ref": "UpdateConditionalFormatRuleRequest" + "range": { + "description": "The range the values cover, in A1 notation.\nFor output, this range indicates the entire requested range,\neven though the values will exclude trailing rows and columns.\nWhen appending values, this field represents the range to search for a\ntable, after which values will be appended.", + "type": "string" + } + } + }, + "AppendCellsRequest": { + "description": "Adds new cells after the last row with data in a sheet,\ninserting new rows into the sheet if necessary.", + "type": "object", + "properties": { + "rows": { + "description": "The data to append.", + "type": "array", + "items": { + "$ref": "RowData" + } }, - "insertRange": { - "$ref": "InsertRangeRequest", - "description": "Inserts new cells in a sheet, shifting the existing cells." + "fields": { + "description": "The fields of CellData that should be updated.\nAt least one field must be specified.\nThe root is the CellData; 'row.values.' should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", + "type": "string" }, - "moveDimension": { - "description": "Moves rows or columns to another location in a sheet.", - "$ref": "MoveDimensionRequest" + "sheetId": { + "type": "integer", + "description": "The sheet ID to append the data to.", + "format": "int32" + } + }, + "id": "AppendCellsRequest" + }, + "AddBandingRequest": { + "description": "Adds a new banded range to the spreadsheet.", + "type": "object", + "properties": { + "bandedRange": { + "$ref": "BandedRange", + "description": "The banded range to add. The bandedRangeId\nfield is optional; if one is not set, an id will be randomly generated. 
(It\nis an error to specify the ID of a range that already exists.)" + } + }, + "id": "AddBandingRequest" + }, + "Response": { + "description": "A single response from an update.", + "type": "object", + "properties": { + "addFilterView": { + "description": "A reply from adding a filter view.", + "$ref": "AddFilterViewResponse" }, - "updateBanding": { - "description": "Updates a banded range", - "$ref": "UpdateBandingRequest" + "addBanding": { + "$ref": "AddBandingResponse", + "description": "A reply from adding a banded range." }, "addProtectedRange": { - "description": "Adds a protected range.", - "$ref": "AddProtectedRangeRequest" - }, - "deleteNamedRange": { - "description": "Deletes a named range.", - "$ref": "DeleteNamedRangeRequest" + "description": "A reply from adding a protected range.", + "$ref": "AddProtectedRangeResponse" }, "duplicateSheet": { - "description": "Duplicates a sheet.", - "$ref": "DuplicateSheetRequest" - }, - "unmergeCells": { - "description": "Unmerges merged cells.", - "$ref": "UnmergeCellsRequest" + "$ref": "DuplicateSheetResponse", + "description": "A reply from duplicating a sheet." }, - "deleteSheet": { - "$ref": "DeleteSheetRequest", - "description": "Deletes a sheet." + "deleteConditionalFormatRule": { + "description": "A reply from deleting a conditional format rule.", + "$ref": "DeleteConditionalFormatRuleResponse" }, "updateEmbeddedObjectPosition": { - "$ref": "UpdateEmbeddedObjectPositionRequest", - "description": "Updates an embedded object's (e.g. chart, image) position." - }, - "updateDimensionProperties": { - "description": "Updates dimensions' properties.", - "$ref": "UpdateDimensionPropertiesRequest" - }, - "pasteData": { - "$ref": "PasteDataRequest", - "description": "Pastes data (HTML or delimited) into a sheet." - }, - "setBasicFilter": { - "$ref": "SetBasicFilterRequest", - "description": "Sets the basic filter on a sheet." - }, - "addConditionalFormatRule": { - "$ref": "AddConditionalFormatRuleRequest", - "description": "Adds a new conditional format rule." - }, - "addNamedRange": { - "$ref": "AddNamedRangeRequest", - "description": "Adds a named range." - }, - "updateCells": { - "description": "Updates many cells at once.", - "$ref": "UpdateCellsRequest" - }, - "updateSpreadsheetProperties": { - "description": "Updates the spreadsheet's properties.", - "$ref": "UpdateSpreadsheetPropertiesRequest" - }, - "deleteEmbeddedObject": { - "description": "Deletes an embedded object (e.g, chart, image) in a sheet.", - "$ref": "DeleteEmbeddedObjectRequest" - }, - "updateFilterView": { - "description": "Updates the properties of a filter view.", - "$ref": "UpdateFilterViewRequest" + "$ref": "UpdateEmbeddedObjectPositionResponse", + "description": "A reply from updating an embedded object's position." }, - "addBanding": { - "description": "Adds a new banded range", - "$ref": "AddBandingRequest" - }, - "appendCells": { - "description": "Appends cells after the last row with data in a sheet.", - "$ref": "AppendCellsRequest" - }, - "autoResizeDimensions": { - "$ref": "AutoResizeDimensionsRequest", - "description": "Automatically resizes one or more dimensions based on the contents\nof the cells in that dimension." - }, - "cutPaste": { - "$ref": "CutPasteRequest", - "description": "Cuts data from one area and pastes it to another." 
+ "duplicateFilterView": { + "description": "A reply from duplicating a filter view.", + "$ref": "DuplicateFilterViewResponse" }, - "mergeCells": { - "description": "Merges cells together.", - "$ref": "MergeCellsRequest" + "addChart": { + "$ref": "AddChartResponse", + "description": "A reply from adding a chart." }, - "updateNamedRange": { - "description": "Updates a named range.", - "$ref": "UpdateNamedRangeRequest" + "findReplace": { + "description": "A reply from doing a find/replace.", + "$ref": "FindReplaceResponse" }, - "updateSheetProperties": { - "description": "Updates a sheet's properties.", - "$ref": "UpdateSheetPropertiesRequest" + "addSheet": { + "$ref": "AddSheetResponse", + "description": "A reply from adding a sheet." }, - "deleteDimension": { - "$ref": "DeleteDimensionRequest", - "description": "Deletes rows or columns in a sheet." + "updateConditionalFormatRule": { + "description": "A reply from updating a conditional format rule.", + "$ref": "UpdateConditionalFormatRuleResponse" }, - "autoFill": { - "description": "Automatically fills in more data based on existing data.", - "$ref": "AutoFillRequest" + "addNamedRange": { + "$ref": "AddNamedRangeResponse", + "description": "A reply from adding a named range." } }, - "id": "Request" + "id": "Response" }, - "GridRange": { + "InsertRangeRequest": { + "description": "Inserts cells into a range, shifting the existing cells over or down.", + "type": "object", "properties": { - "endRowIndex": { - "description": "The end row (exclusive) of the range, or not set if unbounded.", - "format": "int32", - "type": "integer" + "shiftDimension": { + "enumDescriptions": [ + "The default value, do not use.", + "Operates on the rows of a sheet.", + "Operates on the columns of a sheet." + ], + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ], + "description": "The dimension which will be shifted when inserting cells.\nIf ROWS, existing cells will be shifted down.\nIf COLUMNS, existing cells will be shifted right.", + "type": "string" }, - "endColumnIndex": { - "description": "The end column (exclusive) of the range, or not set if unbounded.", + "range": { + "$ref": "GridRange", + "description": "The range to insert new cells into." + } + }, + "id": "InsertRangeRequest" + }, + "TextFormatRun": { + "description": "A run of a text format. The format of this run continues until the start\nindex of the next run.\nWhen updating, all fields must be set.", + "type": "object", + "properties": { + "startIndex": { + "description": "The character index where this run starts.", "format": "int32", "type": "integer" }, - "startRowIndex": { - "description": "The start row (inclusive) of the range, or not set if unbounded.", + "format": { + "description": "The format of this run. 
Absent values inherit the cell's format.", + "$ref": "TextFormat" + } + }, + "id": "TextFormatRun" + }, + "EmbeddedChart": { + "description": "A chart embedded in a sheet.", + "type": "object", + "properties": { + "chartId": { + "description": "The ID of the chart.", "format": "int32", "type": "integer" }, - "startColumnIndex": { - "description": "The start column (inclusive) of the range, or not set if unbounded.", - "format": "int32", - "type": "integer" + "position": { + "description": "The position of the chart.", + "$ref": "EmbeddedObjectPosition" }, - "sheetId": { - "description": "The sheet this range is on.", - "format": "int32", - "type": "integer" + "spec": { + "description": "The specification of the chart.", + "$ref": "ChartSpec" } }, - "id": "GridRange", - "description": "A range on a sheet.\nAll indexes are zero-based.\nIndexes are half open, e.g the start index is inclusive\nand the end index is exclusive -- [start_index, end_index).\nMissing indexes indicate the range is unbounded on that side.\n\nFor example, if `\"Sheet1\"` is sheet ID 0, then:\n\n `Sheet1!A1:A1 == sheet_id: 0,\n start_row_index: 0, end_row_index: 1,\n start_column_index: 0, end_column_index: 1`\n\n `Sheet1!A3:B4 == sheet_id: 0,\n start_row_index: 2, end_row_index: 4,\n start_column_index: 0, end_column_index: 2`\n\n `Sheet1!A:B == sheet_id: 0,\n start_column_index: 0, end_column_index: 2`\n\n `Sheet1!A5:B == sheet_id: 0,\n start_row_index: 4,\n start_column_index: 0, end_column_index: 2`\n\n `Sheet1 == sheet_id:0`\n\nThe start index must always be less than or equal to the end index.\nIf the start index equals the end index, then the range is empty.\nEmpty ranges are typically not meaningful and are usually rendered in the\nUI as `#REF!`.", - "type": "object" + "id": "EmbeddedChart" }, - "BasicChartSpec": { + "AddNamedRangeResponse": { + "description": "The result of adding a named range.", + "type": "object", "properties": { - "legendPosition": { - "enumDescriptions": [ - "Default value, do not use.", - "The legend is rendered on the bottom of the chart.", - "The legend is rendered on the left of the chart.", - "The legend is rendered on the right of the chart.", - "The legend is rendered on the top of the chart.", - "No legend is rendered." - ], - "enum": [ - "BASIC_CHART_LEGEND_POSITION_UNSPECIFIED", - "BOTTOM_LEGEND", - "LEFT_LEGEND", - "RIGHT_LEGEND", - "TOP_LEGEND", - "NO_LEGEND" - ], - "description": "The position of the chart legend.", - "type": "string" - }, - "domains": { - "description": "The domain of data this is charting.\nOnly a single domain is currently supported.", + "namedRange": { + "$ref": "NamedRange", + "description": "The named range to add." 
+ } + }, + "id": "AddNamedRangeResponse" + }, + "RowData": { + "description": "Data about each cell in a row.", + "type": "object", + "properties": { + "values": { + "description": "The values in the row, one per column.", + "type": "array", + "items": { + "$ref": "CellData" + } + } + }, + "id": "RowData" + }, + "GridData": { + "description": "Data in the grid, as well as metadata about the dimensions.", + "type": "object", + "properties": { + "columnMetadata": { + "description": "Metadata about the requested columns in the grid, starting with the column\nin start_column.", "type": "array", "items": { - "$ref": "BasicChartDomain" + "$ref": "DimensionProperties" } }, - "headerCount": { - "description": "The number of rows or columns in the data that are \"headers\".\nIf not set, Google Sheets will guess how many rows are headers based\non the data.\n\n(Note that BasicChartAxis.title may override the axis title\n inferred from the header values.)", + "startColumn": { + "description": "The first column this GridData refers to, zero-based.", "format": "int32", "type": "integer" }, - "axis": { - "description": "The axis on the chart.", + "rowMetadata": { + "description": "Metadata about the requested rows in the grid, starting with the row\nin start_row.", "type": "array", "items": { - "$ref": "BasicChartAxis" + "$ref": "DimensionProperties" } }, - "chartType": { - "enumDescriptions": [ - "Default value, do not use.", - "A \u003ca href=\"/chart/interactive/docs/gallery/barchart\"\u003ebar chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/linechart\"\u003eline chart\u003c/a\u003e.", - "An \u003ca href=\"/chart/interactive/docs/gallery/areachart\"\u003earea chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/columnchart\"\u003ecolumn chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/scatterchart\"\u003escatter chart\u003c/a\u003e.", - "A \u003ca href=\"/chart/interactive/docs/gallery/combochart\"\u003ecombo chart\u003c/a\u003e." - ], - "enum": [ - "BASIC_CHART_TYPE_UNSPECIFIED", - "BAR", - "LINE", - "AREA", - "COLUMN", - "SCATTER", - "COMBO" - ], - "description": "The type of the chart.", - "type": "string" - }, - "series": { - "description": "The data this chart is visualizing.", + "rowData": { + "description": "The data in the grid, one entry per row,\nstarting with the row in startRow.\nThe values in RowData will correspond to columns starting\nat start_column.", "type": "array", "items": { - "$ref": "BasicChartSeries" + "$ref": "RowData" } + }, + "startRow": { + "type": "integer", + "description": "The first row this GridData refers to, zero-based.", + "format": "int32" } }, - "id": "BasicChartSpec", - "description": "The specification for a basic chart. See BasicChartType for the list\nof charts this supports.", - "type": "object" + "id": "GridData" }, - "SetDataValidationRequest": { + "Border": { + "description": "A border along a cell.", + "type": "object", "properties": { - "rule": { - "$ref": "DataValidationRule", - "description": "The data validation rule to set on each cell in the range,\nor empty to clear the data validation in the range." + "width": { + "description": "The width of the border, in pixels.\nDeprecated; the width is determined by the \"style\" field.", + "format": "int32", + "type": "integer" }, - "range": { - "$ref": "GridRange", - "description": "The range the data validation rule should apply to." 
+ "style": { + "enum": [ + "STYLE_UNSPECIFIED", + "DOTTED", + "DASHED", + "SOLID", + "SOLID_MEDIUM", + "SOLID_THICK", + "NONE", + "DOUBLE" + ], + "description": "The style of the border.", + "type": "string", + "enumDescriptions": [ + "The style is not specified. Do not use this.", + "The border is dotted.", + "The border is dashed.", + "The border is a thin solid line.", + "The border is a medium solid line.", + "The border is a thick solid line.", + "No border.\nUsed only when updating a border in order to erase it.", + "The border is two solid lines." + ] + }, + "color": { + "description": "The color of the border.", + "$ref": "Color" } }, - "id": "SetDataValidationRequest", - "description": "Sets a data validation rule to every cell in the range.\nTo clear validation in a range, call this with no rule specified.", - "type": "object" + "id": "Border" }, - "CellData": { + "UpdateNamedRangeRequest": { "properties": { - "userEnteredFormat": { - "$ref": "CellFormat", - "description": "The format the user entered for the cell.\n\nWhen writing, the new format will be merged with the existing format." + "namedRange": { + "$ref": "NamedRange", + "description": "The named range to update with the new properties." }, - "note": { - "description": "Any note on the cell.", + "fields": { + "description": "The fields that should be updated. At least one field must be specified.\nThe root `namedRange` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", "type": "string" + } + }, + "id": "UpdateNamedRangeRequest", + "description": "Updates properties of the named range with the specified\nnamedRangeId.", + "type": "object" + }, + "FindReplaceRequest": { + "description": "Finds and replaces data in cells over a range, sheet, or all sheets.", + "type": "object", + "properties": { + "allSheets": { + "description": "True to find/replace over all sheets.", + "type": "boolean" }, - "effectiveFormat": { - "description": "The effective format being used by the cell.\nThis includes the results of applying any conditional formatting and,\nif the cell contains a formula, the computed number format.\nIf the effective format is the default format, effective format will\nnot be written.\nThis field is read-only.", - "$ref": "CellFormat" + "matchCase": { + "description": "True if the search is case sensitive.", + "type": "boolean" }, - "dataValidation": { - "$ref": "DataValidationRule", - "description": "A data validation rule on the cell, if any.\n\nWhen writing, the new data validation rule will overwrite any prior rule." + "includeFormulas": { + "description": "True if the search should include cells with formulas.\nFalse to skip cells with formulas.", + "type": "boolean" }, - "userEnteredValue": { - "description": "The value the user entered in the cell. e.g, `1234`, `'Hello'`, or `=NOW()`\nNote: Dates, Times and DateTimes are represented as doubles in\nserial number format.", - "$ref": "ExtendedValue" + "matchEntireCell": { + "description": "True if the find value should match the entire cell.", + "type": "boolean" }, - "effectiveValue": { - "description": "The effective value of the cell. For cells with formulas, this will be\nthe calculated value. 
For cells with literals, this will be\nthe same as the user_entered_value.\nThis field is read-only.", - "$ref": "ExtendedValue" + "searchByRegex": { + "type": "boolean", + "description": "True if the find value is a regex.\nThe regular expression and replacement should follow Java regex rules\nat https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html.\nThe replacement string is allowed to refer to capturing groups.\nFor example, if one cell has the contents `\"Google Sheets\"` and another\nhas `\"Google Docs\"`, then searching for `\"o.* (.*)\"` with a replacement of\n`\"$1 Rocks\"` would change the contents of the cells to\n`\"GSheets Rocks\"` and `\"GDocs Rocks\"` respectively." }, - "formattedValue": { - "description": "The formatted value of the cell.\nThis is the value as it's shown to the user.\nThis field is read-only.", + "find": { + "description": "The value to search.", "type": "string" }, - "textFormatRuns": { - "description": "Runs of rich text applied to subsections of the cell. Runs are only valid\non user entered strings, not formulas, bools, or numbers.\nRuns start at specific indexes in the text and continue until the next\nrun. Properties of a run will continue unless explicitly changed\nin a subsequent run (and properties of the first run will continue\nthe properties of the cell unless explicitly changed).\n\nWhen writing, the new runs will overwrite any prior runs. When writing a\nnew user_entered_value, previous runs will be erased.", - "type": "array", - "items": { - "$ref": "TextFormatRun" - } - }, - "hyperlink": { - "description": "A hyperlink this cell points to, if any.\nThis field is read-only. (To set it, use a `=HYPERLINK` formula.)", + "replacement": { + "description": "The value to use as the replacement.", "type": "string" }, - "pivotTable": { - "$ref": "PivotTable", - "description": "A pivot table anchored at this cell. The size of pivot table itself\nis computed dynamically based on its data, grouping, filters, values,\netc. Only the top-left cell of the pivot table contains the pivot table\ndefinition. The other cells will contain the calculated values of the\nresults of the pivot in their effective_value fields." + "range": { + "description": "The range to find/replace over.", + "$ref": "GridRange" + }, + "sheetId": { + "description": "The sheet to find/replace over.", + "format": "int32", + "type": "integer" } }, - "id": "CellData", - "description": "Data about a specific cell.", + "id": "FindReplaceRequest" + }, + "AddSheetRequest": { + "properties": { + "properties": { + "$ref": "SheetProperties", + "description": "The properties the new sheet should have.\nAll properties are optional.\nThe sheetId field is optional; if one is not\nset, an id will be randomly generated. 
(It is an error to specify the ID\nof a sheet that already exists.)" + } + }, + "id": "AddSheetRequest", + "description": "Adds a new sheet.\nWhen a sheet is added at a given index,\nall subsequent sheets' indexes are incremented.\nTo add an object sheet, use AddChartRequest instead and specify\nEmbeddedObjectPosition.sheetId or\nEmbeddedObjectPosition.newSheet.", "type": "object" }, - "BatchUpdateSpreadsheetRequest": { + "UpdateCellsRequest": { + "description": "Updates all cells in a range with new data.", + "type": "object", "properties": { - "includeSpreadsheetInResponse": { - "description": "Determines if the update response should include the spreadsheet\nresource.", - "type": "boolean" + "range": { + "description": "The range to write data to.\n\nIf the data in rows does not cover the entire requested range,\nthe fields matching those set in fields will be cleared.", + "$ref": "GridRange" }, - "responseRanges": { - "description": "Limits the ranges included in the response spreadsheet.\nMeaningful only if include_spreadsheet_response is 'true'.", + "rows": { + "description": "The data to write.", "type": "array", "items": { - "type": "string" + "$ref": "RowData" } }, - "responseIncludeGridData": { - "description": "True if grid data should be returned. Meaningful only if\nif include_spreadsheet_response is 'true'.\nThis parameter is ignored if a field mask was set in the request.", - "type": "boolean" + "fields": { + "description": "The fields of CellData that should be updated.\nAt least one field must be specified.\nThe root is the CellData; 'row.values.' should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", + "type": "string" }, - "requests": { - "description": "A list of updates to apply to the spreadsheet.", - "type": "array", - "items": { - "$ref": "Request" - } + "start": { + "$ref": "GridCoordinate", + "description": "The coordinate to start writing data at.\nAny number of rows and columns (including a different number of\ncolumns per row) may be written." + } + }, + "id": "UpdateCellsRequest" + }, + "DeleteConditionalFormatRuleResponse": { + "description": "The result of deleting a conditional format rule.", + "type": "object", + "properties": { + "rule": { + "description": "The rule that was deleted.", + "$ref": "ConditionalFormatRule" + } + }, + "id": "DeleteConditionalFormatRuleResponse" + }, + "DeleteRangeRequest": { + "properties": { + "shiftDimension": { + "enumDescriptions": [ + "The default value, do not use.", + "Operates on the rows of a sheet.", + "Operates on the columns of a sheet." + ], + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ], + "description": "The dimension from which deleted cells will be replaced with.\nIf ROWS, existing cells will be shifted upward to\nreplace the deleted cells. If COLUMNS, existing cells\nwill be shifted left to replace the deleted cells.", + "type": "string" + }, + "range": { + "$ref": "GridRange", + "description": "The range of cells to delete." 
+ } + }, + "id": "DeleteRangeRequest", + "description": "Deletes a range of cells, shifting other cells into the deleted area.", + "type": "object" + }, + "GridCoordinate": { + "description": "A coordinate in a sheet.\nAll indexes are zero-based.", + "type": "object", + "properties": { + "rowIndex": { + "description": "The row index of the coordinate.", + "format": "int32", + "type": "integer" + }, + "columnIndex": { + "description": "The column index of the coordinate.", + "format": "int32", + "type": "integer" + }, + "sheetId": { + "description": "The sheet this coordinate is on.", + "format": "int32", + "type": "integer" } }, - "id": "BatchUpdateSpreadsheetRequest", - "description": "The request for updating any aspect of a spreadsheet.", - "type": "object" + "id": "GridCoordinate" }, - "BasicChartAxis": { + "UpdateSheetPropertiesRequest": { + "description": "Updates properties of the sheet with the specified\nsheetId.", + "type": "object", "properties": { - "position": { - "enum": [ - "BASIC_CHART_AXIS_POSITION_UNSPECIFIED", - "BOTTOM_AXIS", - "LEFT_AXIS", - "RIGHT_AXIS" - ], - "description": "The position of this axis.", - "type": "string", - "enumDescriptions": [ - "Default value, do not use.", - "The axis rendered at the bottom of a chart.\nFor most charts, this is the standard major axis.\nFor bar charts, this is a minor axis.", - "The axis rendered at the left of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is the standard major axis.", - "The axis rendered at the right of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is an unusual major axis." - ] + "properties": { + "$ref": "SheetProperties", + "description": "The properties to update." }, - "title": { - "description": "The title of this axis. If set, this overrides any title inferred\nfrom headers of the data.", + "fields": { + "description": "The fields that should be updated. At least one field must be specified.\nThe root `properties` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", "type": "string" - }, - "format": { - "$ref": "TextFormat", - "description": "The format of the title.\nOnly valid if the axis is not associated with the domain." 
} }, - "id": "BasicChartAxis", - "description": "An axis of the chart.\nA chart may not have more than one axis per\naxis position.", - "type": "object" + "id": "UpdateSheetPropertiesRequest" }, - "Padding": { + "GridProperties": { + "description": "Properties of a grid.", + "type": "object", "properties": { - "bottom": { - "description": "The bottom padding of the cell.", + "rowCount": { + "description": "The number of rows in the grid.", "format": "int32", "type": "integer" }, - "top": { - "description": "The top padding of the cell.", + "frozenRowCount": { + "description": "The number of rows that are frozen in the grid.", "format": "int32", "type": "integer" }, - "left": { - "description": "The left padding of the cell.", + "hideGridlines": { + "description": "True if the grid isn't showing gridlines in the UI.", + "type": "boolean" + }, + "columnCount": { + "description": "The number of columns in the grid.", "format": "int32", "type": "integer" }, - "right": { - "description": "The right padding of the cell.", + "frozenColumnCount": { + "description": "The number of columns that are frozen in the grid.", "format": "int32", "type": "integer" } }, - "id": "Padding", - "description": "The amount of padding around the cell, in pixels.\nWhen updating padding, every field must be specified.", - "type": "object" + "id": "GridProperties" }, - "DeleteDimensionRequest": { + "UnmergeCellsRequest": { + "id": "UnmergeCellsRequest", + "description": "Unmerges cells in the given range.", + "type": "object", "properties": { "range": { - "$ref": "DimensionRange", - "description": "The dimensions to delete from the sheet." + "$ref": "GridRange", + "description": "The range within which all cells should be unmerged.\nIf the range spans multiple merges, all will be unmerged.\nThe range must not partially span any merge." } - }, - "id": "DeleteDimensionRequest", - "description": "Deletes the dimensions from the sheet.", - "type": "object" + } }, - "UpdateChartSpecRequest": { + "UpdateEmbeddedObjectPositionResponse": { + "id": "UpdateEmbeddedObjectPositionResponse", + "description": "The result of updating an embedded object's position.", + "type": "object", "properties": { - "chartId": { - "description": "The ID of the chart to update.", - "format": "int32", - "type": "integer" - }, - "spec": { - "description": "The specification to apply to the chart.", - "$ref": "ChartSpec" + "position": { + "$ref": "EmbeddedObjectPosition", + "description": "The new position of the embedded object." } - }, - "id": "UpdateChartSpecRequest", - "description": "Updates a chart's specifications.\n(This does not move or resize a chart. To move or resize a chart, use\n UpdateEmbeddedObjectPositionRequest.)", - "type": "object" + } }, - "DeleteFilterViewRequest": { + "SortSpec": { "properties": { - "filterId": { - "description": "The ID of the filter to delete.", + "dimensionIndex": { + "description": "The dimension the sort should be applied to.", "format": "int32", "type": "integer" + }, + "sortOrder": { + "enumDescriptions": [ + "Default value, do not use this.", + "Sort ascending.", + "Sort descending." 
+ ], + "enum": [ + "SORT_ORDER_UNSPECIFIED", + "ASCENDING", + "DESCENDING" + ], + "description": "The order data should be sorted.", + "type": "string" } }, - "id": "DeleteFilterViewRequest", - "description": "Deletes a particular filter view.", + "id": "SortSpec", + "description": "A sort order associated with a specific column or row.", "type": "object" }, - "BatchUpdateValuesResponse": { - "description": "The response when updating a range of values in a spreadsheet.", - "type": "object", + "Sheet": { "properties": { - "totalUpdatedCells": { - "description": "The total number of cells updated.", - "format": "int32", - "type": "integer" + "basicFilter": { + "description": "The filter on this sheet, if any.", + "$ref": "BasicFilter" }, - "totalUpdatedColumns": { - "description": "The total number of columns where at least one cell in the column was\nupdated.", - "format": "int32", - "type": "integer" + "merges": { + "description": "The ranges that are merged together.", + "type": "array", + "items": { + "$ref": "GridRange" + } }, - "spreadsheetId": { - "description": "The spreadsheet the updates were applied to.", - "type": "string" + "data": { + "description": "Data in the grid, if this is a grid sheet.\nThe number of GridData objects returned is dependent on the number of\nranges requested on this sheet. For example, if this is representing\n`Sheet1`, and the spreadsheet was requested with ranges\n`Sheet1!A1:C10` and `Sheet1!D15:E20`, then the first GridData will have a\nstartRow/startColumn of `0`,\nwhile the second one will have `startRow 14` (zero-based row 15),\nand `startColumn 3` (zero-based column D).", + "type": "array", + "items": { + "$ref": "GridData" + } }, - "totalUpdatedRows": { - "description": "The total number of rows where at least one cell in the row was updated.", - "format": "int32", - "type": "integer" + "bandedRanges": { + "description": "The banded (i.e. alternating colors) ranges on this sheet.", + "type": "array", + "items": { + "$ref": "BandedRange" + } }, - "responses": { - "description": "One UpdateValuesResponse per requested range, in the same order as\nthe requests appeared.", + "charts": { + "description": "The specifications of every chart on this sheet.", "type": "array", "items": { - "$ref": "UpdateValuesResponse" + "$ref": "EmbeddedChart" } }, - "totalUpdatedSheets": { - "description": "The total number of sheets where at least one cell in the sheet was\nupdated.", - "format": "int32", - "type": "integer" - } - }, - "id": "BatchUpdateValuesResponse" - }, - "SortRangeRequest": { - "properties": { - "range": { - "$ref": "GridRange", - "description": "The range to sort." + "properties": { + "$ref": "SheetProperties", + "description": "The properties of the sheet." }, - "sortSpecs": { - "description": "The sort order per column. 
Later specifications are used when values\nare equal in the earlier specifications.", + "filterViews": { + "description": "The filter views in this sheet.", "type": "array", "items": { - "$ref": "SortSpec" + "$ref": "FilterView" + } + }, + "protectedRanges": { + "description": "The protected ranges in this sheet.", + "type": "array", + "items": { + "$ref": "ProtectedRange" + } + }, + "conditionalFormats": { + "description": "The conditional format rules in this sheet.", + "type": "array", + "items": { + "$ref": "ConditionalFormatRule" } } }, - "id": "SortRangeRequest", - "description": "Sorts data in rows based on a sort order per column.", + "id": "Sheet", + "description": "A sheet in a spreadsheet.", "type": "object" }, - "MergeCellsRequest": { - "description": "Merges all cells in the range.", + "BooleanRule": { + "description": "A rule that may or may not match, depending on the condition.", "type": "object", "properties": { - "mergeType": { - "enumDescriptions": [ - "Create a single merge from the range", - "Create a merge for each column in the range", - "Create a merge for each row in the range" - ], - "enum": [ - "MERGE_ALL", - "MERGE_COLUMNS", - "MERGE_ROWS" - ], - "description": "How the cells should be merged.", - "type": "string" + "format": { + "$ref": "CellFormat", + "description": "The format to apply.\nConditional formatting can only apply a subset of formatting:\nbold, italic,\nstrikethrough,\nforeground color &\nbackground color." }, - "range": { - "description": "The range of cells to merge.", - "$ref": "GridRange" + "condition": { + "$ref": "BooleanCondition", + "description": "The condition of the rule. If the condition evaluates to true,\nthe format will be applied." } }, - "id": "MergeCellsRequest" + "id": "BooleanRule" }, - "AddProtectedRangeRequest": { - "description": "Adds a new protected range.", + "FilterCriteria": { + "id": "FilterCriteria", + "description": "Criteria for showing/hiding rows in a filter or filter view.", "type": "object", "properties": { - "protectedRange": { - "$ref": "ProtectedRange", - "description": "The protected range to be added. The\nprotectedRangeId field is optional; if\none is not set, an id will be randomly generated. 
(It is an error to\nspecify the ID of a range that already exists.)" + "hiddenValues": { + "description": "Values that should be hidden.", + "type": "array", + "items": { + "type": "string" + } + }, + "condition": { + "$ref": "BooleanCondition", + "description": "A condition that must be true for values to be shown.\n(This does not override hiddenValues -- if a value is listed there,\n it will still be hidden.)" + } + } + }, + "PivotGroupValueMetadata": { + "properties": { + "value": { + "$ref": "ExtendedValue", + "description": "The calculated value the metadata corresponds to.\n(Note that formulaValue is not valid,\n because the values will be calculated.)" + }, + "collapsed": { + "description": "True if the data corresponding to the value is collapsed.", + "type": "boolean" } }, - "id": "AddProtectedRangeRequest" + "id": "PivotGroupValueMetadata", + "description": "Metadata about a value in a pivot grouping.", + "type": "object" }, - "BatchClearValuesRequest": { - "description": "The request for clearing more than one range of values in a spreadsheet.", + "Editors": { + "id": "Editors", + "description": "The editors of a protected range.", "type": "object", "properties": { - "ranges": { - "description": "The ranges to clear, in A1 notation.", + "users": { + "description": "The email addresses of users with edit access to the protected range.", + "type": "array", + "items": { + "type": "string" + } + }, + "groups": { + "description": "The email addresses of groups with edit access to the protected range.", "type": "array", "items": { "type": "string" } + }, + "domainUsersCanEdit": { + "description": "True if anyone in the document's domain has edit access to the protected\nrange. Domain protection is only supported on documents within a domain.", + "type": "boolean" } - }, - "id": "BatchClearValuesRequest" + } }, - "DuplicateFilterViewResponse": { + "UpdateConditionalFormatRuleRequest": { + "description": "Updates a conditional format rule at the given index,\nor moves a conditional format rule to another index.", + "type": "object", "properties": { - "filter": { - "$ref": "FilterView", - "description": "The newly created filter." + "index": { + "description": "The zero-based index of the rule that should be replaced or moved.", + "format": "int32", + "type": "integer" + }, + "sheetId": { + "description": "The sheet of the rule to move. Required if new_index is set,\nunused otherwise.", + "format": "int32", + "type": "integer" + }, + "newIndex": { + "description": "The zero-based new index the rule should end up at.", + "format": "int32", + "type": "integer" + }, + "rule": { + "$ref": "ConditionalFormatRule", + "description": "The rule that should replace the rule at the given index." } }, - "id": "DuplicateFilterViewResponse", - "description": "The result of a filter view being duplicated.", - "type": "object" + "id": "UpdateConditionalFormatRuleRequest" }, - "DuplicateSheetResponse": { - "description": "The result of duplicating a sheet.", + "BasicChartDomain": { "type": "object", "properties": { - "properties": { - "description": "The properties of the duplicate sheet.", - "$ref": "SheetProperties" + "domain": { + "description": "The data of the domain. For example, if charting stock prices over time,\nthis is the data representing the dates.", + "$ref": "ChartData" } }, - "id": "DuplicateSheetResponse" + "id": "BasicChartDomain", + "description": "The domain of a chart.\nFor example, if charting stock prices over time, this would be the date." 
}, - "ClearBasicFilterRequest": { - "description": "Clears the basic filter, if any exists on the sheet.", + "DataValidationRule": { + "description": "A data validation rule.", "type": "object", "properties": { - "sheetId": { - "description": "The sheet ID on which the basic filter should be cleared.", - "format": "int32", - "type": "integer" + "inputMessage": { + "type": "string", + "description": "A message to show the user when adding data to the cell." + }, + "condition": { + "description": "The condition that data in the cell must match.", + "$ref": "BooleanCondition" + }, + "showCustomUi": { + "description": "True if the UI should be customized based on the kind of condition.\nIf true, \"List\" conditions will show a dropdown.", + "type": "boolean" + }, + "strict": { + "description": "True if invalid data should be rejected.", + "type": "boolean" } }, - "id": "ClearBasicFilterRequest" + "id": "DataValidationRule" }, - "TextToColumnsRequest": { - "description": "Splits a column of text into multiple columns,\nbased on a delimiter in each cell.", + "PasteDataRequest": { + "description": "Inserts data into the spreadsheet starting at the specified coordinate.", "type": "object", "properties": { - "delimiter": { - "description": "The delimiter to use. Used only if delimiterType is\nCUSTOM.", + "type": { + "enumDescriptions": [ + "Paste values, formulas, formats, and merges.", + "Paste the values ONLY without formats, formulas, or merges.", + "Paste the format and data validation only.", + "Like PASTE_NORMAL but without borders.", + "Paste the formulas only.", + "Paste the data validation only.", + "Paste the conditional formatting rules only." + ], + "enum": [ + "PASTE_NORMAL", + "PASTE_VALUES", + "PASTE_FORMAT", + "PASTE_NO_BORDERS", + "PASTE_FORMULA", + "PASTE_DATA_VALIDATION", + "PASTE_CONDITIONAL_FORMATTING" + ], + "description": "How the data should be pasted.", "type": "string" }, - "source": { - "$ref": "GridRange", - "description": "The source data range. This must span exactly one column." + "html": { + "description": "True if the data is HTML.", + "type": "boolean" }, - "delimiterType": { - "enum": [ - "DELIMITER_TYPE_UNSPECIFIED", - "COMMA", - "SEMICOLON", - "PERIOD", - "SPACE", - "CUSTOM" - ], - "description": "The delimiter type to use.", - "type": "string", - "enumDescriptions": [ - "Default value. This value must not be used.", - "\",\"", - "\";\"", - "\".\"", - "\" \"", - "A custom value as defined in delimiter." - ] + "coordinate": { + "$ref": "GridCoordinate", + "description": "The coordinate at which the data should start being inserted." + }, + "data": { + "description": "The data to insert.", + "type": "string" + }, + "delimiter": { + "description": "The delimiter in the data.", + "type": "string" } }, - "id": "TextToColumnsRequest" + "id": "PasteDataRequest" }, - "DeleteBandingRequest": { + "AppendDimensionRequest": { + "type": "object", "properties": { - "bandedRangeId": { - "description": "The ID of the banded range to delete.", + "dimension": { + "description": "Whether rows or columns should be appended.", + "type": "string", + "enumDescriptions": [ + "The default value, do not use.", + "Operates on the rows of a sheet.", + "Operates on the columns of a sheet." 
+ ], + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ] + }, + "length": { + "description": "The number of rows or columns to append.", + "format": "int32", + "type": "integer" + }, + "sheetId": { + "description": "The sheet to append rows or columns to.", "format": "int32", "type": "integer" } }, - "id": "DeleteBandingRequest", - "description": "Removes the banded range with the given ID from the spreadsheet.", - "type": "object" + "id": "AppendDimensionRequest", + "description": "Appends rows or columns to the end of a sheet." }, - "BatchUpdateSpreadsheetResponse": { - "description": "The reply for batch updating a spreadsheet.", + "AddNamedRangeRequest": { + "description": "Adds a named range to the spreadsheet.", "type": "object", "properties": { - "replies": { - "description": "The reply of the updates. This maps 1:1 with the updates, although\nreplies to some requests may be empty.", - "type": "array", - "items": { - "$ref": "Response" - } - }, - "updatedSpreadsheet": { - "description": "The spreadsheet after updates were applied. This is only set if\n[BatchUpdateSpreadsheetRequest.include_spreadsheet_in_response] is `true`.", - "$ref": "Spreadsheet" - }, - "spreadsheetId": { - "description": "The spreadsheet the updates were applied to.", - "type": "string" + "namedRange": { + "$ref": "NamedRange", + "description": "The named range to add. The namedRangeId\nfield is optional; if one is not set, an id will be randomly generated. (It\nis an error to specify the ID of a range that already exists.)" } }, - "id": "BatchUpdateSpreadsheetResponse" + "id": "AddNamedRangeRequest" }, - "AppendValuesResponse": { + "UpdateEmbeddedObjectPositionRequest": { + "description": "Update an embedded object's position (such as a moving or resizing a\nchart or image).", + "type": "object", "properties": { - "updates": { - "description": "Information about the updates that were applied.", - "$ref": "UpdateValuesResponse" + "newPosition": { + "$ref": "EmbeddedObjectPosition", + "description": "An explicit position to move the embedded object to.\nIf newPosition.sheetId is set,\na new sheet with that ID will be created.\nIf newPosition.newSheet is set to true,\na new sheet will be created with an ID that will be chosen for you." }, - "tableRange": { - "description": "The range (in A1 notation) of the table that values are being appended to\n(before the values were appended).\nEmpty if no table was found.", + "fields": { + "description": "The fields of OverlayPosition\nthat should be updated when setting a new position. Used only if\nnewPosition.overlayPosition\nis set, in which case at least one field must\nbe specified. The root `newPosition.overlayPosition` is implied and\nshould not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", "type": "string" }, - "spreadsheetId": { - "description": "The spreadsheet the updates were applied to.", - "type": "string" - } - }, - "id": "AppendValuesResponse", - "description": "The response when updating a range of values in a spreadsheet.", - "type": "object" - }, - "AddFilterViewRequest": { - "description": "Adds a filter view.", - "type": "object", - "properties": { - "filter": { - "$ref": "FilterView", - "description": "The filter to add. The filterViewId\nfield is optional; if one is not set, an id will be randomly generated. 
(It\nis an error to specify the ID of a filter that already exists.)" + "objectId": { + "description": "The ID of the object to moved.", + "format": "int32", + "type": "integer" } }, - "id": "AddFilterViewRequest" + "id": "UpdateEmbeddedObjectPositionRequest" }, - "PivotFilterCriteria": { - "description": "Criteria for showing/hiding rows in a pivot table.", + "PieChartSpec": { + "description": "A \u003ca href=\"/chart/interactive/docs/gallery/piechart\"\u003epie chart\u003c/a\u003e.", "type": "object", "properties": { - "visibleValues": { - "description": "Values that should be included. Values not listed here are excluded.", - "type": "array", - "items": { - "type": "string" - } + "series": { + "description": "The data that covers the one and only series of the pie chart.", + "$ref": "ChartData" + }, + "legendPosition": { + "enumDescriptions": [ + "Default value, do not use.", + "The legend is rendered on the bottom of the chart.", + "The legend is rendered on the left of the chart.", + "The legend is rendered on the right of the chart.", + "The legend is rendered on the top of the chart.", + "No legend is rendered.", + "Each pie slice has a label attached to it." + ], + "enum": [ + "PIE_CHART_LEGEND_POSITION_UNSPECIFIED", + "BOTTOM_LEGEND", + "LEFT_LEGEND", + "RIGHT_LEGEND", + "TOP_LEGEND", + "NO_LEGEND", + "LABELED_LEGEND" + ], + "description": "Where the legend of the pie chart should be drawn.", + "type": "string" + }, + "pieHole": { + "description": "The size of the hole in the pie chart.", + "format": "double", + "type": "number" + }, + "domain": { + "$ref": "ChartData", + "description": "The data that covers the domain of the pie chart." + }, + "threeDimensional": { + "description": "True if the pie is three dimensional.", + "type": "boolean" } }, - "id": "PivotFilterCriteria" + "id": "PieChartSpec" }, - "MoveDimensionRequest": { + "UpdateFilterViewRequest": { + "description": "Updates properties of the filter view.", + "type": "object", "properties": { - "destinationIndex": { - "description": "The zero-based start index of where to move the source data to,\nbased on the coordinates *before* the source data is removed\nfrom the grid. Existing data will be shifted down or right\n(depending on the dimension) to make room for the moved dimensions.\nThe source dimensions are removed from the grid, so the\nthe data may end up in a different index than specified.\n\nFor example, given `A1..A5` of `0, 1, 2, 3, 4` and wanting to move\n`\"1\"` and `\"2\"` to between `\"3\"` and `\"4\"`, the source would be\n`ROWS [1..3)`,and the destination index would be `\"4\"`\n(the zero-based index of row 5).\nThe end result would be `A1..A5` of `0, 3, 1, 2, 4`.", - "format": "int32", - "type": "integer" + "fields": { + "description": "The fields that should be updated. 
At least one field must be specified.\nThe root `filter` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", + "format": "google-fieldmask", + "type": "string" }, - "source": { - "description": "The source dimensions to move.", - "$ref": "DimensionRange" + "filter": { + "description": "The new properties of the filter view.", + "$ref": "FilterView" } }, - "id": "MoveDimensionRequest", - "description": "Moves one or more rows or columns.", - "type": "object" + "id": "UpdateFilterViewRequest" }, - "AddConditionalFormatRuleRequest": { - "description": "Adds a new conditional format rule at the given index.\nAll subsequent rules' indexes are incremented.", + "ConditionalFormatRule": { + "description": "A rule describing a conditional format.", "type": "object", "properties": { - "rule": { - "description": "The rule to add.", - "$ref": "ConditionalFormatRule" + "ranges": { + "type": "array", + "items": { + "$ref": "GridRange" + }, + "description": "The ranges that will be formatted if the condition is true.\nAll the ranges must be on the same grid." }, - "index": { - "description": "The zero-based index where the rule should be inserted.", - "format": "int32", - "type": "integer" + "gradientRule": { + "description": "The formatting will vary based on the gradients in the rule.", + "$ref": "GradientRule" + }, + "booleanRule": { + "description": "The formatting is either \"on\" or \"off\" according to the rule.", + "$ref": "BooleanRule" } }, - "id": "AddConditionalFormatRuleRequest" + "id": "ConditionalFormatRule" }, - "ChartSpec": { - "description": "The specifications of a chart.", + "CopyPasteRequest": { + "description": "Copies data from the source to the destination.", "type": "object", "properties": { - "basicChart": { - "description": "A basic chart specification, can be one of many kinds of charts.\nSee BasicChartType for the list of all\ncharts this supports.", - "$ref": "BasicChartSpec" + "destination": { + "$ref": "GridRange", + "description": "The location to paste to. If the range covers a span that's\na multiple of the source's height or width, then the\ndata will be repeated to fill in the destination range.\nIf the range is smaller than the source range, the entire\nsource data will still be copied (beyond the end of the destination range)." }, - "hiddenDimensionStrategy": { + "pasteOrientation": { "enum": [ - "CHART_HIDDEN_DIMENSION_STRATEGY_UNSPECIFIED", - "SKIP_HIDDEN_ROWS_AND_COLUMNS", - "SKIP_HIDDEN_ROWS", - "SKIP_HIDDEN_COLUMNS", - "SHOW_ALL" + "NORMAL", + "TRANSPOSE" ], - "description": "Determines how the charts will use hidden rows or columns.", + "description": "How that data should be oriented when pasting.", "type": "string", "enumDescriptions": [ - "Default value, do not use.", - "Charts will skip hidden rows and columns.", - "Charts will skip hidden rows only.", - "Charts will skip hidden columns only.", - "Charts will not skip any hidden rows or columns." + "Paste normally.", + "Paste transposed, where all rows become columns and vice versa." ] }, - "title": { - "description": "The title of the chart.", - "type": "string" + "source": { + "$ref": "GridRange", + "description": "The source range to copy." }, - "pieChart": { - "$ref": "PieChartSpec", - "description": "A pie chart specification." 
- } - }, - "id": "ChartSpec" - }, - "NumberFormat": { - "properties": { - "type": { + "pasteType": { + "type": "string", "enumDescriptions": [ - "The number format is not specified\nand is based on the contents of the cell.\nDo not explicitly use this.", - "Text formatting, e.g `1000.12`", - "Number formatting, e.g, `1,000.12`", - "Percent formatting, e.g `10.12%`", - "Currency formatting, e.g `$1,000.12`", - "Date formatting, e.g `9/26/2008`", - "Time formatting, e.g `3:59:00 PM`", - "Date+Time formatting, e.g `9/26/08 15:59:00`", - "Scientific number formatting, e.g `1.01E+03`" + "Paste values, formulas, formats, and merges.", + "Paste the values ONLY without formats, formulas, or merges.", + "Paste the format and data validation only.", + "Like PASTE_NORMAL but without borders.", + "Paste the formulas only.", + "Paste the data validation only.", + "Paste the conditional formatting rules only." ], "enum": [ - "NUMBER_FORMAT_TYPE_UNSPECIFIED", - "TEXT", - "NUMBER", - "PERCENT", - "CURRENCY", - "DATE", - "TIME", - "DATE_TIME", - "SCIENTIFIC" + "PASTE_NORMAL", + "PASTE_VALUES", + "PASTE_FORMAT", + "PASTE_NO_BORDERS", + "PASTE_FORMULA", + "PASTE_DATA_VALIDATION", + "PASTE_CONDITIONAL_FORMATTING" ], - "description": "The type of the number format.\nWhen writing, this field must be set.", - "type": "string" - }, - "pattern": { - "description": "Pattern string used for formatting. If not set, a default pattern based on\nthe user's locale will be used if necessary for the given type.\nSee the [Date and Number Formats guide](/sheets/guides/formats) for more\ninformation about the supported patterns.", - "type": "string" + "description": "What kind of data to paste." } }, - "id": "NumberFormat", - "description": "The number format of a cell.", - "type": "object" + "id": "CopyPasteRequest" }, - "SheetProperties": { + "Request": { + "description": "A single kind of update to apply to a spreadsheet.", + "type": "object", "properties": { - "title": { - "description": "The name of the sheet.", - "type": "string" + "updateDimensionProperties": { + "description": "Updates dimensions' properties.", + "$ref": "UpdateDimensionPropertiesRequest" }, - "tabColor": { - "description": "The color of the tab in the UI.", - "$ref": "Color" + "pasteData": { + "description": "Pastes data (HTML or delimited) into a sheet.", + "$ref": "PasteDataRequest" }, - "index": { - "description": "The index of the sheet within the spreadsheet.\nWhen adding or updating sheet properties, if this field\nis excluded then the sheet will be added or moved to the end\nof the sheet list. When updating sheet indices or inserting\nsheets, movement is considered in \"before the move\" indexes.\nFor example, if there were 3 sheets (S1, S2, S3) in order to\nmove S1 ahead of S2 the index would have to be set to 2. A sheet\nindex update request will be ignored if the requested index is\nidentical to the sheets current index or if the requested new\nindex is equal to the current sheet index + 1.", - "format": "int32", - "type": "integer" + "setBasicFilter": { + "$ref": "SetBasicFilterRequest", + "description": "Sets the basic filter on a sheet." }, - "sheetId": { - "description": "The ID of the sheet. 
Must be non-negative.\nThis field cannot be changed once set.", - "format": "int32", - "type": "integer" + "addConditionalFormatRule": { + "description": "Adds a new conditional format rule.", + "$ref": "AddConditionalFormatRuleRequest" }, - "rightToLeft": { - "description": "True if the sheet is an RTL sheet instead of an LTR sheet.", - "type": "boolean" + "addNamedRange": { + "$ref": "AddNamedRangeRequest", + "description": "Adds a named range." }, - "hidden": { - "description": "True if the sheet is hidden in the UI, false if it's visible.", - "type": "boolean" + "updateCells": { + "description": "Updates many cells at once.", + "$ref": "UpdateCellsRequest" }, - "gridProperties": { - "description": "Additional properties of the sheet if this sheet is a grid.\n(If the sheet is an object sheet, containing a chart or image, then\nthis field will be absent.)\nWhen writing it is an error to set any grid properties on non-grid sheets.", - "$ref": "GridProperties" + "updateSpreadsheetProperties": { + "description": "Updates the spreadsheet's properties.", + "$ref": "UpdateSpreadsheetPropertiesRequest" }, - "sheetType": { - "enum": [ - "SHEET_TYPE_UNSPECIFIED", - "GRID", - "OBJECT" - ], - "description": "The type of sheet. Defaults to GRID.\nThis field cannot be changed once set.", - "type": "string", - "enumDescriptions": [ - "Default value, do not use.", - "The sheet is a grid.", - "The sheet has no grid and instead has an object like a chart or image." - ] - } - }, - "id": "SheetProperties", - "description": "Properties of a sheet.", - "type": "object" - }, - "UpdateDimensionPropertiesRequest": { - "description": "Updates properties of dimensions within the specified range.", - "type": "object", - "properties": { - "properties": { - "description": "Properties to update.", - "$ref": "DimensionProperties" + "deleteEmbeddedObject": { + "$ref": "DeleteEmbeddedObjectRequest", + "description": "Deletes an embedded object (e.g, chart, image) in a sheet." }, - "range": { - "$ref": "DimensionRange", - "description": "The rows or columns to update." + "updateFilterView": { + "description": "Updates the properties of a filter view.", + "$ref": "UpdateFilterViewRequest" }, - "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root `properties` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", - "type": "string" - } - }, - "id": "UpdateDimensionPropertiesRequest" - }, - "SourceAndDestination": { - "properties": { - "dimension": { - "enumDescriptions": [ - "The default value, do not use.", - "Operates on the rows of a sheet.", - "Operates on the columns of a sheet." - ], - "enum": [ - "DIMENSION_UNSPECIFIED", - "ROWS", - "COLUMNS" - ], - "description": "The dimension that data should be filled into.", - "type": "string" + "addBanding": { + "$ref": "AddBandingRequest", + "description": "Adds a new banded range" + }, + "autoResizeDimensions": { + "$ref": "AutoResizeDimensionsRequest", + "description": "Automatically resizes one or more dimensions based on the contents\nof the cells in that dimension." + }, + "appendCells": { + "$ref": "AppendCellsRequest", + "description": "Appends cells after the last row with data in a sheet." 
+ }, + "cutPaste": { + "description": "Cuts data from one area and pastes it to another.", + "$ref": "CutPasteRequest" + }, + "mergeCells": { + "description": "Merges cells together.", + "$ref": "MergeCellsRequest" + }, + "updateNamedRange": { + "$ref": "UpdateNamedRangeRequest", + "description": "Updates a named range." + }, + "updateSheetProperties": { + "$ref": "UpdateSheetPropertiesRequest", + "description": "Updates a sheet's properties." + }, + "autoFill": { + "$ref": "AutoFillRequest", + "description": "Automatically fills in more data based on existing data." + }, + "deleteDimension": { + "$ref": "DeleteDimensionRequest", + "description": "Deletes rows or columns in a sheet." + }, + "sortRange": { + "$ref": "SortRangeRequest", + "description": "Sorts data in a range." + }, + "deleteProtectedRange": { + "$ref": "DeleteProtectedRangeRequest", + "description": "Deletes a protected range." + }, + "duplicateFilterView": { + "$ref": "DuplicateFilterViewRequest", + "description": "Duplicates a filter view." }, - "fillLength": { - "description": "The number of rows or columns that data should be filled into.\nPositive numbers expand beyond the last row or last column\nof the source. Negative numbers expand before the first row\nor first column of the source.", - "format": "int32", - "type": "integer" + "addChart": { + "description": "Adds a chart.", + "$ref": "AddChartRequest" }, - "source": { - "description": "The location of the data to use as the source of the autofill.", - "$ref": "GridRange" - } - }, - "id": "SourceAndDestination", - "description": "A combination of a source range and how to extend that source.", - "type": "object" - }, - "FilterView": { - "description": "A filter view.", - "type": "object", - "properties": { - "title": { - "description": "The name of the filter view.", - "type": "string" + "findReplace": { + "description": "Finds and replaces occurrences of some text with other text.", + "$ref": "FindReplaceRequest" }, - "range": { - "description": "The range this filter view covers.\n\nWhen writing, only one of range or named_range_id\nmay be set.", - "$ref": "GridRange" + "updateChartSpec": { + "$ref": "UpdateChartSpecRequest", + "description": "Updates a chart's specifications." }, - "criteria": { - "additionalProperties": { - "$ref": "FilterCriteria" - }, - "description": "The criteria for showing/hiding values per column.\nThe map's key is the column index, and the value is the criteria for\nthat column.", - "type": "object" + "textToColumns": { + "$ref": "TextToColumnsRequest", + "description": "Converts a column of text into many columns of text." }, - "sortSpecs": { - "description": "The sort order per column. Later specifications are used when values\nare equal in the earlier specifications.", - "type": "array", - "items": { - "$ref": "SortSpec" - } + "addSheet": { + "description": "Adds a sheet.", + "$ref": "AddSheetRequest" }, - "namedRangeId": { - "description": "The named range this filter view is backed by, if any.\n\nWhen writing, only one of range or named_range_id\nmay be set.", - "type": "string" + "updateProtectedRange": { + "description": "Updates a protected range.", + "$ref": "UpdateProtectedRangeRequest" }, - "filterViewId": { - "description": "The ID of the filter view.", - "format": "int32", - "type": "integer" - } - }, - "id": "FilterView" - }, - "BandingProperties": { - "description": "Properties referring a single dimension (either row or column). 
If both\nBandedRange.row_properties and BandedRange.column_properties are\nset, the fill colors are applied to cells according to the following rules:\n\n* header_color and footer_color take priority over band colors.\n* first_band_color takes priority over second_band_color.\n* row_properties takes priority over column_properties.\n\nFor example, the first row color takes priority over the first column\ncolor, but the first column color takes priority over the second row color.\nSimilarly, the row header takes priority over the column header in the\ntop left cell, but the column header takes priority over the first row\ncolor if the row header is not set.", - "type": "object", - "properties": { - "secondBandColor": { - "description": "The second color that is alternating. (Required)", - "$ref": "Color" + "copyPaste": { + "$ref": "CopyPasteRequest", + "description": "Copies data from one area and pastes it to another." }, - "footerColor": { - "description": "The color of the last row or column. If this field is not set, the last\nrow or column will be filled with either first_band_color or\nsecond_band_color, depending on the color of the previous row or\ncolumn.", - "$ref": "Color" + "deleteFilterView": { + "$ref": "DeleteFilterViewRequest", + "description": "Deletes a filter view from a sheet." }, - "headerColor": { - "$ref": "Color", - "description": "The color of the first row or column. If this field is set, the first\nrow or column will be filled with this color and the colors will\nalternate between first_band_color and second_band_color starting\nfrom the second row or column. Otherwise, the first row or column will be\nfilled with first_band_color and the colors will proceed to alternate\nas they normally would." + "insertDimension": { + "$ref": "InsertDimensionRequest", + "description": "Inserts new rows or columns in a sheet." }, - "firstBandColor": { - "$ref": "Color", - "description": "The first color that is alternating. (Required)" - } - }, - "id": "BandingProperties" - }, - "BasicFilter": { - "description": "The default filter associated with a sheet.", - "type": "object", - "properties": { - "sortSpecs": { - "description": "The sort order per column. Later specifications are used when values\nare equal in the earlier specifications.", - "type": "array", - "items": { - "$ref": "SortSpec" - } + "deleteRange": { + "$ref": "DeleteRangeRequest", + "description": "Deletes a range of cells from a sheet, shifting the remaining cells." }, - "range": { - "description": "The range the filter covers.", - "$ref": "GridRange" + "deleteBanding": { + "$ref": "DeleteBandingRequest", + "description": "Removes a banded range" }, - "criteria": { - "additionalProperties": { - "$ref": "FilterCriteria" - }, - "description": "The criteria for showing/hiding values per column.\nThe map's key is the column index, and the value is the criteria for\nthat column.", - "type": "object" - } - }, - "id": "BasicFilter" - }, - "AddProtectedRangeResponse": { - "properties": { - "protectedRange": { - "$ref": "ProtectedRange", - "description": "The newly added protected range." 
- } - }, - "id": "AddProtectedRangeResponse", - "description": "The result of adding a new protected range.", - "type": "object" - }, - "UpdateValuesResponse": { - "description": "The response when updating a range of values in a spreadsheet.", - "type": "object", - "properties": { - "spreadsheetId": { - "description": "The spreadsheet the updates were applied to.", - "type": "string" + "addFilterView": { + "$ref": "AddFilterViewRequest", + "description": "Adds a filter view." }, - "updatedRange": { - "description": "The range (in A1 notation) that updates were applied to.", - "type": "string" + "updateBorders": { + "$ref": "UpdateBordersRequest", + "description": "Updates the borders in a range of cells." }, - "updatedCells": { - "description": "The number of cells updated.", - "format": "int32", - "type": "integer" + "setDataValidation": { + "$ref": "SetDataValidationRequest", + "description": "Sets data validation for one or more cells." }, - "updatedRows": { - "description": "The number of rows where at least one cell in the row was updated.", - "format": "int32", - "type": "integer" + "deleteConditionalFormatRule": { + "description": "Deletes an existing conditional format rule.", + "$ref": "DeleteConditionalFormatRuleRequest" }, - "updatedData": { - "$ref": "ValueRange", - "description": "The values of the cells after updates were applied.\nThis is only included if the request's `includeValuesInResponse` field\nwas `true`." + "repeatCell": { + "$ref": "RepeatCellRequest", + "description": "Repeats a single cell across a range." }, - "updatedColumns": { - "description": "The number of columns where at least one cell in the column was updated.", - "format": "int32", - "type": "integer" - } - }, - "id": "UpdateValuesResponse" - }, - "ErrorValue": { - "properties": { - "type": { - "enum": [ - "ERROR_TYPE_UNSPECIFIED", - "ERROR", - "NULL_VALUE", - "DIVIDE_BY_ZERO", - "VALUE", - "REF", - "NAME", - "NUM", - "N_A", - "LOADING" - ], - "description": "The type of error.", - "type": "string", - "enumDescriptions": [ - "The default error type, do not use this.", - "Corresponds to the `#ERROR!` error.", - "Corresponds to the `#NULL!` error.", - "Corresponds to the `#DIV/0` error.", - "Corresponds to the `#VALUE!` error.", - "Corresponds to the `#REF!` error.", - "Corresponds to the `#NAME?` error.", - "Corresponds to the `#NUM`! error.", - "Corresponds to the `#N/A` error.", - "Corresponds to the `Loading...` state." - ] + "clearBasicFilter": { + "$ref": "ClearBasicFilterRequest", + "description": "Clears the basic filter on a sheet." }, - "message": { - "description": "A message with more information about the error\n(in the spreadsheet's locale).", - "type": "string" - } - }, - "id": "ErrorValue", - "description": "An error in a cell.", - "type": "object" - }, - "PivotValue": { - "description": "The definition of how a value in a pivot table should be calculated.", - "type": "object", - "properties": { - "sourceColumnOffset": { - "description": "The column offset of the source range that this value reads from.\n\nFor example, if the source was `C10:E15`, a `sourceColumnOffset` of `0`\nmeans this value refers to column `C`, whereas the offset `1` would\nrefer to column `D`.", - "format": "int32", - "type": "integer" + "appendDimension": { + "$ref": "AppendDimensionRequest", + "description": "Appends dimensions to the end of a sheet." }, - "name": { - "description": "A name to use for the value. 
This is only used if formula was set.\nOtherwise, the column name is used.", - "type": "string" + "updateConditionalFormatRule": { + "description": "Updates an existing conditional format rule.", + "$ref": "UpdateConditionalFormatRuleRequest" }, - "formula": { - "description": "A custom formula to calculate the value. The formula must start\nwith an `=` character.", - "type": "string" + "insertRange": { + "$ref": "InsertRangeRequest", + "description": "Inserts new cells in a sheet, shifting the existing cells." }, - "summarizeFunction": { - "enumDescriptions": [ - "The default, do not use.", - "Corresponds to the `SUM` function.", - "Corresponds to the `COUNTA` function.", - "Corresponds to the `COUNT` function.", - "Corresponds to the `COUNTUNIQUE` function.", - "Corresponds to the `AVERAGE` function.", - "Corresponds to the `MAX` function.", - "Corresponds to the `MIN` function.", - "Corresponds to the `MEDIAN` function.", - "Corresponds to the `PRODUCT` function.", - "Corresponds to the `STDEV` function.", - "Corresponds to the `STDEVP` function.", - "Corresponds to the `VAR` function.", - "Corresponds to the `VARP` function.", - "Indicates the formula should be used as-is.\nOnly valid if PivotValue.formula was set." - ], - "enum": [ - "PIVOT_STANDARD_VALUE_FUNCTION_UNSPECIFIED", - "SUM", - "COUNTA", - "COUNT", - "COUNTUNIQUE", - "AVERAGE", - "MAX", - "MIN", - "MEDIAN", - "PRODUCT", - "STDEV", - "STDEVP", - "VAR", - "VARP", - "CUSTOM" - ], - "description": "A function to summarize the value.\nIf formula is set, the only supported values are\nSUM and\nCUSTOM.\nIf sourceColumnOffset is set, then `CUSTOM`\nis not supported.", - "type": "string" + "moveDimension": { + "$ref": "MoveDimensionRequest", + "description": "Moves rows or columns to another location in a sheet." + }, + "updateBanding": { + "description": "Updates a banded range", + "$ref": "UpdateBandingRequest" + }, + "deleteNamedRange": { + "description": "Deletes a named range.", + "$ref": "DeleteNamedRangeRequest" + }, + "addProtectedRange": { + "$ref": "AddProtectedRangeRequest", + "description": "Adds a protected range." + }, + "duplicateSheet": { + "description": "Duplicates a sheet.", + "$ref": "DuplicateSheetRequest" + }, + "deleteSheet": { + "description": "Deletes a sheet.", + "$ref": "DeleteSheetRequest" + }, + "unmergeCells": { + "$ref": "UnmergeCellsRequest", + "description": "Unmerges merged cells." + }, + "updateEmbeddedObjectPosition": { + "description": "Updates an embedded object's (e.g. chart, image) position.", + "$ref": "UpdateEmbeddedObjectPositionRequest" } }, - "id": "PivotValue" + "id": "Request" }, - "CopySheetToAnotherSpreadsheetRequest": { - "description": "The request to copy a sheet across spreadsheets.", + "BooleanCondition": { + "description": "A condition that can evaluate to true or false.\nBooleanConditions are used by conditional formatting,\ndata validation, and the criteria in filters.", "type": "object", "properties": { - "destinationSpreadsheetId": { - "description": "The ID of the spreadsheet to copy the sheet to.", - "type": "string" - } - }, - "id": "CopySheetToAnotherSpreadsheetRequest" - }, - "PivotGroupSortValueBucket": { - "properties": { - "buckets": { - "description": "Determines the bucket from which values are chosen to sort.\n\nFor example, in a pivot table with one row group & two column groups,\nthe row group can list up to two values. The first value corresponds\nto a value within the first column group, and the second value\ncorresponds to a value in the second column group. 
If no values\nare listed, this would indicate that the row should be sorted according\nto the \"Grand Total\" over the column groups. If a single value is listed,\nthis would correspond to using the \"Total\" of that bucket.", + "type": { + "enum": [ + "CONDITION_TYPE_UNSPECIFIED", + "NUMBER_GREATER", + "NUMBER_GREATER_THAN_EQ", + "NUMBER_LESS", + "NUMBER_LESS_THAN_EQ", + "NUMBER_EQ", + "NUMBER_NOT_EQ", + "NUMBER_BETWEEN", + "NUMBER_NOT_BETWEEN", + "TEXT_CONTAINS", + "TEXT_NOT_CONTAINS", + "TEXT_STARTS_WITH", + "TEXT_ENDS_WITH", + "TEXT_EQ", + "TEXT_IS_EMAIL", + "TEXT_IS_URL", + "DATE_EQ", + "DATE_BEFORE", + "DATE_AFTER", + "DATE_ON_OR_BEFORE", + "DATE_ON_OR_AFTER", + "DATE_BETWEEN", + "DATE_NOT_BETWEEN", + "DATE_IS_VALID", + "ONE_OF_RANGE", + "ONE_OF_LIST", + "BLANK", + "NOT_BLANK", + "CUSTOM_FORMULA" + ], + "description": "The type of condition.", + "type": "string", + "enumDescriptions": [ + "The default value, do not use.", + "The cell's value must be greater than the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be greater than or equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be less than the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be less than or equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be not equal to the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be between the two condition values.\nSupported by data validation, conditional formatting and filters.\nRequires exactly two ConditionValues.", + "The cell's value must not be between the two condition values.\nSupported by data validation, conditional formatting and filters.\nRequires exactly two ConditionValues.", + "The cell's value must contain the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must not contain the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must start with the condition's value.\nSupported by conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must end with the condition's value.\nSupported by conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be exactly the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be a valid email address.\nSupported by data validation.\nRequires no ConditionValues.", + "The cell's value must be a valid URL.\nSupported by data validation.\nRequires no ConditionValues.", + "The cell's value must be the same date as the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue.", + "The cell's value must be before the date of the condition's 
value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue\nthat may be a relative date.", + "The cell's value must be after the date of the condition's value.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue\nthat may be a relative date.", + "The cell's value must be on or before the date of the condition's value.\nSupported by data validation.\nRequires a single ConditionValue\nthat may be a relative date.", + "The cell's value must be on or after the date of the condition's value.\nSupported by data validation.\nRequires a single ConditionValue\nthat may be a relative date.", + "The cell's value must be between the dates of the two condition values.\nSupported by data validation.\nRequires exactly two ConditionValues.", + "The cell's value must be outside the dates of the two condition values.\nSupported by data validation.\nRequires exactly two ConditionValues.", + "The cell's value must be a date.\nSupported by data validation.\nRequires no ConditionValues.", + "The cell's value must be listed in the grid in condition value's range.\nSupported by data validation.\nRequires a single ConditionValue,\nand the value must be a valid range in A1 notation.", + "The cell's value must in the list of condition values.\nSupported by data validation.\nSupports any number of condition values,\none per item in the list.\nFormulas are not supported in the values.", + "The cell's value must be empty.\nSupported by conditional formatting and filters.\nRequires no ConditionValues.", + "The cell's value must not be empty.\nSupported by conditional formatting and filters.\nRequires no ConditionValues.", + "The condition's formula must evaluate to true.\nSupported by data validation, conditional formatting and filters.\nRequires a single ConditionValue." + ] + }, + "values": { + "description": "The values of the condition. The number of supported values depends\non the condition type. 
Some support zero values,\nothers one or two values,\nand ConditionType.ONE_OF_LIST supports an arbitrary number of values.", "type": "array", "items": { - "$ref": "ExtendedValue" + "$ref": "ConditionValue" } - }, - "valuesIndex": { - "description": "The offset in the PivotTable.values list which the values in this\ngrouping should be sorted by.", - "format": "int32", - "type": "integer" } }, - "id": "PivotGroupSortValueBucket", - "description": "Information about which values in a pivot group should be used for sorting.", - "type": "object" + "id": "BooleanCondition" }, - "EmbeddedObjectPosition": { + "GridRange": { + "description": "A range on a sheet.\nAll indexes are zero-based.\nIndexes are half open, e.g the start index is inclusive\nand the end index is exclusive -- [start_index, end_index).\nMissing indexes indicate the range is unbounded on that side.\n\nFor example, if `\"Sheet1\"` is sheet ID 0, then:\n\n `Sheet1!A1:A1 == sheet_id: 0,\n start_row_index: 0, end_row_index: 1,\n start_column_index: 0, end_column_index: 1`\n\n `Sheet1!A3:B4 == sheet_id: 0,\n start_row_index: 2, end_row_index: 4,\n start_column_index: 0, end_column_index: 2`\n\n `Sheet1!A:B == sheet_id: 0,\n start_column_index: 0, end_column_index: 2`\n\n `Sheet1!A5:B == sheet_id: 0,\n start_row_index: 4,\n start_column_index: 0, end_column_index: 2`\n\n `Sheet1 == sheet_id:0`\n\nThe start index must always be less than or equal to the end index.\nIf the start index equals the end index, then the range is empty.\nEmpty ranges are typically not meaningful and are usually rendered in the\nUI as `#REF!`.", + "type": "object", "properties": { - "newSheet": { - "description": "If true, the embedded object will be put on a new sheet whose ID\nis chosen for you. Used only when writing.", - "type": "boolean" + "endRowIndex": { + "type": "integer", + "description": "The end row (exclusive) of the range, or not set if unbounded.", + "format": "int32" }, - "sheetId": { - "description": "The sheet this is on. Set only if the embedded object\nis on its own sheet. Must be non-negative.", + "endColumnIndex": { + "description": "The end column (exclusive) of the range, or not set if unbounded.", "format": "int32", "type": "integer" }, - "overlayPosition": { - "$ref": "OverlayPosition", - "description": "The position at which the object is overlaid on top of a grid." - } - }, - "id": "EmbeddedObjectPosition", - "description": "The position of an embedded object such as a chart.", - "type": "object" - }, - "DeleteProtectedRangeRequest": { - "description": "Deletes the protected range with the given ID.", - "type": "object", - "properties": { - "protectedRangeId": { - "description": "The ID of the protected range to delete.", + "startRowIndex": { + "description": "The start row (inclusive) of the range, or not set if unbounded.", "format": "int32", "type": "integer" - } - }, - "id": "DeleteProtectedRangeRequest" - }, - "AutoFillRequest": { - "properties": { - "sourceAndDestination": { - "description": "The source and destination areas to autofill.\nThis explicitly lists the source of the autofill and where to\nextend that data.", - "$ref": "SourceAndDestination" }, - "range": { - "description": "The range to autofill. 
This will examine the range and detect\nthe location that has data and automatically fill that data\nin to the rest of the range.", - "$ref": "GridRange" + "startColumnIndex": { + "type": "integer", + "description": "The start column (inclusive) of the range, or not set if unbounded.", + "format": "int32" }, - "useAlternateSeries": { - "description": "True if we should generate data with the \"alternate\" series.\nThis differs based on the type and amount of source data.", - "type": "boolean" + "sheetId": { + "description": "The sheet this range is on.", + "format": "int32", + "type": "integer" } }, - "id": "AutoFillRequest", - "description": "Fills in more data based on existing data.", - "type": "object" + "id": "GridRange" }, - "GradientRule": { - "description": "A rule that applies a gradient color scale format, based on\nthe interpolation points listed. The format of a cell will vary\nbased on its contents as compared to the values of the interpolation\npoints.", + "BasicChartSpec": { + "description": "The specification for a basic chart. See BasicChartType for the list\nof charts this supports.", "type": "object", "properties": { - "midpoint": { - "$ref": "InterpolationPoint", - "description": "An optional midway interpolation point." - }, - "minpoint": { - "$ref": "InterpolationPoint", - "description": "The starting interpolation point." - }, - "maxpoint": { - "$ref": "InterpolationPoint", - "description": "The final interpolation point." - } - }, - "id": "GradientRule" - }, - "SetBasicFilterRequest": { - "properties": { - "filter": { - "$ref": "BasicFilter", - "description": "The filter to set." - } - }, - "id": "SetBasicFilterRequest", - "description": "Sets the basic filter associated with a sheet.", - "type": "object" - }, - "ClearValuesRequest": { - "properties": {}, - "id": "ClearValuesRequest", - "description": "The request for clearing a range of values in a spreadsheet.", - "type": "object" - }, - "InterpolationPoint": { - "properties": { - "color": { - "description": "The color this interpolation point should use.", - "$ref": "Color" - }, - "type": { + "chartType": { + "enumDescriptions": [ + "Default value, do not use.", + "A \u003ca href=\"/chart/interactive/docs/gallery/barchart\"\u003ebar chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/linechart\"\u003eline chart\u003c/a\u003e.", + "An \u003ca href=\"/chart/interactive/docs/gallery/areachart\"\u003earea chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/columnchart\"\u003ecolumn chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/scatterchart\"\u003escatter chart\u003c/a\u003e.", + "A \u003ca href=\"/chart/interactive/docs/gallery/combochart\"\u003ecombo chart\u003c/a\u003e." 
+ ], "enum": [ - "INTERPOLATION_POINT_TYPE_UNSPECIFIED", - "MIN", - "MAX", - "NUMBER", - "PERCENT", - "PERCENTILE" + "BASIC_CHART_TYPE_UNSPECIFIED", + "BAR", + "LINE", + "AREA", + "COLUMN", + "SCATTER", + "COMBO" ], - "description": "How the value should be interpreted.", - "type": "string", - "enumDescriptions": [ - "The default value, do not use.", - "The interpolation point will use the minimum value in the\ncells over the range of the conditional format.", - "The interpolation point will use the maximum value in the\ncells over the range of the conditional format.", - "The interpolation point will use exactly the value in\nInterpolationPoint.value.", - "The interpolation point will be the given percentage over\nall the cells in the range of the conditional format.\nThis is equivalent to NUMBER if the value was:\n`=(MAX(FLATTEN(range)) * (value / 100))\n + (MIN(FLATTEN(range)) * (1 - (value / 100)))`\n(where errors in the range are ignored when flattening).", - "The interpolation point will be the given percentile\nover all the cells in the range of the conditional format.\nThis is equivalent to NUMBER if the value was:\n`=PERCENTILE(FLATTEN(range), value / 100)`\n(where errors in the range are ignored when flattening)." - ] + "description": "The type of the chart.", + "type": "string" }, - "value": { - "description": "The value this interpolation point uses. May be a formula.\nUnused if type is MIN or\nMAX.", + "series": { + "description": "The data this chart is visualizing.", + "type": "array", + "items": { + "$ref": "BasicChartSeries" + } + }, + "legendPosition": { + "enumDescriptions": [ + "Default value, do not use.", + "The legend is rendered on the bottom of the chart.", + "The legend is rendered on the left of the chart.", + "The legend is rendered on the right of the chart.", + "The legend is rendered on the top of the chart.", + "No legend is rendered." 
+ ], + "enum": [ + "BASIC_CHART_LEGEND_POSITION_UNSPECIFIED", + "BOTTOM_LEGEND", + "LEFT_LEGEND", + "RIGHT_LEGEND", + "TOP_LEGEND", + "NO_LEGEND" + ], + "description": "The position of the chart legend.", "type": "string" + }, + "domains": { + "description": "The domain of data this is charting.\nOnly a single domain is currently supported.", + "type": "array", + "items": { + "$ref": "BasicChartDomain" + } + }, + "headerCount": { + "type": "integer", + "description": "The number of rows or columns in the data that are \"headers\".\nIf not set, Google Sheets will guess how many rows are headers based\non the data.\n\n(Note that BasicChartAxis.title may override the axis title\n inferred from the header values.)", + "format": "int32" + }, + "axis": { + "description": "The axis on the chart.", + "type": "array", + "items": { + "$ref": "BasicChartAxis" + } } }, - "id": "InterpolationPoint", - "description": "A single interpolation point on a gradient conditional format.\nThese pin the gradient color scale according to the color,\ntype and value chosen.", - "type": "object" + "id": "BasicChartSpec" }, - "DeleteEmbeddedObjectRequest": { + "SetDataValidationRequest": { + "description": "Sets a data validation rule to every cell in the range.\nTo clear validation in a range, call this with no rule specified.", + "type": "object", "properties": { - "objectId": { - "description": "The ID of the embedded object to delete.", - "format": "int32", - "type": "integer" + "rule": { + "description": "The data validation rule to set on each cell in the range,\nor empty to clear the data validation in the range.", + "$ref": "DataValidationRule" + }, + "range": { + "$ref": "GridRange", + "description": "The range the data validation rule should apply to." } }, - "id": "DeleteEmbeddedObjectRequest", - "description": "Deletes the embedded object with the given ID.", - "type": "object" + "id": "SetDataValidationRequest" }, - "FindReplaceResponse": { - "description": "The result of the find/replace.", + "CellData": { + "description": "Data about a specific cell.", "type": "object", "properties": { - "sheetsChanged": { - "description": "The number of sheets changed.", - "format": "int32", - "type": "integer" + "pivotTable": { + "$ref": "PivotTable", + "description": "A pivot table anchored at this cell. The size of pivot table itself\nis computed dynamically based on its data, grouping, filters, values,\netc. Only the top-left cell of the pivot table contains the pivot table\ndefinition. The other cells will contain the calculated values of the\nresults of the pivot in their effective_value fields." }, - "formulasChanged": { - "description": "The number of formula cells changed.", - "format": "int32", - "type": "integer" + "userEnteredFormat": { + "$ref": "CellFormat", + "description": "The format the user entered for the cell.\n\nWhen writing, the new format will be merged with the existing format." 
}, - "valuesChanged": { - "description": "The number of non-formula cells changed.", - "format": "int32", - "type": "integer" + "effectiveFormat": { + "description": "The effective format being used by the cell.\nThis includes the results of applying any conditional formatting and,\nif the cell contains a formula, the computed number format.\nIf the effective format is the default format, effective format will\nnot be written.\nThis field is read-only.", + "$ref": "CellFormat" }, - "occurrencesChanged": { - "description": "The number of occurrences (possibly multiple within a cell) changed.\nFor example, if replacing `\"e\"` with `\"o\"` in `\"Google Sheets\"`, this would\nbe `\"3\"` because `\"Google Sheets\"` -\u003e `\"Googlo Shoots\"`.", - "format": "int32", - "type": "integer" + "note": { + "description": "Any note on the cell.", + "type": "string" }, - "rowsChanged": { - "description": "The number of rows changed.", - "format": "int32", - "type": "integer" - } - }, - "id": "FindReplaceResponse" - }, - "DuplicateFilterViewRequest": { - "properties": { - "filterId": { - "description": "The ID of the filter being duplicated.", - "format": "int32", - "type": "integer" + "userEnteredValue": { + "description": "The value the user entered in the cell. e.g, `1234`, `'Hello'`, or `=NOW()`\nNote: Dates, Times and DateTimes are represented as doubles in\nserial number format.", + "$ref": "ExtendedValue" + }, + "dataValidation": { + "$ref": "DataValidationRule", + "description": "A data validation rule on the cell, if any.\n\nWhen writing, the new data validation rule will overwrite any prior rule." + }, + "effectiveValue": { + "description": "The effective value of the cell. For cells with formulas, this will be\nthe calculated value. For cells with literals, this will be\nthe same as the user_entered_value.\nThis field is read-only.", + "$ref": "ExtendedValue" + }, + "textFormatRuns": { + "description": "Runs of rich text applied to subsections of the cell. Runs are only valid\non user entered strings, not formulas, bools, or numbers.\nRuns start at specific indexes in the text and continue until the next\nrun. Properties of a run will continue unless explicitly changed\nin a subsequent run (and properties of the first run will continue\nthe properties of the cell unless explicitly changed).\n\nWhen writing, the new runs will overwrite any prior runs. When writing a\nnew user_entered_value, previous runs will be erased.", + "type": "array", + "items": { + "$ref": "TextFormatRun" + } + }, + "formattedValue": { + "type": "string", + "description": "The formatted value of the cell.\nThis is the value as it's shown to the user.\nThis field is read-only." + }, + "hyperlink": { + "description": "A hyperlink this cell points to, if any.\nThis field is read-only. 
(To set it, use a `=HYPERLINK` formula.)", + "type": "string" } }, - "id": "DuplicateFilterViewRequest", - "description": "Duplicates a particular filter view.", - "type": "object" + "id": "CellData" }, - "DeleteSheetRequest": { + "BatchUpdateSpreadsheetRequest": { + "description": "The request for updating any aspect of a spreadsheet.", + "type": "object", "properties": { - "sheetId": { - "description": "The ID of the sheet to delete.", - "format": "int32", - "type": "integer" + "requests": { + "description": "A list of updates to apply to the spreadsheet.", + "type": "array", + "items": { + "$ref": "Request" + } + }, + "includeSpreadsheetInResponse": { + "description": "Determines if the update response should include the spreadsheet\nresource.", + "type": "boolean" + }, + "responseRanges": { + "description": "Limits the ranges included in the response spreadsheet.\nMeaningful only if include_spreadsheet_response is 'true'.", + "type": "array", + "items": { + "type": "string" + } + }, + "responseIncludeGridData": { + "description": "True if grid data should be returned. Meaningful only if\nif include_spreadsheet_response is 'true'.\nThis parameter is ignored if a field mask was set in the request.", + "type": "boolean" } }, - "id": "DeleteSheetRequest", - "description": "Deletes the requested sheet.", - "type": "object" + "id": "BatchUpdateSpreadsheetRequest" }, - "UpdateConditionalFormatRuleResponse": { - "description": "The result of updating a conditional format rule.", + "BasicChartAxis": { + "id": "BasicChartAxis", + "description": "An axis of the chart.\nA chart may not have more than one axis per\naxis position.", "type": "object", "properties": { - "oldIndex": { - "description": "The old index of the rule. Not set if a rule was replaced\n(because it is the same as new_index).", - "format": "int32", - "type": "integer" - }, - "newRule": { - "description": "The new rule that replaced the old rule (if replacing),\nor the rule that was moved (if moved)", - "$ref": "ConditionalFormatRule" + "format": { + "$ref": "TextFormat", + "description": "The format of the title.\nOnly valid if the axis is not associated with the domain." }, - "oldRule": { - "$ref": "ConditionalFormatRule", - "description": "The old (deleted) rule. Not set if a rule was moved\n(because it is the same as new_rule)." + "position": { + "description": "The position of this axis.", + "type": "string", + "enumDescriptions": [ + "Default value, do not use.", + "The axis rendered at the bottom of a chart.\nFor most charts, this is the standard major axis.\nFor bar charts, this is a minor axis.", + "The axis rendered at the left of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is the standard major axis.", + "The axis rendered at the right of a chart.\nFor most charts, this is a minor axis.\nFor bar charts, this is an unusual major axis." + ], + "enum": [ + "BASIC_CHART_AXIS_POSITION_UNSPECIFIED", + "BOTTOM_AXIS", + "LEFT_AXIS", + "RIGHT_AXIS" + ] }, - "newIndex": { - "description": "The index of the new rule.", - "format": "int32", - "type": "integer" + "title": { + "description": "The title of this axis. 
If set, this overrides any title inferred\nfrom headers of the data.", + "type": "string" } - }, - "id": "UpdateConditionalFormatRuleResponse" + } }, - "DuplicateSheetRequest": { - "description": "Duplicates the contents of a sheet.", + "Padding": { + "description": "The amount of padding around the cell, in pixels.\nWhen updating padding, every field must be specified.", "type": "object", "properties": { - "newSheetName": { - "description": "The name of the new sheet. If empty, a new name is chosen for you.", - "type": "string" + "right": { + "description": "The right padding of the cell.", + "format": "int32", + "type": "integer" }, - "sourceSheetId": { - "description": "The sheet to duplicate.", + "bottom": { + "description": "The bottom padding of the cell.", "format": "int32", "type": "integer" }, - "newSheetId": { - "description": "If set, the ID of the new sheet. If not set, an ID is chosen.\nIf set, the ID must not conflict with any existing sheet ID.\nIf set, it must be non-negative.", + "top": { + "description": "The top padding of the cell.", "format": "int32", "type": "integer" }, - "insertSheetIndex": { - "description": "The zero-based index where the new sheet should be inserted.\nThe index of all sheets after this are incremented.", + "left": { + "description": "The left padding of the cell.", "format": "int32", "type": "integer" } }, - "id": "DuplicateSheetRequest" + "id": "Padding" }, - "ConditionValue": { - "description": "The value of the condition.", + "DeleteDimensionRequest": { + "description": "Deletes the dimensions from the sheet.", "type": "object", "properties": { - "relativeDate": { - "enum": [ - "RELATIVE_DATE_UNSPECIFIED", - "PAST_YEAR", - "PAST_MONTH", - "PAST_WEEK", - "YESTERDAY", - "TODAY", - "TOMORROW" - ], - "description": "A relative date (based on the current date).\nValid only if the type is\nDATE_BEFORE,\nDATE_AFTER,\nDATE_ON_OR_BEFORE or\nDATE_ON_OR_AFTER.\n\nRelative dates are not supported in data validation.\nThey are supported only in conditional formatting and\nconditional filters.", - "type": "string", - "enumDescriptions": [ - "Default value, do not use.", - "The value is one year before today.", - "The value is one month before today.", - "The value is one week before today.", - "The value is yesterday.", - "The value is today.", - "The value is tomorrow." - ] - }, - "userEnteredValue": { - "description": "A value the condition is based on.\nThe value will be parsed as if the user typed into a cell.\nFormulas are supported (and must begin with an `=`).", - "type": "string" + "range": { + "description": "The dimensions to delete from the sheet.", + "$ref": "DimensionRange" } }, - "id": "ConditionValue" + "id": "DeleteDimensionRequest" }, - "ExtendedValue": { - "description": "The kinds of value that a cell in a spreadsheet can have.", - "type": "object", - "properties": { - "numberValue": { - "description": "Represents a double value.\nNote: Dates, Times and DateTimes are represented as doubles in\n\"serial number\" format.", - "format": "double", - "type": "number" - }, - "errorValue": { - "$ref": "ErrorValue", - "description": "Represents an error.\nThis field is read-only." - }, - "stringValue": { - "description": "Represents a string value.\nLeading single quotes are not included. 
For example, if the user typed\n`'123` into the UI, this would be represented as a `stringValue` of\n`\"123\"`.", - "type": "string" - }, - "boolValue": { - "description": "Represents a boolean value.", - "type": "boolean" + "UpdateChartSpecRequest": { + "description": "Updates a chart's specifications.\n(This does not move or resize a chart. To move or resize a chart, use\n UpdateEmbeddedObjectPositionRequest.)", + "type": "object", + "properties": { + "chartId": { + "description": "The ID of the chart to update.", + "format": "int32", + "type": "integer" }, - "formulaValue": { - "description": "Represents a formula.", - "type": "string" + "spec": { + "description": "The specification to apply to the chart.", + "$ref": "ChartSpec" } }, - "id": "ExtendedValue" + "id": "UpdateChartSpecRequest" }, - "Spreadsheet": { - "description": "Resource that represents a spreadsheet.", + "DeleteFilterViewRequest": { + "description": "Deletes a particular filter view.", "type": "object", "properties": { - "properties": { - "$ref": "SpreadsheetProperties", - "description": "Overall properties of a spreadsheet." + "filterId": { + "description": "The ID of the filter to delete.", + "format": "int32", + "type": "integer" + } + }, + "id": "DeleteFilterViewRequest" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "canonicalName": "Sheets", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/drive": { + "description": "View and manage the files in your Google Drive" }, - "spreadsheetId": { - "description": "The ID of the spreadsheet.\nThis field is read-only.", - "type": "string" + "https://www.googleapis.com/auth/drive.readonly": { + "description": "View the files in your Google Drive" }, - "sheets": { - "description": "The sheets that are part of a spreadsheet.", - "type": "array", - "items": { - "$ref": "Sheet" - } + "https://www.googleapis.com/auth/spreadsheets.readonly": { + "description": "View your Google Spreadsheets" }, - "namedRanges": { - "description": "The named ranges defined in a spreadsheet.", - "type": "array", - "items": { - "$ref": "NamedRange" - } + "https://www.googleapis.com/auth/spreadsheets": { + "description": "View and manage your spreadsheets in Google Drive" + } + } + } + }, + "rootUrl": "https://sheets.googleapis.com/", + "ownerDomain": "google.com", + "name": "sheets", + "batchPath": "batch", + "title": "Google Sheets API", + "ownerName": "Google", + "resources": { + "spreadsheets": { + "methods": { + "get": { + "description": "Returns the spreadsheet at the given ID.\nThe caller must specify the spreadsheet ID.\n\nBy default, data within grids will not be returned.\nYou can include grid data one of two ways:\n\n* Specify a field mask listing your desired fields using the `fields` URL\nparameter in HTTP\n\n* Set the includeGridData\nURL parameter to true. If a field mask is set, the `includeGridData`\nparameter is ignored\n\nFor large spreadsheets, it is recommended to retrieve only the specific\nfields of the spreadsheet that you want.\n\nTo retrieve only subsets of the spreadsheet, use the\nranges URL parameter.\nMultiple ranges can be specified. Limiting the range will\nreturn only the portions of the spreadsheet that intersect the requested\nranges. 
Ranges are specified using A1 notation.", + "httpMethod": "GET", + "parameterOrder": [ + "spreadsheetId" + ], + "response": { + "$ref": "Spreadsheet" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/spreadsheets.readonly" + ], + "parameters": { + "ranges": { + "repeated": true, + "location": "query", + "description": "The ranges to retrieve from the spreadsheet.", + "type": "string" + }, + "includeGridData": { + "location": "query", + "description": "True if grid data should be returned.\nThis parameter is ignored if a field mask was set in the request.", + "type": "boolean" + }, + "spreadsheetId": { + "description": "The spreadsheet to request.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}", + "id": "sheets.spreadsheets.get", + "path": "v4/spreadsheets/{spreadsheetId}" }, - "spreadsheetUrl": { - "description": "The url of the spreadsheet.\nThis field is read-only.", - "type": "string" + "create": { + "request": { + "$ref": "Spreadsheet" + }, + "description": "Creates a spreadsheet, returning the newly created spreadsheet.", + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "Spreadsheet" + }, + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "flatPath": "v4/spreadsheets", + "id": "sheets.spreadsheets.create", + "path": "v4/spreadsheets" + }, + "batchUpdate": { + "response": { + "$ref": "BatchUpdateSpreadsheetResponse" + }, + "parameterOrder": [ + "spreadsheetId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "parameters": { + "spreadsheetId": { + "location": "path", + "description": "The spreadsheet to apply the updates to.", + "required": true, + "type": "string" + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}:batchUpdate", + "path": "v4/spreadsheets/{spreadsheetId}:batchUpdate", + "id": "sheets.spreadsheets.batchUpdate", + "description": "Applies one or more updates to the spreadsheet.\n\nEach request is validated before\nbeing applied. If any request is not valid then the entire request will\nfail and nothing will be applied.\n\nSome requests have replies to\ngive you some information about how\nthey are applied. The replies will mirror the requests. For example,\nif you applied 4 updates and the 3rd one had a reply, then the\nresponse will have 2 empty replies, the actual reply, and another empty\nreply, in that order.\n\nDue to the collaborative nature of spreadsheets, it is not guaranteed that\nthe spreadsheet will reflect exactly your changes after this completes,\nhowever it is guaranteed that the updates in the request will be\napplied together atomically. Your changes may be altered with respect to\ncollaborator changes. 
If there are no collaborators, the spreadsheet\nshould reflect your changes.", + "request": { + "$ref": "BatchUpdateSpreadsheetRequest" + } } }, - "id": "Spreadsheet" - }, - "BatchClearValuesResponse": { - "description": "The response when updating a range of values in a spreadsheet.", - "type": "object", - "properties": { - "clearedRanges": { - "description": "The ranges that were cleared, in A1 notation.\n(If the requests were for an unbounded range or a ranger larger\n than the bounds of the sheet, this will be the actual ranges\n that were cleared, bounded to the sheet's limits.)", - "type": "array", - "items": { - "type": "string" + "resources": { + "values": { + "methods": { + "clear": { + "description": "Clears values from a spreadsheet.\nThe caller must specify the spreadsheet ID and range.\nOnly values are cleared -- all other properties of the cell (such as\nformatting, data validation, etc..) are kept.", + "request": { + "$ref": "ClearValuesRequest" + }, + "response": { + "$ref": "ClearValuesResponse" + }, + "parameterOrder": [ + "spreadsheetId", + "range" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "parameters": { + "spreadsheetId": { + "description": "The ID of the spreadsheet to update.", + "required": true, + "type": "string", + "location": "path" + }, + "range": { + "description": "The A1 notation of the values to clear.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}:clear", + "path": "v4/spreadsheets/{spreadsheetId}/values/{range}:clear", + "id": "sheets.spreadsheets.values.clear" + }, + "batchGet": { + "description": "Returns one or more ranges of values from a spreadsheet.\nThe caller must specify the spreadsheet ID and one or more ranges.", + "response": { + "$ref": "BatchGetValuesResponse" + }, + "parameterOrder": [ + "spreadsheetId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/spreadsheets.readonly" + ], + "parameters": { + "ranges": { + "location": "query", + "description": "The A1 notation of the values to retrieve.", + "type": "string", + "repeated": true + }, + "majorDimension": { + "location": "query", + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ], + "description": "The major dimension that results should use.\n\nFor example, if the spreadsheet data is: `A1=1,B1=2,A2=3,B2=4`,\nthen requesting `range=A1:B2,majorDimension=ROWS` will return\n`[[1,2],[3,4]]`,\nwhereas requesting `range=A1:B2,majorDimension=COLUMNS` will return\n`[[1,3],[2,4]]`.", + "type": "string" + }, + "spreadsheetId": { + "location": "path", + "description": "The ID of the spreadsheet to retrieve data from.", + "required": true, + "type": "string" + }, + "valueRenderOption": { + "location": "query", + "enum": [ + "FORMATTED_VALUE", + "UNFORMATTED_VALUE", + "FORMULA" + ], + "description": "How values should be represented in the output.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", + "type": "string" + }, + "dateTimeRenderOption": { + "description": "How dates, times, and durations should be represented in the output.\nThis is ignored if value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", + "type": "string", + "location": "query", + 
"enum": [ + "SERIAL_NUMBER", + "FORMATTED_STRING" + ] + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}/values:batchGet", + "path": "v4/spreadsheets/{spreadsheetId}/values:batchGet", + "id": "sheets.spreadsheets.values.batchGet" + }, + "append": { + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "parameters": { + "insertDataOption": { + "location": "query", + "enum": [ + "OVERWRITE", + "INSERT_ROWS" + ], + "description": "How the input data should be inserted.", + "type": "string" + }, + "valueInputOption": { + "description": "How the input data should be interpreted.", + "type": "string", + "location": "query", + "enum": [ + "INPUT_VALUE_OPTION_UNSPECIFIED", + "RAW", + "USER_ENTERED" + ] + }, + "responseDateTimeRenderOption": { + "enum": [ + "SERIAL_NUMBER", + "FORMATTED_STRING" + ], + "description": "Determines how dates, times, and durations in the response should be\nrendered. This is ignored if response_value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", + "type": "string", + "location": "query" + }, + "range": { + "description": "The A1 notation of a range to search for a logical table of data.\nValues will be appended after the last row of the table.", + "required": true, + "type": "string", + "location": "path" + }, + "includeValuesInResponse": { + "location": "query", + "description": "Determines if the update response should include the values\nof the cells that were appended. By default, responses\ndo not include the updated values.", + "type": "boolean" + }, + "spreadsheetId": { + "location": "path", + "description": "The ID of the spreadsheet to update.", + "required": true, + "type": "string" + }, + "responseValueRenderOption": { + "location": "query", + "enum": [ + "FORMATTED_VALUE", + "UNFORMATTED_VALUE", + "FORMULA" + ], + "description": "Determines how values in the response should be rendered.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", + "type": "string" + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}:append", + "id": "sheets.spreadsheets.values.append", + "path": "v4/spreadsheets/{spreadsheetId}/values/{range}:append", + "description": "Appends values to a spreadsheet. The input range is used to search for\nexisting data and find a \"table\" within that range. Values will be\nappended to the next row of the table, starting with the first column of\nthe table. See the\n[guide](/sheets/guides/values#appending_values)\nand\n[sample code](/sheets/samples/writing#append_values)\nfor specific details of how tables are detected and data is appended.\n\nThe caller must specify the spreadsheet ID, range, and\na valueInputOption. The `valueInputOption` only\ncontrols how the input data will be added to the sheet (column-wise or\nrow-wise), it does not influence what cell the data starts being written\nto.", + "request": { + "$ref": "ValueRange" + }, + "httpMethod": "POST", + "parameterOrder": [ + "spreadsheetId", + "range" + ], + "response": { + "$ref": "AppendValuesResponse" + } + }, + "batchClear": { + "description": "Clears one or more ranges of values from a spreadsheet.\nThe caller must specify the spreadsheet ID and one or more ranges.\nOnly values are cleared -- all other properties of the cell (such as\nformatting, data validation, etc..) 
are kept.", + "request": { + "$ref": "BatchClearValuesRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "spreadsheetId" + ], + "response": { + "$ref": "BatchClearValuesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "parameters": { + "spreadsheetId": { + "required": true, + "type": "string", + "location": "path", + "description": "The ID of the spreadsheet to update." + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}/values:batchClear", + "id": "sheets.spreadsheets.values.batchClear", + "path": "v4/spreadsheets/{spreadsheetId}/values:batchClear" + }, + "get": { + "response": { + "$ref": "ValueRange" + }, + "parameterOrder": [ + "spreadsheetId", + "range" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/spreadsheets.readonly" + ], + "parameters": { + "spreadsheetId": { + "location": "path", + "description": "The ID of the spreadsheet to retrieve data from.", + "required": true, + "type": "string" + }, + "range": { + "location": "path", + "description": "The A1 notation of the values to retrieve.", + "required": true, + "type": "string" + }, + "valueRenderOption": { + "description": "How values should be represented in the output.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", + "type": "string", + "location": "query", + "enum": [ + "FORMATTED_VALUE", + "UNFORMATTED_VALUE", + "FORMULA" + ] + }, + "dateTimeRenderOption": { + "location": "query", + "enum": [ + "SERIAL_NUMBER", + "FORMATTED_STRING" + ], + "description": "How dates, times, and durations should be represented in the output.\nThis is ignored if value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", + "type": "string" + }, + "majorDimension": { + "description": "The major dimension that results should use.\n\nFor example, if the spreadsheet data is: `A1=1,B1=2,A2=3,B2=4`,\nthen requesting `range=A1:B2,majorDimension=ROWS` will return\n`[[1,2],[3,4]]`,\nwhereas requesting `range=A1:B2,majorDimension=COLUMNS` will return\n`[[1,3],[2,4]]`.", + "type": "string", + "location": "query", + "enum": [ + "DIMENSION_UNSPECIFIED", + "ROWS", + "COLUMNS" + ] + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}", + "path": "v4/spreadsheets/{spreadsheetId}/values/{range}", + "id": "sheets.spreadsheets.values.get", + "description": "Returns a range of values from a spreadsheet.\nThe caller must specify the spreadsheet ID and a range." 
+ }, + "update": { + "path": "v4/spreadsheets/{spreadsheetId}/values/{range}", + "id": "sheets.spreadsheets.values.update", + "description": "Sets values in a range of a spreadsheet.\nThe caller must specify the spreadsheet ID, range, and\na valueInputOption.", + "request": { + "$ref": "ValueRange" + }, + "response": { + "$ref": "UpdateValuesResponse" + }, + "parameterOrder": [ + "spreadsheetId", + "range" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "parameters": { + "range": { + "location": "path", + "description": "The A1 notation of the values to update.", + "required": true, + "type": "string" + }, + "includeValuesInResponse": { + "location": "query", + "description": "Determines if the update response should include the values\nof the cells that were updated. By default, responses\ndo not include the updated values.\nIf the range to write was larger than than the range actually written,\nthe response will include all values in the requested range (excluding\ntrailing empty rows and columns).", + "type": "boolean" + }, + "spreadsheetId": { + "description": "The ID of the spreadsheet to update.", + "required": true, + "type": "string", + "location": "path" + }, + "responseValueRenderOption": { + "location": "query", + "enum": [ + "FORMATTED_VALUE", + "UNFORMATTED_VALUE", + "FORMULA" + ], + "description": "Determines how values in the response should be rendered.\nThe default render option is ValueRenderOption.FORMATTED_VALUE.", + "type": "string" + }, + "valueInputOption": { + "location": "query", + "enum": [ + "INPUT_VALUE_OPTION_UNSPECIFIED", + "RAW", + "USER_ENTERED" + ], + "description": "How the input data should be interpreted.", + "type": "string" + }, + "responseDateTimeRenderOption": { + "enum": [ + "SERIAL_NUMBER", + "FORMATTED_STRING" + ], + "description": "Determines how dates, times, and durations in the response should be\nrendered. 
This is ignored if response_value_render_option is\nFORMATTED_VALUE.\nThe default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].", + "type": "string", + "location": "query" + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}/values/{range}" + }, + "batchUpdate": { + "response": { + "$ref": "BatchUpdateValuesResponse" + }, + "parameterOrder": [ + "spreadsheetId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "parameters": { + "spreadsheetId": { + "location": "path", + "description": "The ID of the spreadsheet to update.", + "required": true, + "type": "string" + } + }, + "flatPath": "v4/spreadsheets/{spreadsheetId}/values:batchUpdate", + "path": "v4/spreadsheets/{spreadsheetId}/values:batchUpdate", + "id": "sheets.spreadsheets.values.batchUpdate", + "description": "Sets values in one or more ranges of a spreadsheet.\nThe caller must specify the spreadsheet ID,\na valueInputOption, and one or more\nValueRanges.", + "request": { + "$ref": "BatchUpdateValuesRequest" + } + } } }, - "spreadsheetId": { - "description": "The spreadsheet the updates were applied to.", - "type": "string" + "sheets": { + "methods": { + "copyTo": { + "httpMethod": "POST", + "parameterOrder": [ + "spreadsheetId", + "sheetId" + ], + "response": { + "$ref": "SheetProperties" + }, + "parameters": { + "sheetId": { + "location": "path", + "description": "The ID of the sheet to copy.", + "format": "int32", + "required": true, + "type": "integer" + }, + "spreadsheetId": { + "location": "path", + "description": "The ID of the spreadsheet containing the sheet to copy.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/spreadsheets" + ], + "flatPath": "v4/spreadsheets/{spreadsheetId}/sheets/{sheetId}:copyTo", + "id": "sheets.spreadsheets.sheets.copyTo", + "path": "v4/spreadsheets/{spreadsheetId}/sheets/{sheetId}:copyTo", + "request": { + "$ref": "CopySheetToAnotherSpreadsheetRequest" + }, + "description": "Copies a single sheet from a spreadsheet to another spreadsheet.\nReturns the properties of the newly created sheet." + } + } } - }, - "id": "BatchClearValuesResponse" + } + } + }, + "parameters": { + "quotaUser": { + "type": "string", + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." }, - "BandedRange": { - "properties": { - "bandedRangeId": { - "description": "The id of the banded range.", - "format": "int32", - "type": "integer" - }, - "rowProperties": { - "$ref": "BandingProperties", - "description": "Properties for row bands. These properties will be applied on a row-by-row\nbasis throughout all the rows in the range. At least one of\nrow_properties or column_properties must be specified." - }, - "columnProperties": { - "description": "Properties for column bands. These properties will be applied on a column-\nby-column basis throughout all the columns in the range. At least one of\nrow_properties or column_properties must be specified.", - "$ref": "BandingProperties" - }, - "range": { - "$ref": "GridRange", - "description": "The range over which these properties are applied." 
- } - }, - "id": "BandedRange", - "description": "A banded (alternating colors) range in a sheet.", - "type": "object" + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" }, - "AddChartRequest": { - "description": "Adds a chart to a sheet in the spreadsheet.", - "type": "object", - "properties": { - "chart": { - "description": "The chart that should be added to the spreadsheet, including the position\nwhere it should be placed. The chartId\nfield is optional; if one is not set, an id will be randomly generated. (It\nis an error to specify the ID of a chart that already exists.)", - "$ref": "EmbeddedChart" - } - }, - "id": "AddChartRequest" + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" }, - "UpdateProtectedRangeRequest": { - "description": "Updates an existing protected range with the specified\nprotectedRangeId.", - "type": "object", - "properties": { - "protectedRange": { - "$ref": "ProtectedRange", - "description": "The protected range to update with the new properties." - }, - "fields": { - "description": "The fields that should be updated. At least one field must be specified.\nThe root `protectedRange` is implied and should not be specified.\nA single `\"*\"` can be used as short-hand for listing every field.", - "format": "google-fieldmask", - "type": "string" - } - }, - "id": "UpdateProtectedRangeRequest" + "bearer_token": { + "type": "string", + "location": "query", + "description": "OAuth bearer token." }, - "TextFormat": { - "description": "The format of a run of text in a cell.\nAbsent values indicate that the field isn't specified.", - "type": "object", - "properties": { - "fontFamily": { - "description": "The font family.", - "type": "string" - }, - "italic": { - "description": "True if the text is italicized.", - "type": "boolean" - }, - "strikethrough": { - "description": "True if the text has a strikethrough.", - "type": "boolean" - }, - "fontSize": { - "description": "The size of the font.", - "format": "int32", - "type": "integer" - }, - "underline": { - "description": "True if the text is underlined.", - "type": "boolean" - }, - "bold": { - "description": "True if the text is bold.", - "type": "boolean" - }, - "foregroundColor": { - "$ref": "Color", - "description": "The foreground color of the text." - } - }, - "id": "TextFormat" + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" }, - "AddSheetResponse": { - "description": "The result of adding a sheet.", - "type": "object", - "properties": { - "properties": { - "description": "The properties of the newly added sheet.", - "$ref": "SheetProperties" - } - }, - "id": "AddSheetResponse" + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" }, - "AddFilterViewResponse": { - "properties": { - "filter": { - "$ref": "FilterView", - "description": "The newly added filter view." 
- } - }, - "id": "AddFilterViewResponse", - "description": "The result of adding a filter view.", - "type": "object" - } - }, - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "protocol": "rest", - "canonicalName": "Sheets", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/drive": { - "description": "View and manage the files in your Google Drive" - }, - "https://www.googleapis.com/auth/drive.readonly": { - "description": "View the files in your Google Drive" - }, - "https://www.googleapis.com/auth/spreadsheets.readonly": { - "description": "View your Google Spreadsheets" - }, - "https://www.googleapis.com/auth/spreadsheets": { - "description": "View and manage your spreadsheets in Google Drive" - } - } + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "$.xgafv": { + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ] + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" } }, - "rootUrl": "https://sheets.googleapis.com/", - "ownerDomain": "google.com", - "name": "sheets", - "batchPath": "batch", - "title": "Google Sheets API" + "version": "v4", + "baseUrl": "https://sheets.googleapis.com/", + "servicePath": "", + "description": "Reads and writes Google Sheets.", + "kind": "discovery#restDescription", + "basePath": "", + "revision": "20170221", + "documentationLink": "https://developers.google.com/sheets/", + "id": "sheets:v4", + "discoveryVersion": "v1", + "version_module": "True" } diff --git a/vendor/google.golang.org/api/sheets/v4/sheets-gen.go b/vendor/google.golang.org/api/sheets/v4/sheets-gen.go index d346f1f8f..5a3f027b6 100644 --- a/vendor/google.golang.org/api/sheets/v4/sheets-gen.go +++ b/vendor/google.golang.org/api/sheets/v4/sheets-gen.go @@ -70,9 +70,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Spreadsheets *SpreadsheetsService } @@ -84,6 +85,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewSpreadsheetsService(s *Service) *SpreadsheetsService { rs := &SpreadsheetsService{s: s} rs.Sheets = NewSpreadsheetsSheetsService(s) @@ -4002,6 +4007,59 @@ func (s *InterpolationPoint) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// IterativeCalculationSettings: Settings to control how circular +// dependencies are resolved with iterative +// calculation. +type IterativeCalculationSettings struct { + // ConvergenceThreshold: When iterative calculation is enabled, the + // threshold value such that + // calculation rounds stop when succesive results differ by less. + ConvergenceThreshold float64 `json:"convergenceThreshold,omitempty"` + + // MaxIterations: When iterative calculation is enabled, the maximum + // number of calculation + // rounds to perform during iterative calculation. + MaxIterations int64 `json:"maxIterations,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ConvergenceThreshold") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConvergenceThreshold") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *IterativeCalculationSettings) MarshalJSON() ([]byte, error) { + type noMethod IterativeCalculationSettings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *IterativeCalculationSettings) UnmarshalJSON(data []byte) error { + type noMethod IterativeCalculationSettings + var s1 struct { + ConvergenceThreshold gensupport.JSONFloat64 `json:"convergenceThreshold"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.ConvergenceThreshold = float64(s1.ConvergenceThreshold) + return nil +} + // MergeCellsRequest: Merges all cells in the range. type MergeCellsRequest struct { // MergeType: How the cells should be merged. @@ -5469,6 +5527,13 @@ type SpreadsheetProperties struct { // This field is read-only. DefaultFormat *CellFormat `json:"defaultFormat,omitempty"` + // IterativeCalculationSettings: Determines whether and how circular + // references are resolved with iterative + // calculation. Absence of this field means that circular references + // will + // result in calculation errors. + IterativeCalculationSettings *IterativeCalculationSettings `json:"iterativeCalculationSettings,omitempty"` + // Locale: The locale of the spreadsheet in one of the following // formats: // @@ -6434,6 +6499,7 @@ func (c *SpreadsheetsBatchUpdateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchupdatespreadsheetrequest) if err != nil { @@ -6568,6 +6634,7 @@ func (c *SpreadsheetsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.spreadsheet) if err != nil { @@ -6739,6 +6806,7 @@ func (c *SpreadsheetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6886,6 +6954,7 @@ func (c *SpreadsheetsSheetsCopyToCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.copysheettoanotherspreadsheetrequest) if err != nil { @@ -7116,6 +7185,7 @@ func (c *SpreadsheetsValuesAppendCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.valuerange) if err != nil { @@ -7308,6 +7378,7 @@ func (c *SpreadsheetsValuesBatchClearCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchclearvaluesrequest) if err != nil { @@ -7510,6 +7581,7 @@ func (c *SpreadsheetsValuesBatchGetCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7680,6 +7752,7 @@ func (c *SpreadsheetsValuesBatchUpdateCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchupdatevaluesrequest) if err != nil { @@ -7821,6 +7894,7 @@ func (c *SpreadsheetsValuesClearCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.clearvaluesrequest) if err != nil { @@ -8025,6 +8099,7 @@ func (c *SpreadsheetsValuesGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8256,6 +8331,7 @@ func (c *SpreadsheetsValuesUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.valuerange) if err != nil { diff --git a/vendor/google.golang.org/api/siteverification/v1/siteverification-gen.go b/vendor/google.golang.org/api/siteverification/v1/siteverification-gen.go index 0b3867ec2..902d7e334 100644 --- a/vendor/google.golang.org/api/siteverification/v1/siteverification-gen.go +++ b/vendor/google.golang.org/api/siteverification/v1/siteverification-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only WebResource *WebResourceService } @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewWebResourceService(s *Service) *WebResourceService { rs := &WebResourceService{s: s} return rs @@ -347,6 +352,7 @@ func (c *WebResourceDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "webResource/{id}") @@ -453,6 +459,7 @@ func (c *WebResourceGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -581,6 +588,7 @@ func (c *WebResourceGetTokenCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.siteverificationwebresourcegettokenrequest) if err != nil { @@ -702,6 +710,7 @@ func (c *WebResourceInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.siteverificationwebresourceresource) if err != nil { @@ -841,6 +850,7 @@ func (c *WebResourceListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -957,6 +967,7 @@ func (c *WebResourcePatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.siteverificationwebresourceresource) if err != nil { @@ -1091,6 +1102,7 @@ func (c *WebResourceUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.siteverificationwebresourceresource) if err != nil { diff --git a/vendor/google.golang.org/api/slides/v1/slides-api.json b/vendor/google.golang.org/api/slides/v1/slides-api.json index 42d567729..4f212e08e 100644 --- a/vendor/google.golang.org/api/slides/v1/slides-api.json +++ b/vendor/google.golang.org/api/slides/v1/slides-api.json @@ -1,1399 +1,928 @@ { + "version": "v1", + "baseUrl": "https://slides.googleapis.com/", + "description": "An API for creating and editing Google Slides presentations.", + "kind": "discovery#restDescription", + "servicePath": "", + "basePath": "", "id": "slides:v1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/drive": { - "description": "View and manage the files in your Google Drive" - }, - "https://www.googleapis.com/auth/spreadsheets.readonly": { - "description": "View your Google Spreadsheets" + "documentationLink": "https://developers.google.com/slides/", + "revision": "20170216", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "DeleteTableRowRequest": { + "description": "Deletes a row from a table.", + "type": "object", + "properties": { + "cellLocation": { + "$ref": "TableCellLocation", + "description": "The reference table cell location from which a row will be deleted.\n\nThe row this cell spans will be deleted. If this is a merged cell, multiple\nrows will be deleted. If no rows remain in the table after this deletion,\nthe whole table is deleted." 
}, - "https://www.googleapis.com/auth/presentations": { - "description": "View and manage your Google Slides presentations" + "tableObjectId": { + "description": "The table to delete rows from.", + "type": "string" + } + }, + "id": "DeleteTableRowRequest" + }, + "Bullet": { + "description": "Describes the bullet of a paragraph.", + "type": "object", + "properties": { + "glyph": { + "description": "The rendered bullet glyph for this paragraph.", + "type": "string" }, - "https://www.googleapis.com/auth/presentations.readonly": { - "description": "View your Google Slides presentations" + "nestingLevel": { + "description": "The nesting level of this paragraph in the list.", + "format": "int32", + "type": "integer" }, - "https://www.googleapis.com/auth/drive.readonly": { - "description": "View the files in your Google Drive" + "bulletStyle": { + "$ref": "TextStyle", + "description": "The paragraph specific text style applied to this bullet." }, - "https://www.googleapis.com/auth/spreadsheets": { - "description": "View and manage your spreadsheets in Google Drive" - } - } - } - }, - "description": "An API for creating and editing Google Slides presentations.", - "protocol": "rest", - "title": "Google Slides API", - "resources": { - "presentations": { - "resources": { - "pages": { - "methods": { - "get": { - "id": "slides.presentations.pages.get", - "response": { - "$ref": "Page" - }, - "parameterOrder": [ - "presentationId", - "pageObjectId" - ], - "description": "Gets the latest version of the specified page in the presentation.", - "flatPath": "v1/presentations/{presentationId}/pages/{pageObjectId}", - "httpMethod": "GET", - "parameters": { - "presentationId": { - "description": "The ID of the presentation to retrieve.", - "required": true, - "location": "path", - "type": "string" - }, - "pageObjectId": { - "description": "The object ID of the page to retrieve.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/presentations/{presentationId}/pages/{pageObjectId}", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/presentations", - "https://www.googleapis.com/auth/presentations.readonly" - ] - } - } + "listId": { + "description": "The ID of the list this paragraph belongs to.", + "type": "string" } }, - "methods": { - "get": { - "id": "slides.presentations.get", - "response": { - "$ref": "Presentation" - }, - "parameterOrder": [ - "presentationId" - ], - "description": "Gets the latest version of the specified presentation.", - "flatPath": "v1/presentations/{presentationsId}", - "httpMethod": "GET", - "parameters": { - "presentationId": { - "description": "The ID of the presentation to retrieve.", - "required": true, - "pattern": "^[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1/presentations/{+presentationId}", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/presentations", - "https://www.googleapis.com/auth/presentations.readonly" - ] - }, - "create": { - "id": "slides.presentations.create", - "response": { - "$ref": "Presentation" - }, - "parameterOrder": [], - "description": "Creates a new presentation using the title given in the request. 
Other\nfields in the request are ignored.\nReturns the created presentation.", - "request": { - "$ref": "Presentation" - }, - "flatPath": "v1/presentations", - "httpMethod": "POST", - "parameters": {}, - "path": "v1/presentations", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/presentations" - ] - }, - "batchUpdate": { - "id": "slides.presentations.batchUpdate", - "response": { - "$ref": "BatchUpdatePresentationResponse" - }, - "parameterOrder": [ - "presentationId" - ], - "description": "Applies one or more updates to the presentation.\n\nEach request is validated before\nbeing applied. If any request is not valid, then the entire request will\nfail and nothing will be applied.\n\nSome requests have replies to\ngive you some information about how they are applied. Other requests do\nnot need to return information; these each return an empty reply.\nThe order of replies matches that of the requests.\n\nFor example, suppose you call batchUpdate with four updates, and only the\nthird one returns information. The response would have two empty replies:\nthe reply to the third request, and another empty reply, in that order.\n\nBecause other users may be editing the presentation, the presentation\nmight not exactly reflect your changes: your changes may\nbe altered with respect to collaborator changes. If there are no\ncollaborators, the presentation should reflect your changes. In any case,\nthe updates in your request are guaranteed to be applied together\natomically.", - "request": { - "$ref": "BatchUpdatePresentationRequest" - }, - "flatPath": "v1/presentations/{presentationId}:batchUpdate", - "httpMethod": "POST", - "parameters": { - "presentationId": { - "description": "The presentation to apply the updates to.", - "required": true, - "location": "path", - "type": "string" - } - }, - "path": "v1/presentations/{presentationId}:batchUpdate", - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/presentations", - "https://www.googleapis.com/auth/spreadsheets", - "https://www.googleapis.com/auth/spreadsheets.readonly" - ] + "id": "Bullet" + }, + "OutlineFill": { + "description": "The fill of the outline.", + "type": "object", + "properties": { + "solidFill": { + "$ref": "SolidFill", + "description": "Solid color fill." } - } - } - }, - "schemas": { - "StretchedPictureFill": { - "description": "The stretched picture fill. The page or page element is filled entirely with\nthe specified picture. The picture is stretched to fit its container.", + }, + "id": "OutlineFill" + }, + "TableCellLocation": { + "description": "A location of a single table cell within a table.", "type": "object", "properties": { - "contentUrl": { - "description": "Reading the content_url:\n\nAn URL to a picture with a default lifetime of 30 minutes.\nThis URL is tagged with the account of the requester. Anyone with the URL\neffectively accesses the picture as the original requester. Access to the\npicture may be lost if the presentation's sharing settings change.\n\nWriting the content_url:\n\nThe picture is fetched once at insertion time and a copy is stored for\ndisplay inside the presentation. 
Pictures must be less than 50MB in size,\ncannot exceed 25 megapixels, and must be in either in PNG, JPEG, or GIF\nformat.", - "type": "string" + "rowIndex": { + "description": "The 0-based row index.", + "format": "int32", + "type": "integer" }, - "size": { - "description": "The original size of the picture fill. This field is read-only.", - "$ref": "Size" + "columnIndex": { + "description": "The 0-based column index.", + "format": "int32", + "type": "integer" } }, - "id": "StretchedPictureFill" + "id": "TableCellLocation" }, - "Image": { - "description": "A PageElement kind representing an\nimage.", + "CreateLineResponse": { + "description": "The result of creating a line.", "type": "object", "properties": { - "contentUrl": { - "description": "An URL to an image with a default lifetime of 30 minutes.\nThis URL is tagged with the account of the requester. Anyone with the URL\neffectively accesses the image as the original requester. Access to the\nimage may be lost if the presentation's sharing settings change.", + "objectId": { + "description": "The object ID of the created line.", "type": "string" - }, - "imageProperties": { - "description": "The properties of the image.", - "$ref": "ImageProperties" } }, - "id": "Image" + "id": "CreateLineResponse" }, - "VideoProperties": { - "description": "The properties of the Video.", + "ReplaceAllTextResponse": { + "description": "The result of replacing text.", "type": "object", "properties": { - "outline": { - "description": "The outline of the video. The default outline matches the defaults for new\nvideos created in the Slides editor.", - "$ref": "Outline" + "occurrencesChanged": { + "description": "The number of occurrences changed by replacing all text.", + "format": "int32", + "type": "integer" } }, - "id": "VideoProperties" + "id": "ReplaceAllTextResponse" }, - "CropProperties": { - "description": "The crop properties of an object enclosed in a container. For example, an\nImage.\n\nThe crop properties is represented by the offsets of four edges which define\na crop rectangle. The offsets are measured in percentage from the\ncorresponding edges of the object's original bounding rectangle towards\ninside, relative to the object's original dimensions.\n\n- If the offset is in the interval (0, 1), the corresponding edge of crop\nrectangle is positioned inside of the object's original bounding rectangle.\n- If the offset is negative or greater than 1, the corresponding edge of crop\nrectangle is positioned outside of the object's original bounding rectangle.\n- If the left edge of the crop rectangle is on the right side of its right\nedge, the object will be flipped horizontally.\n- If the top edge of the crop rectangle is below its bottom edge, the object\nwill be flipped vertically.\n- If all offsets and rotation angle is 0, the object is not cropped.\n\nAfter cropping, the content in the crop rectangle will be stretched to fit\nits container.", + "UpdateParagraphStyleRequest": { + "description": "Updates the styling for all of the paragraphs within a Shape or Table that\noverlap with the given text index range.", "type": "object", "properties": { - "rightOffset": { - "description": "The offset specifies the right edge of the crop rectangle that is located\nto the left of the original bounding rectangle right edge, relative to the\nobject's original width.", - "type": "number", - "format": "float" + "cellLocation": { + "description": "The location of the cell in the table containing the paragraph(s) to\nstyle. 
If object_id refers to a table, cell_location must have a value.\nOtherwise, it must not.", + "$ref": "TableCellLocation" }, - "angle": { - "description": "The rotation angle of the crop window around its center, in radians.\nRotation angle is applied after the offset.", - "type": "number", - "format": "float" + "style": { + "$ref": "ParagraphStyle", + "description": "The paragraph's style." }, - "leftOffset": { - "description": "The offset specifies the left edge of the crop rectangle that is located to\nthe right of the original bounding rectangle left edge, relative to the\nobject's original width.", - "type": "number", - "format": "float" + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `style` is implied and\nshould not be specified. A single `\"*\"` can be used as short-hand for\nlisting every field.\n\nFor example, to update the paragraph alignment, set `fields` to\n`\"alignment\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", + "type": "string" }, - "topOffset": { - "description": "The offset specifies the top edge of the crop rectangle that is located\nbelow the original bounding rectangle top edge, relative to the object's\noriginal height.", - "type": "number", - "format": "float" + "objectId": { + "description": "The object ID of the shape or table with the text to be styled.", + "type": "string" }, - "bottomOffset": { - "description": "The offset specifies the bottom edge of the crop rectangle that is located\nabove the original bounding rectangle bottom edge, relative to the object's\noriginal height.", - "type": "number", - "format": "float" + "textRange": { + "description": "The range of text containing the paragraph(s) to style.", + "$ref": "Range" } }, - "id": "CropProperties" + "id": "UpdateParagraphStyleRequest" }, - "TableRange": { - "description": "A table range represents a reference to a subset of a table.\n\nIt's important to note that the cells specified by a table range do not\nnecessarily form a rectangle. For example, let's say we have a 3 x 3 table\nwhere all the cells of the last row are merged together. The table looks\nlike this:\n\n \n [ ]\n\nA table range with location = (0, 0), row span = 3 and column span = 2\nspecifies the following cells:\n\n x x \n [ x ]", + "ColorScheme": { + "description": "The palette of predefined colors for a page.", "type": "object", "properties": { - "location": { - "description": "The starting location of the table range.", - "$ref": "TableCellLocation" - }, - "rowSpan": { - "description": "The row span of the table range.", - "type": "integer", - "format": "int32" - }, - "columnSpan": { - "description": "The column span of the table range.", - "type": "integer", - "format": "int32" - } - }, - "id": "TableRange" - }, - "UpdateTextStyleRequest": { - "description": "Update the styling of text in a Shape or\nTable.", - "type": "object", - "properties": { - "style": { - "description": "The style(s) to set on the text.\n\nIf the value for a particular style matches that of the parent, that style\nwill be set to inherit.\n\nCertain text style changes may cause other changes meant to mirror the\nbehavior of the Slides editor. 
See the documentation of\nTextStyle for more information.", - "$ref": "TextStyle" - }, - "objectId": { - "description": "The object ID of the shape or table with the text to be styled.", - "type": "string" - }, - "fields": { - "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `style` is implied and\nshould not be specified. A single `\"*\"` can be used as short-hand for\nlisting every field.\n\nFor example to update the text style to bold, set `fields` to `\"bold\"`.\n\nTo reset a property to its default value,\ninclude its field name in the field mask but leave the field itself unset.", - "type": "string", - "format": "google-fieldmask" - }, - "textRange": { - "description": "The range of text to style.\n\nThe range may be extended to include adjacent newlines.\n\nIf the range fully contains a paragraph belonging to a list, the\nparagraph's bullet is also updated with the matching text style.", - "$ref": "Range" - }, - "cellLocation": { - "description": "The optional table cell location if the text to be styled is in a table\ncell. If present, the object_id must refer to a table.", - "$ref": "TableCellLocation" - } - }, - "id": "UpdateTextStyleRequest" - }, - "InsertTextRequest": { - "description": "Inserts text into a shape or a table cell.", - "type": "object", - "properties": { - "text": { - "description": "The text to be inserted.\n\nInserting a newline character will implicitly create a new\nParagraphMarker at that index.\nThe paragraph style of the new paragraph will be copied from the paragraph\nat the current insertion index, including lists and bullets.\n\nText styles for inserted text will be determined automatically, generally\npreserving the styling of neighboring text. In most cases, the text will be\nadded to the TextRun that exists at the\ninsertion index.\n\nSome control characters (U+0000-U+0008, U+000C-U+001F) and characters\nfrom the Unicode Basic Multilingual Plane Private Use Area (U+E000-U+F8FF)\nwill be stripped out of the inserted text.", - "type": "string" - }, - "objectId": { - "description": "The object ID of the shape or table where the text will be inserted.", - "type": "string" - }, - "insertionIndex": { - "description": "The index where the text will be inserted, in Unicode code units, based\non TextElement indexes.\n\nThe index is zero-based and is computed from the start of the string.\nThe index may be adjusted to prevent insertions inside Unicode grapheme\nclusters. In these cases, the text will be inserted immediately after the\ngrapheme cluster.", - "type": "integer", - "format": "int32" - }, - "cellLocation": { - "description": "The optional table cell location if the text is to be inserted into a table\ncell. 
If present, the object_id must refer to a table.", - "$ref": "TableCellLocation" - } - }, - "id": "InsertTextRequest" - }, - "RgbColor": { - "description": "An RGB color.", - "type": "object", - "properties": { - "green": { - "description": "The green component of the color, from 0.0 to 1.0.", - "type": "number", - "format": "float" - }, - "blue": { - "description": "The blue component of the color, from 0.0 to 1.0.", - "type": "number", - "format": "float" - }, - "red": { - "description": "The red component of the color, from 0.0 to 1.0.", - "type": "number", - "format": "float" - } - }, - "id": "RgbColor" - }, - "PageElementProperties": { - "description": "Common properties for a page element.\n\nNote: When you initially create a\nPageElement, the API may modify\nthe values of both `size` and `transform`, but the\nvisual size will be unchanged.", - "type": "object", - "properties": { - "size": { - "description": "The size of the element.", - "$ref": "Size" - }, - "pageObjectId": { - "description": "The object ID of the page where the element is located.", - "type": "string" - }, - "transform": { - "description": "The transform for the element.", - "$ref": "AffineTransform" - } - }, - "id": "PageElementProperties" - }, - "DeleteTextRequest": { - "description": "Deletes text from a shape or a table cell.", - "type": "object", - "properties": { - "objectId": { - "description": "The object ID of the shape or table from which the text will be deleted.", - "type": "string" - }, - "textRange": { - "description": "The range of text to delete, based on TextElement indexes.\n\nThere is always an implicit newline character at the end of a shape's or\ntable cell's text that cannot be deleted. `Range.Type.ALL` will use the\ncorrect bounds, but care must be taken when specifying explicit bounds for\nrange types `FROM_START_INDEX` and `FIXED_RANGE`. For example, if the text\nis \"ABC\", followed by an implicit newline, then the maximum value is 2 for\n`text_range.start_index` and 3 for `text_range.end_index`.\n\nDeleting text that crosses a paragraph boundary may result in changes\nto paragraph styles and lists as the two paragraphs are merged.\n\nRanges that include only one code unit of a surrogate pair are expanded to\ninclude both code units.", - "$ref": "Range" - }, - "cellLocation": { - "description": "The optional table cell location if the text is to be deleted from a table\ncell. If present, the object_id must refer to a table.", - "$ref": "TableCellLocation" - } - }, - "id": "DeleteTextRequest" - }, - "ParagraphStyle": { - "description": "Styles that apply to a whole paragraph.\n\nIf this text is contained in a shape with a parent placeholder, then these paragraph styles may be\ninherited from the parent. Which paragraph styles are inherited depend on the\nnesting level of lists:\n\n* A paragraph not in a list will inherit its paragraph style from the\n paragraph at the 0 nesting level of the list inside the parent placeholder.\n* A paragraph in a list will inherit its paragraph style from the paragraph\n at its corresponding nesting level of the list inside the parent\n placeholder.\n\nInherited paragraph styles are represented as unset fields in this message.", - "type": "object", - "properties": { - "lineSpacing": { - "description": "The amount of space between lines, as a percentage of normal, where normal\nis represented as 100.0. 
If unset, the value is inherited from the parent.\nThis property is read-only.", - "type": "number", - "format": "float" - }, - "spacingMode": { - "description": "The spacing mode for the paragraph. This property is read-only.", - "enum": [ - "SPACING_MODE_UNSPECIFIED", - "NEVER_COLLAPSE", - "COLLAPSE_LISTS" - ], - "enumDescriptions": [ - "The spacing mode is inherited from the parent.", - "Paragraph spacing is always rendered.", - "Paragraph spacing is skipped between list elements." - ], - "type": "string" - }, - "alignment": { - "description": "The text alignment for this paragraph. This property is read-only.", - "enum": [ - "ALIGNMENT_UNSPECIFIED", - "START", - "CENTER", - "END", - "JUSTIFIED" - ], - "enumDescriptions": [ - "The paragraph alignment is inherited from the parent.", - "The paragraph is aligned to the start of the line. Left-aligned for\nLTR text, right-aligned otherwise.", - "The paragraph is centered.", - "The paragraph is aligned to the end of the line. Right-aligned for\nLTR text, left-aligned otherwise.", - "The paragraph is justified." - ], - "type": "string" - }, - "spaceAbove": { - "description": "The amount of extra space above the paragraph. If unset, the value is\ninherited from the parent. This property is read-only.", - "$ref": "Dimension" - }, - "direction": { - "description": "The text direction of this paragraph. This property is read-only.", - "enum": [ - "TEXT_DIRECTION_UNSPECIFIED", - "LEFT_TO_RIGHT", - "RIGHT_TO_LEFT" - ], - "enumDescriptions": [ - "The text direction is inherited from the parent.", - "The text goes from left to right.", - "The text goes from right to left." - ], - "type": "string" - }, - "indentEnd": { - "description": "The amount indentation for the paragraph on the side that corresponds to\nthe end of the text, based on the current text direction. If unset, the\nvalue is inherited from the parent. This property is read-only.", - "$ref": "Dimension" - }, - "indentFirstLine": { - "description": "The amount of indentation for the start of the first line of the paragraph.\nIf unset, the value is inherited from the parent. This property is\nread-only.", - "$ref": "Dimension" - }, - "indentStart": { - "description": "The amount indentation for the paragraph on the side that corresponds to\nthe start of the text, based on the current text direction. If unset, the\nvalue is inherited from the parent. This property is read-only.", - "$ref": "Dimension" - }, - "spaceBelow": { - "description": "The amount of extra space above the paragraph. If unset, the value is\ninherited from the parent. This property is read-only.", - "$ref": "Dimension" - } - }, - "id": "ParagraphStyle" - }, - "Page": { - "description": "A page in a presentation.", - "type": "object", - "properties": { - "slideProperties": { - "description": "Slide specific properties. Only set if page_type = SLIDE.", - "$ref": "SlideProperties" - }, - "pageType": { - "description": "The type of the page.", - "enum": [ - "SLIDE", - "MASTER", - "LAYOUT" - ], - "enumDescriptions": [ - "A slide page.", - "A master slide page.", - "A layout page." 
- ], - "type": "string" - }, - "pageElements": { - "description": "The page elements rendered on the page.", + "colors": { + "description": "The ThemeColorType and corresponding concrete color pairs.", "type": "array", "items": { - "$ref": "PageElement" + "$ref": "ThemeColorPair" } - }, - "pageProperties": { - "description": "The properties of the page.", - "$ref": "PageProperties" - }, - "layoutProperties": { - "description": "Layout specific properties. Only set if page_type = LAYOUT.", - "$ref": "LayoutProperties" - }, - "objectId": { - "description": "The object ID for this page. Object IDs used by\nPage and\nPageElement share the same namespace.", - "type": "string" } }, - "id": "Page" + "id": "ColorScheme" }, - "UpdateShapePropertiesRequest": { - "description": "Update the properties of a Shape.", + "Shape": { + "description": "A PageElement kind representing a\ngeneric shape that does not have a more specific classification.", "type": "object", "properties": { - "shapeProperties": { - "description": "The shape properties to update.", - "$ref": "ShapeProperties" - }, - "objectId": { - "description": "The object ID of the shape the updates are applied to.", - "type": "string" - }, - "fields": { - "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `shapeProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the shape background solid fill color, set `fields`\nto `\"shapeBackgroundFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "shapeType": { + "description": "The type of the shape.", "type": "string", - "format": "google-fieldmask" - } - }, - "id": "UpdateShapePropertiesRequest" - }, - "CreateLineResponse": { - "description": "The result of creating a line.", - "type": "object", - "properties": { - "objectId": { - "description": "The object ID of the created line.", - "type": "string" - } - }, - "id": "CreateLineResponse" - }, - "Presentation": { - "description": "A Google Slides presentation.", - "type": "object", - "properties": { - "title": { - "description": "The title of the presentation.", - "type": "string" - }, - "locale": { - "description": "The locale of the presentation, as an IETF BCP 47 language tag.", - "type": "string" - }, - "slides": { - "description": "The slides in the presentation.\nA slide inherits properties from a slide layout.", - "type": "array", - "items": { - "$ref": "Page" - } - }, - "masters": { - "description": "The slide masters in the presentation. A slide master contains all common\npage elements and the common properties for a set of layouts. They serve\nthree purposes:\n\n- Placeholder shapes on a master contain the default text styles and shape\n properties of all placeholder shapes on pages that use that master.\n- The master page properties define the common page properties inherited by\n its layouts.\n- Any other shapes on the master slide will appear on all slides using that\n master, regardless of their layout.", - "type": "array", - "items": { - "$ref": "Page" - } - }, - "pageSize": { - "description": "The size of pages in the presentation.", - "$ref": "Size" - }, - "presentationId": { - "description": "The ID of the presentation.", - "type": "string" - }, - "layouts": { - "description": "The layouts in the presentation. 
A layout is a template that determines\nhow content is arranged and styled on the slides that inherit from that\nlayout.", - "type": "array", - "items": { - "$ref": "Page" - } - } - }, - "id": "Presentation" - }, - "CreateImageRequest": { - "description": "Creates an image.", - "type": "object", - "properties": { - "url": { - "description": "The image URL.\n\nThe image is fetched once at insertion time and a copy is stored for\ndisplay inside the presentation. Images must be less than 50MB in size,\ncannot exceed 25 megapixels, and must be in either in PNG, JPEG, or GIF\nformat.", - "type": "string" - }, - "objectId": { - "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", - "type": "string" - }, - "elementProperties": { - "description": "The element properties for the image.\n\nWhen the aspect ratio of the provided size does not match the image aspect\nratio, the image is scaled and centered with respect to the size in order\nto maintain aspect ratio. The provided transform is applied after this\noperation.", - "$ref": "PageElementProperties" - } - }, - "id": "CreateImageRequest" - }, - "SlideProperties": { - "description": "The properties of Page that are only\nrelevant for pages with page_type SLIDE.", - "type": "object", - "properties": { - "layoutObjectId": { - "description": "The object ID of the layout that this slide is based on.", - "type": "string" - }, - "masterObjectId": { - "description": "The object ID of the master that this slide is based on.", - "type": "string" - } - }, - "id": "SlideProperties" - }, - "UpdatePageElementTransformRequest": { - "description": "Updates the transform of a page element.", - "type": "object", - "properties": { - "applyMode": { - "description": "The apply mode of the transform update.", - "enum": [ - "APPLY_MODE_UNSPECIFIED", - "RELATIVE", - "ABSOLUTE" - ], - "enumDescriptions": [ - "Unspecified mode.", - "Applies the new AffineTransform matrix to the existing one, and\nreplaces the existing one with the resulting concatenation.", - "Replaces the existing AffineTransform matrix with the new one." - ], - "type": "string" - }, - "objectId": { - "description": "The object ID of the page element to update.", - "type": "string" - }, - "transform": { - "description": "The input transform matrix used to update the page element.", - "$ref": "AffineTransform" - } - }, - "id": "UpdatePageElementTransformRequest" - }, - "List": { - "description": "A List describes the look and feel of bullets belonging to paragraphs\nassociated with a list. A paragraph that is part of a list has an implicit\nreference to that list's ID.", - "type": "object", - "properties": { - "nestingLevel": { - "description": "A map of nesting levels to the properties of bullets at the associated\nlevel. 
A list has at most nine levels of nesting, so the possible values\nfor the keys of this map are 0 through 8, inclusive.", - "additionalProperties": { - "$ref": "NestingLevel" - }, - "type": "object" - }, - "listId": { - "description": "The ID of the list.", - "type": "string" - } - }, - "id": "List" - }, - "CreateVideoResponse": { - "description": "The result of creating a video.", - "type": "object", - "properties": { - "objectId": { - "description": "The object ID of the created video.", - "type": "string" - } - }, - "id": "CreateVideoResponse" - }, - "InsertTableRowsRequest": { - "description": "Inserts rows into a table.", - "type": "object", - "properties": { - "tableObjectId": { - "description": "The table to insert rows into.", - "type": "string" - }, - "insertBelow": { - "description": "Whether to insert new rows below the reference cell location.\n\n- `True`: insert below the cell.\n- `False`: insert above the cell.", - "type": "boolean" - }, - "cellLocation": { - "description": "The reference table cell location from which rows will be inserted.\n\nA new row will be inserted above (or below) the row where the reference\ncell is. If the reference cell is a merged cell, a new row will be\ninserted above (or below) the merged cell.", - "$ref": "TableCellLocation" - }, - "number": { - "description": "The number of rows to be inserted. Maximum 20 per request.", - "type": "integer", - "format": "int32" - } - }, - "id": "InsertTableRowsRequest" - }, - "UpdateVideoPropertiesRequest": { - "description": "Update the properties of a Video.", - "type": "object", - "properties": { - "videoProperties": { - "description": "The video properties to update.", - "$ref": "VideoProperties" - }, - "objectId": { - "description": "The object ID of the video the updates are applied to.", - "type": "string" - }, - "fields": { - "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `videoProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the video outline color, set `fields` to\n`\"outline.outlineFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", - "type": "string", - "format": "google-fieldmask" - } - }, - "id": "UpdateVideoPropertiesRequest" - }, - "OpaqueColor": { - "description": "A themeable solid color value.", - "type": "object", - "properties": { - "rgbColor": { - "description": "An opaque RGB color.", - "$ref": "RgbColor" - }, - "themeColor": { - "description": "An opaque theme color.", - "enum": [ - "THEME_COLOR_TYPE_UNSPECIFIED", - "DARK1", - "LIGHT1", - "DARK2", - "LIGHT2", - "ACCENT1", - "ACCENT2", - "ACCENT3", - "ACCENT4", - "ACCENT5", - "ACCENT6", - "HYPERLINK", - "FOLLOWED_HYPERLINK", - "TEXT1", - "BACKGROUND1", - "TEXT2", - "BACKGROUND2" - ], - "enumDescriptions": [ - "Unspecified theme color. 
This value should not be used.", - "Represents the first dark color.", - "Represents the first light color.", - "Represents the second dark color.", - "Represents the second light color.", - "Represents the first accent color.", - "Represents the second accent color.", - "Represents the third accent color.", - "Represents the fourth accent color.", - "Represents the fifth accent color.", - "Represents the sixth accent color.", - "Represents the color to use for hyperlinks.", - "Represents the color to use for visited hyperlinks.", - "Represents the first text color.", - "Represents the first background color.", - "Represents the second text color.", - "Represents the second background color." - ], - "type": "string" - } - }, - "id": "OpaqueColor" - }, - "Response": { - "description": "A single response from an update.", - "type": "object", - "properties": { - "createTable": { - "description": "The result of creating a table.", - "$ref": "CreateTableResponse" - }, - "replaceAllText": { - "description": "The result of replacing text.", - "$ref": "ReplaceAllTextResponse" - }, - "createLine": { - "description": "The result of creating a line.", - "$ref": "CreateLineResponse" - }, - "createSheetsChart": { - "description": "The result of creating a Google Sheets chart.", - "$ref": "CreateSheetsChartResponse" - }, - "createSlide": { - "description": "The result of creating a slide.", - "$ref": "CreateSlideResponse" - }, - "createShape": { - "description": "The result of creating a shape.", - "$ref": "CreateShapeResponse" - }, - "replaceAllShapesWithImage": { - "description": "The result of replacing all shapes matching some criteria with an\nimage.", - "$ref": "ReplaceAllShapesWithImageResponse" - }, - "createVideo": { - "description": "The result of creating a video.", - "$ref": "CreateVideoResponse" - }, - "createImage": { - "description": "The result of creating an image.", - "$ref": "CreateImageResponse" - }, - "duplicateObject": { - "description": "The result of duplicating an object.", - "$ref": "DuplicateObjectResponse" - } - }, - "id": "Response" - }, - "LineProperties": { - "description": "The properties of the Line.\n\nWhen unset, these fields default to values that match the appearance of\nnew lines created in the Slides editor.", - "type": "object", - "properties": { - "weight": { - "description": "The thickness of the line.", - "$ref": "Dimension" - }, - "endArrow": { - "description": "The style of the arrow at the end of the line.", - "enum": [ - "ARROW_STYLE_UNSPECIFIED", - "NONE", - "STEALTH_ARROW", - "FILL_ARROW", - "FILL_CIRCLE", - "FILL_SQUARE", - "FILL_DIAMOND", - "OPEN_ARROW", - "OPEN_CIRCLE", - "OPEN_SQUARE", - "OPEN_DIAMOND" - ], - "enumDescriptions": [ - "An unspecified arrow style.", - "No arrow.", - "Arrow with notched back. Corresponds to ECMA-376 ST_LineEndType value\n'stealth'.", - "Filled arrow. Corresponds to ECMA-376 ST_LineEndType value 'triangle'.", - "Filled circle. Corresponds to ECMA-376 ST_LineEndType value 'oval'.", - "Filled square.", - "Filled diamond. Corresponds to ECMA-376 ST_LineEndType value 'diamond'.", - "Hollow arrow.", - "Hollow circle.", - "Hollow square.", - "Hollow diamond." - ], - "type": "string" - }, - "link": { - "description": "The hyperlink destination of the line. If unset, there is no link.", - "$ref": "Link" - }, - "lineFill": { - "description": "The fill of the line. 
The default line fill matches the defaults for new\nlines created in the Slides editor.", - "$ref": "LineFill" - }, - "dashStyle": { - "description": "The dash style of the line.", - "enum": [ - "DASH_STYLE_UNSPECIFIED", - "SOLID", - "DOT", - "DASH", - "DASH_DOT", - "LONG_DASH", - "LONG_DASH_DOT" - ], "enumDescriptions": [ - "Unspecified dash style.", - "Solid line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'solid'.\nThis is the default dash style.", - "Dotted line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dot'.", - "Dashed line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dash'.", - "Alternating dashes and dots. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'dashDot'.", - "Line with large dashes. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'lgDash'.", - "Alternating large dashes and dots. Corresponds to ECMA-376\nST_PresetLineDashVal value 'lgDashDot'." + "The shape type that is not predefined.", + "Text box shape.", + "Rectangle shape. Corresponds to ECMA-376 ST_ShapeType 'rect'.", + "Round corner rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'roundRect'", + "Ellipse shape. Corresponds to ECMA-376 ST_ShapeType 'ellipse'", + "Curved arc shape. Corresponds to ECMA-376 ST_ShapeType 'arc'", + "Bent arrow shape. Corresponds to ECMA-376 ST_ShapeType 'bentArrow'", + "Bent up arrow shape. Corresponds to ECMA-376 ST_ShapeType 'bentUpArrow'", + "Bevel shape. Corresponds to ECMA-376 ST_ShapeType 'bevel'", + "Block arc shape. Corresponds to ECMA-376 ST_ShapeType 'blockArc'", + "Brace pair shape. Corresponds to ECMA-376 ST_ShapeType 'bracePair'", + "Bracket pair shape. Corresponds to ECMA-376 ST_ShapeType 'bracketPair'", + "Can shape. Corresponds to ECMA-376 ST_ShapeType 'can'", + "Chevron shape. Corresponds to ECMA-376 ST_ShapeType 'chevron'", + "Chord shape. Corresponds to ECMA-376 ST_ShapeType 'chord'", + "Cloud shape. Corresponds to ECMA-376 ST_ShapeType 'cloud'", + "Corner shape. Corresponds to ECMA-376 ST_ShapeType 'corner'", + "Cube shape. Corresponds to ECMA-376 ST_ShapeType 'cube'", + "Curved down arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedDownArrow'", + "Curved left arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedLeftArrow'", + "Curved right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedRightArrow'", + "Curved up arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedUpArrow'", + "Decagon shape. Corresponds to ECMA-376 ST_ShapeType 'decagon'", + "Diagonal stripe shape. Corresponds to ECMA-376 ST_ShapeType 'diagStripe'", + "Diamond shape. Corresponds to ECMA-376 ST_ShapeType 'diamond'", + "Dodecagon shape. Corresponds to ECMA-376 ST_ShapeType 'dodecagon'", + "Donut shape. Corresponds to ECMA-376 ST_ShapeType 'donut'", + "Double wave shape. Corresponds to ECMA-376 ST_ShapeType 'doubleWave'", + "Down arrow shape. Corresponds to ECMA-376 ST_ShapeType 'downArrow'", + "Callout down arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'downArrowCallout'", + "Folded corner shape. Corresponds to ECMA-376 ST_ShapeType 'foldedCorner'", + "Frame shape. Corresponds to ECMA-376 ST_ShapeType 'frame'", + "Half frame shape. Corresponds to ECMA-376 ST_ShapeType 'halfFrame'", + "Heart shape. Corresponds to ECMA-376 ST_ShapeType 'heart'", + "Heptagon shape. Corresponds to ECMA-376 ST_ShapeType 'heptagon'", + "Hexagon shape. Corresponds to ECMA-376 ST_ShapeType 'hexagon'", + "Home plate shape. Corresponds to ECMA-376 ST_ShapeType 'homePlate'", + "Horizontal scroll shape. 
Corresponds to ECMA-376 ST_ShapeType\n'horizontalScroll'", + "Irregular seal 1 shape. Corresponds to ECMA-376 ST_ShapeType\n'irregularSeal1'", + "Irregular seal 2 shape. Corresponds to ECMA-376 ST_ShapeType\n'irregularSeal2'", + "Left arrow shape. Corresponds to ECMA-376 ST_ShapeType 'leftArrow'", + "Callout left arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftArrowCallout'", + "Left brace shape. Corresponds to ECMA-376 ST_ShapeType 'leftBrace'", + "Left bracket shape. Corresponds to ECMA-376 ST_ShapeType 'leftBracket'", + "Left right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftRightArrow'", + "Callout left right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftRightArrowCallout'", + "Left right up arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftRightUpArrow'", + "Left up arrow shape. Corresponds to ECMA-376 ST_ShapeType 'leftUpArrow'", + "Lightning bolt shape. Corresponds to ECMA-376 ST_ShapeType\n'lightningBolt'", + "Divide math shape. Corresponds to ECMA-376 ST_ShapeType 'mathDivide'", + "Equal math shape. Corresponds to ECMA-376 ST_ShapeType 'mathEqual'", + "Minus math shape. Corresponds to ECMA-376 ST_ShapeType 'mathMinus'", + "Multiply math shape. Corresponds to ECMA-376 ST_ShapeType 'mathMultiply'", + "Not equal math shape. Corresponds to ECMA-376 ST_ShapeType 'mathNotEqual'", + "Plus math shape. Corresponds to ECMA-376 ST_ShapeType 'mathPlus'", + "Moon shape. Corresponds to ECMA-376 ST_ShapeType 'moon'", + "No smoking shape. Corresponds to ECMA-376 ST_ShapeType 'noSmoking'", + "Notched right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'notchedRightArrow'", + "Octagon shape. Corresponds to ECMA-376 ST_ShapeType 'octagon'", + "Parallelogram shape. Corresponds to ECMA-376 ST_ShapeType 'parallelogram'", + "Pentagon shape. Corresponds to ECMA-376 ST_ShapeType 'pentagon'", + "Pie shape. Corresponds to ECMA-376 ST_ShapeType 'pie'", + "Plaque shape. Corresponds to ECMA-376 ST_ShapeType 'plaque'", + "Plus shape. Corresponds to ECMA-376 ST_ShapeType 'plus'", + "Quad-arrow shape. Corresponds to ECMA-376 ST_ShapeType 'quadArrow'", + "Callout quad-arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'quadArrowCallout'", + "Ribbon shape. Corresponds to ECMA-376 ST_ShapeType 'ribbon'", + "Ribbon 2 shape. Corresponds to ECMA-376 ST_ShapeType 'ribbon2'", + "Right arrow shape. Corresponds to ECMA-376 ST_ShapeType 'rightArrow'", + "Callout right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'rightArrowCallout'", + "Right brace shape. Corresponds to ECMA-376 ST_ShapeType 'rightBrace'", + "Right bracket shape. Corresponds to ECMA-376 ST_ShapeType 'rightBracket'", + "One round corner rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'round1Rect'", + "Two diagonal round corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'round2DiagRect'", + "Two same-side round corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'round2SameRect'", + "Right triangle shape. Corresponds to ECMA-376 ST_ShapeType 'rtTriangle'", + "Smiley face shape. Corresponds to ECMA-376 ST_ShapeType 'smileyFace'", + "One snip corner rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'snip1Rect'", + "Two diagonal snip corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'snip2DiagRect'", + "Two same-side snip corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'snip2SameRect'", + "One snip one round corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'snipRoundRect'", + "Ten pointed star shape. 
Corresponds to ECMA-376 ST_ShapeType 'star10'", + "Twelve pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star12'", + "Sixteen pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star16'", + "Twenty four pointed star shape. Corresponds to ECMA-376 ST_ShapeType\n'star24'", + "Thirty two pointed star shape. Corresponds to ECMA-376 ST_ShapeType\n'star32'", + "Four pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star4'", + "Five pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star5'", + "Six pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star6'", + "Seven pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star7'", + "Eight pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star8'", + "Striped right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'stripedRightArrow'", + "Sun shape. Corresponds to ECMA-376 ST_ShapeType 'sun'", + "Trapezoid shape. Corresponds to ECMA-376 ST_ShapeType 'trapezoid'", + "Triangle shape. Corresponds to ECMA-376 ST_ShapeType 'triangle'", + "Up arrow shape. Corresponds to ECMA-376 ST_ShapeType 'upArrow'", + "Callout up arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'upArrowCallout'", + "Up down arrow shape. Corresponds to ECMA-376 ST_ShapeType 'upDownArrow'", + "U-turn arrow shape. Corresponds to ECMA-376 ST_ShapeType 'uturnArrow'", + "Vertical scroll shape. Corresponds to ECMA-376 ST_ShapeType\n'verticalScroll'", + "Wave shape. Corresponds to ECMA-376 ST_ShapeType 'wave'", + "Callout wedge ellipse shape. Corresponds to ECMA-376 ST_ShapeType\n'wedgeEllipseCallout'", + "Callout wedge rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'wedgeRectCallout'", + "Callout wedge round rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'wedgeRoundRectCallout'", + "Alternate process flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartAlternateProcess'", + "Collate flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartCollate'", + "Connector flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartConnector'", + "Decision flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartDecision'", + "Delay flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartDelay'", + "Display flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartDisplay'", + "Document flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartDocument'", + "Extract flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartExtract'", + "Input output flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartInputOutput'", + "Internal storage flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartInternalStorage'", + "Magnetic disk flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMagneticDisk'", + "Magnetic drum flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMagneticDrum'", + "Magnetic tape flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMagneticTape'", + "Manual input flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartManualInput'", + "Manual operation flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartManualOperation'", + "Merge flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartMerge'", + "Multi-document flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMultidocument'", + "Offline storage flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartOfflineStorage'", + "Off-page connector flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartOffpageConnector'", + "Online storage flow shape. 
Corresponds to ECMA-376 ST_ShapeType\n'flowChartOnlineStorage'", + "Or flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartOr'", + "Predefined process flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPredefinedProcess'", + "Preparation flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPreparation'", + "Process flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartProcess'", + "Punched card flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPunchedCard'", + "Punched tape flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPunchedTape'", + "Sort flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartSort'", + "Summing junction flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartSummingJunction'", + "Terminator flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartTerminator'", + "East arrow shape.", + "Northeast arrow shape.", + "North arrow shape.", + "Speech shape.", + "Star burst shape.", + "Teardrop shape. Corresponds to ECMA-376 ST_ShapeType 'teardrop'", + "Ellipse ribbon shape. Corresponds to ECMA-376 ST_ShapeType\n'ellipseRibbon'", + "Ellipse ribbon 2 shape. Corresponds to ECMA-376 ST_ShapeType\n'ellipseRibbon2'", + "Callout cloud shape. Corresponds to ECMA-376 ST_ShapeType 'cloudCallout'", + "Custom shape." ], - "type": "string" - }, - "startArrow": { - "description": "The style of the arrow at the beginning of the line.", "enum": [ - "ARROW_STYLE_UNSPECIFIED", - "NONE", - "STEALTH_ARROW", - "FILL_ARROW", - "FILL_CIRCLE", - "FILL_SQUARE", - "FILL_DIAMOND", - "OPEN_ARROW", - "OPEN_CIRCLE", - "OPEN_SQUARE", - "OPEN_DIAMOND" - ], - "enumDescriptions": [ - "An unspecified arrow style.", - "No arrow.", - "Arrow with notched back. Corresponds to ECMA-376 ST_LineEndType value\n'stealth'.", - "Filled arrow. Corresponds to ECMA-376 ST_LineEndType value 'triangle'.", - "Filled circle. Corresponds to ECMA-376 ST_LineEndType value 'oval'.", - "Filled square.", - "Filled diamond. Corresponds to ECMA-376 ST_LineEndType value 'diamond'.", - "Hollow arrow.", - "Hollow circle.", - "Hollow square.", - "Hollow diamond." 
- ], - "type": "string" - } - }, - "id": "LineProperties" - }, - "Table": { - "description": "A PageElement kind representing a\ntable.", - "type": "object", - "properties": { - "columns": { - "description": "Number of columns in the table.", - "type": "integer", - "format": "int32" - }, - "tableColumns": { - "description": "Properties of each column.", - "type": "array", - "items": { - "$ref": "TableColumnProperties" - } - }, - "rows": { - "description": "Number of rows in the table.", - "type": "integer", - "format": "int32" + "TYPE_UNSPECIFIED", + "TEXT_BOX", + "RECTANGLE", + "ROUND_RECTANGLE", + "ELLIPSE", + "ARC", + "BENT_ARROW", + "BENT_UP_ARROW", + "BEVEL", + "BLOCK_ARC", + "BRACE_PAIR", + "BRACKET_PAIR", + "CAN", + "CHEVRON", + "CHORD", + "CLOUD", + "CORNER", + "CUBE", + "CURVED_DOWN_ARROW", + "CURVED_LEFT_ARROW", + "CURVED_RIGHT_ARROW", + "CURVED_UP_ARROW", + "DECAGON", + "DIAGONAL_STRIPE", + "DIAMOND", + "DODECAGON", + "DONUT", + "DOUBLE_WAVE", + "DOWN_ARROW", + "DOWN_ARROW_CALLOUT", + "FOLDED_CORNER", + "FRAME", + "HALF_FRAME", + "HEART", + "HEPTAGON", + "HEXAGON", + "HOME_PLATE", + "HORIZONTAL_SCROLL", + "IRREGULAR_SEAL_1", + "IRREGULAR_SEAL_2", + "LEFT_ARROW", + "LEFT_ARROW_CALLOUT", + "LEFT_BRACE", + "LEFT_BRACKET", + "LEFT_RIGHT_ARROW", + "LEFT_RIGHT_ARROW_CALLOUT", + "LEFT_RIGHT_UP_ARROW", + "LEFT_UP_ARROW", + "LIGHTNING_BOLT", + "MATH_DIVIDE", + "MATH_EQUAL", + "MATH_MINUS", + "MATH_MULTIPLY", + "MATH_NOT_EQUAL", + "MATH_PLUS", + "MOON", + "NO_SMOKING", + "NOTCHED_RIGHT_ARROW", + "OCTAGON", + "PARALLELOGRAM", + "PENTAGON", + "PIE", + "PLAQUE", + "PLUS", + "QUAD_ARROW", + "QUAD_ARROW_CALLOUT", + "RIBBON", + "RIBBON_2", + "RIGHT_ARROW", + "RIGHT_ARROW_CALLOUT", + "RIGHT_BRACE", + "RIGHT_BRACKET", + "ROUND_1_RECTANGLE", + "ROUND_2_DIAGONAL_RECTANGLE", + "ROUND_2_SAME_RECTANGLE", + "RIGHT_TRIANGLE", + "SMILEY_FACE", + "SNIP_1_RECTANGLE", + "SNIP_2_DIAGONAL_RECTANGLE", + "SNIP_2_SAME_RECTANGLE", + "SNIP_ROUND_RECTANGLE", + "STAR_10", + "STAR_12", + "STAR_16", + "STAR_24", + "STAR_32", + "STAR_4", + "STAR_5", + "STAR_6", + "STAR_7", + "STAR_8", + "STRIPED_RIGHT_ARROW", + "SUN", + "TRAPEZOID", + "TRIANGLE", + "UP_ARROW", + "UP_ARROW_CALLOUT", + "UP_DOWN_ARROW", + "UTURN_ARROW", + "VERTICAL_SCROLL", + "WAVE", + "WEDGE_ELLIPSE_CALLOUT", + "WEDGE_RECTANGLE_CALLOUT", + "WEDGE_ROUND_RECTANGLE_CALLOUT", + "FLOW_CHART_ALTERNATE_PROCESS", + "FLOW_CHART_COLLATE", + "FLOW_CHART_CONNECTOR", + "FLOW_CHART_DECISION", + "FLOW_CHART_DELAY", + "FLOW_CHART_DISPLAY", + "FLOW_CHART_DOCUMENT", + "FLOW_CHART_EXTRACT", + "FLOW_CHART_INPUT_OUTPUT", + "FLOW_CHART_INTERNAL_STORAGE", + "FLOW_CHART_MAGNETIC_DISK", + "FLOW_CHART_MAGNETIC_DRUM", + "FLOW_CHART_MAGNETIC_TAPE", + "FLOW_CHART_MANUAL_INPUT", + "FLOW_CHART_MANUAL_OPERATION", + "FLOW_CHART_MERGE", + "FLOW_CHART_MULTIDOCUMENT", + "FLOW_CHART_OFFLINE_STORAGE", + "FLOW_CHART_OFFPAGE_CONNECTOR", + "FLOW_CHART_ONLINE_STORAGE", + "FLOW_CHART_OR", + "FLOW_CHART_PREDEFINED_PROCESS", + "FLOW_CHART_PREPARATION", + "FLOW_CHART_PROCESS", + "FLOW_CHART_PUNCHED_CARD", + "FLOW_CHART_PUNCHED_TAPE", + "FLOW_CHART_SORT", + "FLOW_CHART_SUMMING_JUNCTION", + "FLOW_CHART_TERMINATOR", + "ARROW_EAST", + "ARROW_NORTH_EAST", + "ARROW_NORTH", + "SPEECH", + "STARBURST", + "TEARDROP", + "ELLIPSE_RIBBON", + "ELLIPSE_RIBBON_2", + "CLOUD_CALLOUT", + "CUSTOM" + ] }, - "tableRows": { - "description": "Properties and contents of each row.\n\nCells that span multiple rows are contained in only one of these rows and\nhave a row_span greater\nthan 1.", - "type": "array", - "items": { - 
"$ref": "TableRow" - } - } - }, - "id": "Table" - }, - "NestingLevel": { - "description": "Contains properties describing the look and feel of a list bullet at a given\nlevel of nesting.", - "type": "object", - "properties": { - "bulletStyle": { - "description": "The style of a bullet at this level of nesting.", - "$ref": "TextStyle" - } - }, - "id": "NestingLevel" - }, - "DuplicateObjectResponse": { - "description": "The response of duplicating an object.", - "type": "object", - "properties": { - "objectId": { - "description": "The ID of the new duplicate object.", - "type": "string" - } - }, - "id": "DuplicateObjectResponse" - }, - "RefreshSheetsChartRequest": { - "description": "Refreshes an embedded Google Sheets chart by replacing it with the latest\nversion of the chart from Google Sheets.\n\nNOTE: Refreshing charts requires at least one of the spreadsheets.readonly,\nspreadsheets, drive.readonly, or drive OAuth scopes.", - "type": "object", - "properties": { - "objectId": { - "description": "The object ID of the chart to refresh.", - "type": "string" - } - }, - "id": "RefreshSheetsChartRequest" - }, - "TableCellLocation": { - "description": "A location of a single table cell within a table.", - "type": "object", - "properties": { - "rowIndex": { - "description": "The 0-based row index.", - "type": "integer", - "format": "int32" + "text": { + "description": "The text content of the shape.", + "$ref": "TextContent" }, - "columnIndex": { - "description": "The 0-based column index.", - "type": "integer", - "format": "int32" - } - }, - "id": "TableCellLocation" - }, - "TextContent": { - "description": "The general text content. The text must reside in a compatible shape (e.g.\ntext box or rectangle) or a table cell in a page.", - "type": "object", - "properties": { - "lists": { - "description": "The bulleted lists contained in this text, keyed by list ID.", - "additionalProperties": { - "$ref": "List" - }, - "type": "object" + "placeholder": { + "description": "Placeholders are shapes that are inherit from corresponding placeholders on\nlayouts and masters.\n\nIf set, the shape is a placeholder shape and any inherited properties\ncan be resolved by looking at the parent placeholder identified by the\nPlaceholder.parent_object_id field.", + "$ref": "Placeholder" }, - "textElements": { - "description": "The text contents broken down into its component parts, including styling\ninformation. This property is read-only.", - "type": "array", - "items": { - "$ref": "TextElement" - } + "shapeProperties": { + "$ref": "ShapeProperties", + "description": "The properties of the shape." } }, - "id": "TextContent" + "id": "Shape" }, - "PageElement": { - "description": "A visual element rendered on a page.", + "Image": { + "description": "A PageElement kind representing an\nimage.", "type": "object", "properties": { - "description": { - "description": "The description of the page element. Combined with title to display alt\ntext.", - "type": "string" - }, - "title": { - "description": "The title of the page element. Combined with description to display alt\ntext.", - "type": "string" - }, - "transform": { - "description": "The transform of the page element.", - "$ref": "AffineTransform" - }, - "video": { - "description": "A video page element.", - "$ref": "Video" - }, - "sheetsChart": { - "description": "A linked chart embedded from Google Sheets. 
Unlinked charts are\nrepresented as images.", - "$ref": "SheetsChart" - }, - "line": { - "description": "A line page element.", - "$ref": "Line" - }, - "table": { - "description": "A table page element.", - "$ref": "Table" - }, - "wordArt": { - "description": "A word art page element.", - "$ref": "WordArt" - }, - "shape": { - "description": "A generic shape.", - "$ref": "Shape" - }, - "elementGroup": { - "description": "A collection of page elements joined as a single unit.", - "$ref": "Group" - }, - "image": { - "description": "An image page element.", - "$ref": "Image" + "imageProperties": { + "description": "The properties of the image.", + "$ref": "ImageProperties" }, - "objectId": { - "description": "The object ID for this page element. Object IDs used by\ngoogle.apps.slides.v1.Page and\ngoogle.apps.slides.v1.PageElement share the same namespace.", + "contentUrl": { + "description": "An URL to an image with a default lifetime of 30 minutes.\nThis URL is tagged with the account of the requester. Anyone with the URL\neffectively accesses the image as the original requester. Access to the\nimage may be lost if the presentation's sharing settings change.", "type": "string" - }, - "size": { - "description": "The size of the page element.", - "$ref": "Size" } }, - "id": "PageElement" + "id": "Image" }, - "UpdatePagePropertiesRequest": { - "description": "Updates the properties of a Page.", + "InsertTextRequest": { + "description": "Inserts text into a shape or a table cell.", "type": "object", "properties": { + "cellLocation": { + "$ref": "TableCellLocation", + "description": "The optional table cell location if the text is to be inserted into a table\ncell. If present, the object_id must refer to a table." + }, "objectId": { - "description": "The object ID of the page the update is applied to.", + "description": "The object ID of the shape or table where the text will be inserted.", "type": "string" }, - "fields": { - "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `pageProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the page background solid fill color, set `fields`\nto `\"pageBackgroundFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", - "type": "string", - "format": "google-fieldmask" + "text": { + "description": "The text to be inserted.\n\nInserting a newline character will implicitly create a new\nParagraphMarker at that index.\nThe paragraph style of the new paragraph will be copied from the paragraph\nat the current insertion index, including lists and bullets.\n\nText styles for inserted text will be determined automatically, generally\npreserving the styling of neighboring text. 
In most cases, the text will be\nadded to the TextRun that exists at the\ninsertion index.\n\nSome control characters (U+0000-U+0008, U+000C-U+001F) and characters\nfrom the Unicode Basic Multilingual Plane Private Use Area (U+E000-U+F8FF)\nwill be stripped out of the inserted text.", + "type": "string" }, - "pageProperties": { - "description": "The page properties to update.", - "$ref": "PageProperties" + "insertionIndex": { + "description": "The index where the text will be inserted, in Unicode code units, based\non TextElement indexes.\n\nThe index is zero-based and is computed from the start of the string.\nThe index may be adjusted to prevent insertions inside Unicode grapheme\nclusters. In these cases, the text will be inserted immediately after the\ngrapheme cluster.", + "format": "int32", + "type": "integer" } }, - "id": "UpdatePagePropertiesRequest" + "id": "InsertTextRequest" }, - "UpdateTableCellPropertiesRequest": { - "description": "Update the properties of a TableCell.", + "AffineTransform": { + "description": "AffineTransform uses a 3x3 matrix with an implied last row of [ 0 0 1 ]\nto transform source coordinates (x,y) into destination coordinates (x', y')\naccording to:\n\n x' x = shear_y scale_y translate_y \n 1 [ 1 ]\n\nAfter transformation,\n\n x' = scale_x * x + shear_x * y + translate_x;\n y' = scale_y * y + shear_y * x + translate_y;\n\nThis message is therefore composed of these six matrix elements.", "type": "object", "properties": { - "objectId": { - "description": "The object ID of the table.", - "type": "string" + "unit": { + "description": "The units for translate elements.", + "type": "string", + "enumDescriptions": [ + "The units are unknown.", + "An English Metric Unit (EMU) is defined as 1/360,000 of a centimeter\nand thus there are 914,400 EMUs per inch, and 12,700 EMUs per point.", + "A point, 1/72 of an inch." + ], + "enum": [ + "UNIT_UNSPECIFIED", + "EMU", + "PT" + ] }, - "tableRange": { - "description": "The table range representing the subset of the table to which the updates\nare applied. If a table range is not specified, the updates will apply to\nthe entire table.", - "$ref": "TableRange" + "scaleX": { + "description": "The X coordinate scaling element.", + "format": "double", + "type": "number" }, - "fields": { - "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `tableCellProperties` is\nimplied and should not be specified. 
A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the table cell background solid fill color, set\n`fields` to `\"tableCellBackgroundFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "shearX": { + "description": "The X coordinate shearing element.", + "format": "double", + "type": "number" + }, + "scaleY": { + "description": "The Y coordinate scaling element.", + "format": "double", + "type": "number" + }, + "translateY": { + "description": "The Y coordinate translation element.", + "format": "double", + "type": "number" + }, + "translateX": { + "description": "The X coordinate translation element.", + "format": "double", + "type": "number" + }, + "shearY": { + "description": "The Y coordinate shearing element.", + "format": "double", + "type": "number" + } + }, + "id": "AffineTransform" + }, + "AutoText": { + "description": "A TextElement kind that represents auto text.", + "type": "object", + "properties": { + "type": { + "description": "The type of this auto text.", "type": "string", - "format": "google-fieldmask" + "enumDescriptions": [ + "An unspecified autotext type.", + "Type for autotext that represents the current slide number." + ], + "enum": [ + "TYPE_UNSPECIFIED", + "SLIDE_NUMBER" + ] }, - "tableCellProperties": { - "description": "The table cell properties to update.", - "$ref": "TableCellProperties" + "content": { + "description": "The rendered content of this auto text, if available.", + "type": "string" + }, + "style": { + "$ref": "TextStyle", + "description": "The styling applied to this auto text." } }, - "id": "UpdateTableCellPropertiesRequest" + "id": "AutoText" }, - "BatchUpdatePresentationRequest": { - "description": "Request message for PresentationsService.BatchUpdatePresentation.", + "CreateVideoResponse": { + "description": "The result of creating a video.", "type": "object", "properties": { - "requests": { - "description": "A list of updates to apply to the presentation.", - "type": "array", - "items": { - "$ref": "Request" - } + "objectId": { + "description": "The object ID of the created video.", + "type": "string" } }, - "id": "BatchUpdatePresentationRequest" + "id": "CreateVideoResponse" }, - "Dimension": { - "description": "A magnitude in a single direction in the specified units.", + "DeleteTextRequest": { + "description": "Deletes text from a shape or a table cell.", "type": "object", "properties": { - "unit": { - "description": "The units for magnitude.", - "enum": [ - "UNIT_UNSPECIFIED", - "EMU", - "PT" - ], - "enumDescriptions": [ - "The units are unknown.", - "An English Metric Unit (EMU) is defined as 1/360,000 of a centimeter\nand thus there are 914,400 EMUs per inch, and 12,700 EMUs per point.", - "A point, 1/72 of an inch." - ], + "objectId": { + "description": "The object ID of the shape or table from which the text will be deleted.", "type": "string" }, - "magnitude": { - "description": "The magnitude.", - "type": "number", - "format": "double" + "textRange": { + "$ref": "Range", + "description": "The range of text to delete, based on TextElement indexes.\n\nThere is always an implicit newline character at the end of a shape's or\ntable cell's text that cannot be deleted. `Range.Type.ALL` will use the\ncorrect bounds, but care must be taken when specifying explicit bounds for\nrange types `FROM_START_INDEX` and `FIXED_RANGE`. 
For example, if the text\nis \"ABC\", followed by an implicit newline, then the maximum value is 2 for\n`text_range.start_index` and 3 for `text_range.end_index`.\n\nDeleting text that crosses a paragraph boundary may result in changes\nto paragraph styles and lists as the two paragraphs are merged.\n\nRanges that include only one code unit of a surrogate pair are expanded to\ninclude both code units." + }, + "cellLocation": { + "description": "The optional table cell location if the text is to be deleted from a table\ncell. If present, the object_id must refer to a table.", + "$ref": "TableCellLocation" } }, - "id": "Dimension" + "id": "DeleteTextRequest" }, - "Placeholder": { - "description": "The placeholder information that uniquely identifies a placeholder shape.", + "UpdatePageElementTransformRequest": { + "description": "Updates the transform of a page element.", "type": "object", "properties": { - "index": { - "description": "The index of the placeholder. If the same placeholder types are the present\nin the same page, they would have different index values.", - "type": "integer", - "format": "int32" + "objectId": { + "description": "The object ID of the page element to update.", + "type": "string" }, - "type": { - "description": "The type of the placeholder.", - "enum": [ - "NONE", - "BODY", - "CHART", - "CLIP_ART", - "CENTERED_TITLE", - "DIAGRAM", - "DATE_AND_TIME", - "FOOTER", - "HEADER", - "MEDIA", - "OBJECT", - "PICTURE", - "SLIDE_NUMBER", - "SUBTITLE", - "TABLE", - "TITLE", - "SLIDE_IMAGE" - ], + "transform": { + "description": "The input transform matrix used to update the page element.", + "$ref": "AffineTransform" + }, + "applyMode": { "enumDescriptions": [ - "Default value, signifies it is not a placeholder.", - "Body text.", - "Chart or graph.", - "Clip art image.", - "Title centered.", - "Diagram.", - "Date and time.", - "Footer text.", - "Header text.", - "Multimedia.", - "Any content type.", - "Picture.", - "Number of a slide.", - "Subtitle.", - "Table.", - "Slide title.", - "Slide image." + "Unspecified mode.", + "Applies the new AffineTransform matrix to the existing one, and\nreplaces the existing one with the resulting concatenation.", + "Replaces the existing AffineTransform matrix with the new one." ], - "type": "string" - }, - "parentObjectId": { - "description": "The object ID of this shape's parent placeholder.\nIf unset, the parent placeholder shape does not exist, so the shape does\nnot inherit properties from any other shape.", + "enum": [ + "APPLY_MODE_UNSPECIFIED", + "RELATIVE", + "ABSOLUTE" + ], + "description": "The apply mode of the transform update.", "type": "string" } }, - "id": "Placeholder" + "id": "UpdatePageElementTransformRequest" }, - "CreateSheetsChartRequest": { - "description": "Creates an embedded Google Sheets chart.\n\nNOTE: Chart creation requires at least one of the spreadsheets.readonly,\nspreadsheets, drive.readonly, or drive OAuth scopes.", + "DeleteObjectRequest": { + "description": "Deletes an object, either pages or\npage elements, from the\npresentation.", "type": "object", "properties": { - "chartId": { - "description": "The ID of the specific chart in the Google Sheets spreadsheet.", - "type": "integer", - "format": "int32" - }, "objectId": { - "description": "A user-supplied object ID.\n\nIf specified, the ID must be unique among all pages and page elements in\nthe presentation. 
The ID should start with a word character [a-zA-Z0-9_]\nand then followed by any number of the following characters [a-zA-Z0-9_-:].\nThe length of the ID should not be less than 5 or greater than 50.\nIf empty, a unique identifier will be generated.", + "description": "The object ID of the page or page element to delete.\n\nIf after a delete operation a group contains\nonly 1 or no page elements, the group is also deleted.\n\nIf a placeholder is deleted on a layout, any empty inheriting shapes are\nalso deleted.", "type": "string" + } + }, + "id": "DeleteObjectRequest" + }, + "TextElement": { + "description": "A TextElement describes the content of a range of indices in the text content\nof a Shape or TableCell.", + "type": "object", + "properties": { + "textRun": { + "description": "A TextElement representing a run of text where all of the characters\nin the run have the same TextStyle.\n\nThe `start_index` and `end_index` of TextRuns will always be fully\ncontained in the index range of a single `paragraph_marker` TextElement.\nIn other words, a TextRun will never span multiple paragraphs.", + "$ref": "TextRun" }, - "spreadsheetId": { - "description": "The ID of the Google Sheets spreadsheet that contains the chart.", - "type": "string" + "autoText": { + "description": "A TextElement representing a spot in the text that is dynamically\nreplaced with content that can change over time.", + "$ref": "AutoText" }, - "elementProperties": { - "description": "The element properties for the chart.\n\nWhen the aspect ratio of the provided size does not match the chart aspect\nratio, the chart is scaled and centered with respect to the size in order\nto maintain aspect ratio. The provided transform is applied after this\noperation.", - "$ref": "PageElementProperties" + "paragraphMarker": { + "description": "A marker representing the beginning of a new paragraph.\n\nThe `start_index` and `end_index` of this TextElement represent the\nrange of the paragraph. Other TextElements with an index range contained\ninside this paragraph's range are considered to be part of this\nparagraph. The range of indices of two separate paragraphs will never\noverlap.", + "$ref": "ParagraphMarker" }, - "linkingMode": { - "description": "The mode with which the chart is linked to the source spreadsheet. When\nnot specified, the chart will be an image that is not linked.", - "enum": [ - "NOT_LINKED_IMAGE", - "LINKED" - ], + "startIndex": { + "description": "The zero-based start index of this text element, in Unicode code units.", + "format": "int32", + "type": "integer" + }, + "endIndex": { + "description": "The zero-based end index of this text element, exclusive, in Unicode code\nunits.", + "format": "int32", + "type": "integer" + } + }, + "id": "TextElement" + }, + "Dimension": { + "description": "A magnitude in a single direction in the specified units.", + "type": "object", + "properties": { + "magnitude": { + "description": "The magnitude.", + "format": "double", + "type": "number" + }, + "unit": { "enumDescriptions": [ - "The chart is not associated with the source spreadsheet and cannot be\nupdated. A chart that is not linked will be inserted as an image.", - "Linking the chart allows it to be updated, and other collaborators will\nsee a link to the spreadsheet." + "The units are unknown.", + "An English Metric Unit (EMU) is defined as 1/360,000 of a centimeter\nand thus there are 914,400 EMUs per inch, and 12,700 EMUs per point.", + "A point, 1/72 of an inch." 
], + "enum": [ + "UNIT_UNSPECIFIED", + "EMU", + "PT" + ], + "description": "The units for magnitude.", "type": "string" } }, - "id": "CreateSheetsChartRequest" + "id": "Dimension" }, - "DeleteTableRowRequest": { - "description": "Deletes a row from a table.", + "LineFill": { + "description": "The fill of the line.", "type": "object", "properties": { - "tableObjectId": { - "description": "The table to delete rows from.", - "type": "string" - }, - "cellLocation": { - "description": "The reference table cell location from which a row will be deleted.\n\nThe row this cell spans will be deleted. If this is a merged cell, multiple\nrows will be deleted. If no rows remain in the table after this deletion,\nthe whole table is deleted.", - "$ref": "TableCellLocation" + "solidFill": { + "description": "Solid color fill.", + "$ref": "SolidFill" } }, - "id": "DeleteTableRowRequest" + "id": "LineFill" }, - "Video": { - "description": "A PageElement kind representing a\nvideo.", + "VideoProperties": { + "description": "The properties of the Video.", "type": "object", "properties": { - "url": { - "description": "An URL to a video. The URL is valid as long as the source video\nexists and sharing settings do not change.", - "type": "string" + "outline": { + "description": "The outline of the video. The default outline matches the defaults for new\nvideos created in the Slides editor.", + "$ref": "Outline" + } + }, + "id": "VideoProperties" + }, + "InsertTableRowsRequest": { + "description": "Inserts rows into a table.", + "type": "object", + "properties": { + "number": { + "description": "The number of rows to be inserted. Maximum 20 per request.", + "format": "int32", + "type": "integer" }, - "videoProperties": { - "description": "The properties of the video.", - "$ref": "VideoProperties" + "cellLocation": { + "$ref": "TableCellLocation", + "description": "The reference table cell location from which rows will be inserted.\n\nA new row will be inserted above (or below) the row where the reference\ncell is. If the reference cell is a merged cell, a new row will be\ninserted above (or below) the merged cell." }, - "source": { - "description": "The video source.", - "enum": [ - "SOURCE_UNSPECIFIED", - "YOUTUBE" - ], - "enumDescriptions": [ - "The video source is unspecified.", - "The video source is YouTube." 
- ], + "tableObjectId": { + "description": "The table to insert rows into.", "type": "string" }, - "id": { - "description": "The video source's unique identifier for this video.", - "type": "string" + "insertBelow": { + "description": "Whether to insert new rows below the reference cell location.\n\n- `True`: insert below the cell.\n- `False`: insert above the cell.", + "type": "boolean" } }, - "id": "Video" + "id": "InsertTableRowsRequest" }, - "Link": { - "description": "A hypertext link.", + "LayoutProperties": { + "description": "The properties of Page are only\nrelevant for pages with page_type LAYOUT.", "type": "object", "properties": { - "url": { - "description": "If set, indicates this is a link to the external web page at this URL.", + "displayName": { + "description": "The human readable name of the layout in the presentation's locale.", "type": "string" }, - "relativeLink": { - "description": "If set, indicates this is a link to a slide in this presentation,\naddressed by its position.", - "enum": [ - "RELATIVE_SLIDE_LINK_UNSPECIFIED", - "NEXT_SLIDE", - "PREVIOUS_SLIDE", - "FIRST_SLIDE", - "LAST_SLIDE" - ], - "enumDescriptions": [ - "An unspecified relative slide link.", - "A link to the next slide.", - "A link to the previous slide.", - "A link to the first slide in the presentation.", - "A link to the last slide in the presentation." - ], + "masterObjectId": { + "description": "The object ID of the master that this layout is based on.", "type": "string" }, - "slideIndex": { - "description": "If set, indicates this is a link to the slide at this zero-based index\nin the presentation. There may not be a slide at this index.", - "type": "integer", - "format": "int32" - }, - "pageObjectId": { - "description": "If set, indicates this is a link to the specific page in this\npresentation with this ID. A page with this ID may not exist.", + "name": { + "description": "The name of the layout.", "type": "string" } }, - "id": "Link" + "id": "LayoutProperties" }, - "PageBackgroundFill": { - "description": "The page background fill.", + "Presentation": { + "description": "A Google Slides presentation.", "type": "object", "properties": { - "stretchedPictureFill": { - "description": "Stretched picture fill.", - "$ref": "StretchedPictureFill" + "slides": { + "description": "The slides in the presentation.\nA slide inherits properties from a slide layout.", + "type": "array", + "items": { + "$ref": "Page" + } }, - "propertyState": { - "description": "The background fill property state.\n\nUpdating the the fill on a page will implicitly update this field to\n`RENDERED`, unless another value is specified in the same request. To\nhave no fill on a page, set this field to `NOT_RENDERED`. In this case,\nany other fill fields set in the same request will be ignored.", - "enum": [ - "RENDERED", - "NOT_RENDERED", - "INHERIT" - ], - "enumDescriptions": [ - "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", - "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. 
To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", - "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." - ], + "notesMaster": { + "description": "The notes master in the presentation. It serves three purposes:\n\n- Placeholder shapes on a notes master contain the default text styles and\n shape properties of all placeholder shapes on notes pages. Specifically,\n a SLIDE_IMAGE placeholder shape is defined to contain the slide\n thumbnail, and a BODY placeholder shape is defined to contain the speaker\n notes.\n- The notes master page properties define the common page properties\n inherited by all notes pages.\n- Any other shapes on the notes master will appear on all notes pages.\n\nThe notes master is read-only.", + "$ref": "Page" + }, + "layouts": { + "description": "The layouts in the presentation. A layout is a template that determines\nhow content is arranged and styled on the slides that inherit from that\nlayout.", + "type": "array", + "items": { + "$ref": "Page" + } + }, + "title": { + "description": "The title of the presentation.", "type": "string" }, - "solidFill": { - "description": "Solid color fill.", - "$ref": "SolidFill" + "masters": { + "description": "The slide masters in the presentation. A slide master contains all common\npage elements and the common properties for a set of layouts. They serve\nthree purposes:\n\n- Placeholder shapes on a master contain the default text styles and shape\n properties of all placeholder shapes on pages that use that master.\n- The master page properties define the common page properties inherited by\n its layouts.\n- Any other shapes on the master slide will appear on all slides using that\n master, regardless of their layout.", + "type": "array", + "items": { + "$ref": "Page" + } + }, + "locale": { + "description": "The locale of the presentation, as an IETF BCP 47 language tag.", + "type": "string" + }, + "pageSize": { + "description": "The size of pages in the presentation.", + "$ref": "Size" + }, + "presentationId": { + "description": "The ID of the presentation.", + "type": "string" } }, - "id": "PageBackgroundFill" + "id": "Presentation" }, - "ColorStop": { - "description": "A color and position in a gradient band.", + "LineProperties": { + "description": "The properties of the Line.\n\nWhen unset, these fields default to values that match the appearance of\nnew lines created in the Slides editor.", "type": "object", "properties": { - "color": { - "description": "The color of the gradient stop.", - "$ref": "OpaqueColor" + "weight": { + "$ref": "Dimension", + "description": "The thickness of the line." }, - "position": { - "description": "The relative position of the color stop in the gradient band measured\nin percentage. The value should be in the interval [0.0, 1.0].", - "type": "number", - "format": "float" + "lineFill": { + "description": "The fill of the line. The default line fill matches the defaults for new\nlines created in the Slides editor.", + "$ref": "LineFill" }, - "alpha": { - "description": "The alpha value of this color in the gradient band. Defaults to 1.0,\nfully opaque.", - "type": "number", - "format": "float" + "link": { + "$ref": "Link", + "description": "The hyperlink destination of the line. If unset, there is no link." 
+ }, + "dashStyle": { + "description": "The dash style of the line.", + "type": "string", + "enumDescriptions": [ + "Unspecified dash style.", + "Solid line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'solid'.\nThis is the default dash style.", + "Dotted line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dot'.", + "Dashed line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dash'.", + "Alternating dashes and dots. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'dashDot'.", + "Line with large dashes. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'lgDash'.", + "Alternating large dashes and dots. Corresponds to ECMA-376\nST_PresetLineDashVal value 'lgDashDot'." + ], + "enum": [ + "DASH_STYLE_UNSPECIFIED", + "SOLID", + "DOT", + "DASH", + "DASH_DOT", + "LONG_DASH", + "LONG_DASH_DOT" + ] + }, + "endArrow": { + "description": "The style of the arrow at the end of the line.", + "type": "string", + "enumDescriptions": [ + "An unspecified arrow style.", + "No arrow.", + "Arrow with notched back. Corresponds to ECMA-376 ST_LineEndType value\n'stealth'.", + "Filled arrow. Corresponds to ECMA-376 ST_LineEndType value 'triangle'.", + "Filled circle. Corresponds to ECMA-376 ST_LineEndType value 'oval'.", + "Filled square.", + "Filled diamond. Corresponds to ECMA-376 ST_LineEndType value 'diamond'.", + "Hollow arrow.", + "Hollow circle.", + "Hollow square.", + "Hollow diamond." + ], + "enum": [ + "ARROW_STYLE_UNSPECIFIED", + "NONE", + "STEALTH_ARROW", + "FILL_ARROW", + "FILL_CIRCLE", + "FILL_SQUARE", + "FILL_DIAMOND", + "OPEN_ARROW", + "OPEN_CIRCLE", + "OPEN_SQUARE", + "OPEN_DIAMOND" + ] + }, + "startArrow": { + "description": "The style of the arrow at the beginning of the line.", + "type": "string", + "enumDescriptions": [ + "An unspecified arrow style.", + "No arrow.", + "Arrow with notched back. Corresponds to ECMA-376 ST_LineEndType value\n'stealth'.", + "Filled arrow. Corresponds to ECMA-376 ST_LineEndType value 'triangle'.", + "Filled circle. Corresponds to ECMA-376 ST_LineEndType value 'oval'.", + "Filled square.", + "Filled diamond. Corresponds to ECMA-376 ST_LineEndType value 'diamond'.", + "Hollow arrow.", + "Hollow circle.", + "Hollow square.", + "Hollow diamond." + ], + "enum": [ + "ARROW_STYLE_UNSPECIFIED", + "NONE", + "STEALTH_ARROW", + "FILL_ARROW", + "FILL_CIRCLE", + "FILL_SQUARE", + "FILL_DIAMOND", + "OPEN_ARROW", + "OPEN_CIRCLE", + "OPEN_SQUARE", + "OPEN_DIAMOND" + ] } }, - "id": "ColorStop" + "id": "LineProperties" }, - "ThemeColorPair": { - "description": "A pair mapping a theme color type to the concrete color it represents.", + "OpaqueColor": { + "description": "A themeable solid color value.", "type": "object", "properties": { - "color": { - "description": "The concrete color corresponding to the theme color type above.", - "$ref": "RgbColor" + "rgbColor": { + "$ref": "RgbColor", + "description": "An opaque RGB color." }, - "type": { - "description": "The type of the theme color.", - "enum": [ - "THEME_COLOR_TYPE_UNSPECIFIED", - "DARK1", - "LIGHT1", - "DARK2", - "LIGHT2", - "ACCENT1", - "ACCENT2", - "ACCENT3", - "ACCENT4", - "ACCENT5", - "ACCENT6", - "HYPERLINK", - "FOLLOWED_HYPERLINK", - "TEXT1", - "BACKGROUND1", - "TEXT2", - "BACKGROUND2" - ], + "themeColor": { + "description": "An opaque theme color.", + "type": "string", "enumDescriptions": [ "Unspecified theme color. This value should not be used.", "Represents the first dark color.", @@ -1413,48 +942,175 @@ "Represents the second text color.", "Represents the second background color." 
], - "type": "string" + "enum": [ + "THEME_COLOR_TYPE_UNSPECIFIED", + "DARK1", + "LIGHT1", + "DARK2", + "LIGHT2", + "ACCENT1", + "ACCENT2", + "ACCENT3", + "ACCENT4", + "ACCENT5", + "ACCENT6", + "HYPERLINK", + "FOLLOWED_HYPERLINK", + "TEXT1", + "BACKGROUND1", + "TEXT2", + "BACKGROUND2" + ] } }, - "id": "ThemeColorPair" + "id": "OpaqueColor" }, - "ReplaceAllShapesWithImageRequest": { - "description": "Replaces all shapes that match the given criteria with the provided image.", + "ImageProperties": { + "description": "The properties of the Image.", "type": "object", "properties": { - "replaceMethod": { - "description": "The replace method.", - "enum": [ - "CENTER_INSIDE", - "CENTER_CROP" - ], + "recolor": { + "$ref": "Recolor", + "description": "The recolor effect of the image. If not set, the image is not recolored.\nThis property is read-only." + }, + "cropProperties": { + "description": "The crop properties of the image. If not set, the image is not cropped.\nThis property is read-only.", + "$ref": "CropProperties" + }, + "outline": { + "$ref": "Outline", + "description": "The outline of the image. If not set, the the image has no outline." + }, + "brightness": { + "description": "The brightness effect of the image. The value should be in the interval\n[-1.0, 1.0], where 0 means no effect. This property is read-only.", + "format": "float", + "type": "number" + }, + "transparency": { + "description": "The transparency effect of the image. The value should be in the interval\n[0.0, 1.0], where 0 means no effect and 1 means completely transparent.\nThis property is read-only.", + "format": "float", + "type": "number" + }, + "shadow": { + "description": "The shadow of the image. If not set, the image has no shadow. This property\nis read-only.", + "$ref": "Shadow" + }, + "contrast": { + "description": "The contrast effect of the image. The value should be in the interval\n[-1.0, 1.0], where 0 means no effect. This property is read-only.", + "format": "float", + "type": "number" + }, + "link": { + "description": "The hyperlink destination of the image. If unset, there is no link.", + "$ref": "Link" + } + }, + "id": "ImageProperties" + }, + "ReplaceAllShapesWithImageResponse": { + "description": "The result of replacing shapes with an image.", + "type": "object", + "properties": { + "occurrencesChanged": { + "description": "The number of shapes replaced with images.", + "format": "int32", + "type": "integer" + } + }, + "id": "ReplaceAllShapesWithImageResponse" + }, + "Line": { + "description": "A PageElement kind representing a\nline, curved connector, or bent connector.", + "type": "object", + "properties": { + "lineType": { + "description": "The type of the line.", + "type": "string", "enumDescriptions": [ - "Scales and centers the image to fit within the bounds of the original\nshape and maintains the image's aspect ratio. The rendered size of the\nimage may be smaller than the size of the shape. This is the default\nmethod when one is not specified.", - "Scales and centers the image to fill the bounds of the original shape.\nThe image may be cropped in order to fill the shape. The rendered size of\nthe image will be the same as that of the original shape." + "An unspecified line type.", + "Straight connector 1 form. Corresponds to ECMA-376 ST_ShapeType\n'straightConnector1'.", + "Bent connector 2 form. Corresponds to ECMA-376 ST_ShapeType\n'bentConnector2'.", + "Bent connector 3 form. Corresponds to ECMA-376 ST_ShapeType\n'bentConnector3'.", + "Bent connector 4 form. 
Corresponds to ECMA-376 ST_ShapeType\n'bentConnector4'.", + "Bent connector 5 form. Corresponds to ECMA-376 ST_ShapeType\n'bentConnector5'.", + "Curved connector 2 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector2'.", + "Curved connector 3 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector3'.", + "Curved connector 4 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector4'.", + "Curved connector 5 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector5'." ], + "enum": [ + "TYPE_UNSPECIFIED", + "STRAIGHT_CONNECTOR_1", + "BENT_CONNECTOR_2", + "BENT_CONNECTOR_3", + "BENT_CONNECTOR_4", + "BENT_CONNECTOR_5", + "CURVED_CONNECTOR_2", + "CURVED_CONNECTOR_3", + "CURVED_CONNECTOR_4", + "CURVED_CONNECTOR_5" + ] + }, + "lineProperties": { + "$ref": "LineProperties", + "description": "The properties of the line." + } + }, + "id": "Line" + }, + "CreateSheetsChartRequest": { + "description": "Creates an embedded Google Sheets chart.\n\nNOTE: Chart creation requires at least one of the spreadsheets.readonly,\nspreadsheets, drive.readonly, or drive OAuth scopes.", + "type": "object", + "properties": { + "objectId": { + "description": "A user-supplied object ID.\n\nIf specified, the ID must be unique among all pages and page elements in\nthe presentation. The ID should start with a word character [a-zA-Z0-9_]\nand then followed by any number of the following characters [a-zA-Z0-9_-:].\nThe length of the ID should not be less than 5 or greater than 50.\nIf empty, a unique identifier will be generated.", "type": "string" }, - "containsText": { - "description": "If set, this request will replace all of the shapes that contain the\ngiven text.", - "$ref": "SubstringMatchCriteria" + "elementProperties": { + "description": "The element properties for the chart.\n\nWhen the aspect ratio of the provided size does not match the chart aspect\nratio, the chart is scaled and centered with respect to the size in order\nto maintain aspect ratio. The provided transform is applied after this\noperation.", + "$ref": "PageElementProperties" }, - "imageUrl": { - "description": "The image URL.\n\nThe image is fetched once at insertion time and a copy is stored for\ndisplay inside the presentation. Images must be less than 50MB in size,\ncannot exceed 25 megapixels, and must be in either in PNG, JPEG, or GIF\nformat.", + "spreadsheetId": { + "description": "The ID of the Google Sheets spreadsheet that contains the chart.", + "type": "string" + }, + "linkingMode": { + "enumDescriptions": [ + "The chart is not associated with the source spreadsheet and cannot be\nupdated. A chart that is not linked will be inserted as an image.", + "Linking the chart allows it to be updated, and other collaborators will\nsee a link to the spreadsheet." + ], + "enum": [ + "NOT_LINKED_IMAGE", + "LINKED" + ], + "description": "The mode with which the chart is linked to the source spreadsheet. 
When\nnot specified, the chart will be an image that is not linked.", "type": "string" + }, + "chartId": { + "description": "The ID of the specific chart in the Google Sheets spreadsheet.", + "format": "int32", + "type": "integer" } }, - "id": "ReplaceAllShapesWithImageRequest" + "id": "CreateSheetsChartRequest" }, - "DeleteObjectRequest": { - "description": "Deletes an object, either pages or\npage elements, from the\npresentation.", + "BatchUpdatePresentationResponse": { + "description": "Response message from a batch update.", "type": "object", "properties": { - "objectId": { - "description": "The object ID of the page or page element to delete.\n\nIf after a delete operation a group contains\nonly 1 or no page elements, the group is also deleted.\n\nIf a placeholder is deleted on a layout, any empty inheriting shapes are\nalso deleted.", + "presentationId": { + "description": "The presentation the updates were applied to.", "type": "string" + }, + "replies": { + "description": "The reply of the updates. This maps 1:1 with the updates, although\nreplies to some requests may be empty.", + "type": "array", + "items": { + "$ref": "Response" + } } }, - "id": "DeleteObjectRequest" + "id": "BatchUpdatePresentationResponse" }, "CreateImageResponse": { "description": "The result of creating an image.", @@ -1467,1285 +1123,1485 @@ }, "id": "CreateImageResponse" }, - "Recolor": { - "description": "A recolor effect applied on an image.", + "SlideProperties": { + "description": "The properties of Page that are only\nrelevant for pages with page_type SLIDE.", "type": "object", "properties": { - "recolorStops": { - "description": "The recolor effect is represented by a gradient, which is a list of color\nstops. This property is read-only.", - "type": "array", - "items": { - "$ref": "ColorStop" - } + "notesPage": { + "description": "The notes page that this slide is associated with. It defines the visual\nappearance of a notes page when printing or exporting slides with speaker\nnotes. A notes page inherits properties from the\nnotes mater.\nThe placeholder shape with type BODY on the notes page contains the speaker\nnotes for this slide. The ID of this shape is identified by the\nspeaker notes object id field.\nThe notes page is read-only except for the text content and styles of the\nspeaker notes shape.", + "$ref": "Page" + }, + "layoutObjectId": { + "description": "The object ID of the layout that this slide is based on.", + "type": "string" + }, + "masterObjectId": { + "description": "The object ID of the master that this slide is based on.", + "type": "string" } }, - "id": "Recolor" + "id": "SlideProperties" }, - "TextStyle": { - "description": "Represents the styling that can be applied to a TextRun.\n\nIf this text is contained in a shape with a parent placeholder, then these text styles may be\ninherited from the parent. Which text styles are inherited depend on the\nnesting level of lists:\n\n* A text run in a paragraph that is not in a list will inherit its text style\n from the the newline character in the paragraph at the 0 nesting level of\n the list inside the parent placeholder.\n* A text run in a paragraph that is in a list will inherit its text style\n from the newline character in the paragraph at its corresponding nesting\n level of the list inside the parent placeholder.\n\nInherited text styles are represented as unset fields in this message. 
If\ntext is contained in a shape without a parent placeholder, unsetting these\nfields will revert the style to a value matching the defaults in the Slides\neditor.", + "Response": { + "description": "A single response from an update.", "type": "object", "properties": { - "bold": { - "description": "Whether or not the text is bold.", - "type": "boolean" - }, - "italic": { - "description": "Whether or not the text is italicized.", - "type": "boolean" + "createImage": { + "$ref": "CreateImageResponse", + "description": "The result of creating an image." }, - "baselineOffset": { - "description": "The text's vertical offset from its normal position.\n\nText with `SUPERSCRIPT` or `SUBSCRIPT` baseline offsets is automatically\nrendered in a smaller font size, computed based on the `font_size` field.\nThe `font_size` itself is not affected by changes in this field.", - "enum": [ - "BASELINE_OFFSET_UNSPECIFIED", - "NONE", - "SUPERSCRIPT", - "SUBSCRIPT" - ], - "enumDescriptions": [ - "The text's baseline offset is inherited from the parent.", - "The text is not vertically offset.", - "The text is vertically offset upwards (superscript).", - "The text is vertically offset downwards (subscript)." - ], - "type": "string" + "createVideo": { + "description": "The result of creating a video.", + "$ref": "CreateVideoResponse" }, - "foregroundColor": { - "description": "The color of the text itself. If set, the color is either opaque or\ntransparent, depending on if the `opaque_color` field in it is set.", - "$ref": "OptionalColor" + "replaceAllShapesWithSheetsChart": { + "description": "The result of replacing all shapes matching some criteria with a Google\nSheets chart.", + "$ref": "ReplaceAllShapesWithSheetsChartResponse" }, - "fontFamily": { - "description": "The font family of the text.\n\nThe font family can be any font from the Font menu in Slides or from\n[Google Fonts] (https://fonts.google.com/). If the font name is\nunrecognized, the text is rendered in `Arial`.\n\nSome fonts can affect the weight of the text. If an update request\nspecifies values for both `font_family` and `bold`, the explicitly-set\n`bold` value is used.", - "type": "string" + "createSheetsChart": { + "$ref": "CreateSheetsChartResponse", + "description": "The result of creating a Google Sheets chart." }, - "strikethrough": { - "description": "Whether or not the text is struck through.", - "type": "boolean" + "replaceAllShapesWithImage": { + "description": "The result of replacing all shapes matching some criteria with an\nimage.", + "$ref": "ReplaceAllShapesWithImageResponse" }, - "link": { - "description": "The hyperlink destination of the text. If unset, there is no link. Links\nare not inherited from parent text.\n\nChanging the link in an update request causes some other changes to the\ntext style of the range:\n\n* When setting a link, the text foreground color will be set to\n ThemeColorType.HYPERLINK and the text will\n be underlined. If these fields are modified in the same\n request, those values will be used instead of the link defaults.\n* Setting a link on a text range that overlaps with an existing link will\n also update the existing link to point to the new URL.\n* Links are not settable on newline characters. As a result, setting a link\n on a text range that crosses a paragraph boundary, such as `\"ABC\\n123\"`,\n will separate the newline character(s) into their own text runs. 
The\n link will be applied separately to the runs before and after the newline.\n* Removing a link will update the text style of the range to match the\n style of the preceding text (or the default text styles if the preceding\n text is another link) unless different styles are being set in the same\n request.", - "$ref": "Link" + "createTable": { + "description": "The result of creating a table.", + "$ref": "CreateTableResponse" }, - "smallCaps": { - "description": "Whether or not the text is in small capital letters.", - "type": "boolean" + "replaceAllText": { + "$ref": "ReplaceAllTextResponse", + "description": "The result of replacing text." }, - "backgroundColor": { - "description": "The background color of the text. If set, the color is either opaque or\ntransparent, depending on if the `opaque_color` field in it is set.", - "$ref": "OptionalColor" + "createSlide": { + "$ref": "CreateSlideResponse", + "description": "The result of creating a slide." }, - "fontSize": { - "description": "The size of the text's font. When read, the `font_size` will specified in\npoints.", - "$ref": "Dimension" + "duplicateObject": { + "description": "The result of duplicating an object.", + "$ref": "DuplicateObjectResponse" }, - "underline": { - "description": "Whether or not the text is underlined.", - "type": "boolean" + "createShape": { + "description": "The result of creating a shape.", + "$ref": "CreateShapeResponse" + }, + "createLine": { + "description": "The result of creating a line.", + "$ref": "CreateLineResponse" } }, - "id": "TextStyle" + "id": "Response" }, - "UpdateLinePropertiesRequest": { - "description": "Updates the properties of a Line.", + "SubstringMatchCriteria": { + "description": "A criteria that matches a specific string of text in a shape or table.", "type": "object", "properties": { - "objectId": { - "description": "The object ID of the line the update is applied to.", + "text": { + "description": "The text to search for in the shape or table.", "type": "string" }, - "lineProperties": { - "description": "The line properties to update.", - "$ref": "LineProperties" - }, - "fields": { - "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `lineProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the line solid fill color, set `fields` to\n`\"lineFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", - "type": "string", - "format": "google-fieldmask" + "matchCase": { + "description": "Indicates whether the search should respect case:\n\n- `True`: the search is case sensitive.\n- `False`: the search is case insensitive.", + "type": "boolean" } }, - "id": "UpdateLinePropertiesRequest" + "id": "SubstringMatchCriteria" }, - "TableCellBackgroundFill": { - "description": "The table cell background fill.", + "LayoutReference": { + "description": "Slide layout reference. This may reference either:\n\n- A predefined layout\n- One of the layouts in the presentation.", "type": "object", "properties": { - "propertyState": { - "description": "The background fill property state.\n\nUpdating the the fill on a table cell will implicitly update this field\nto `RENDERED`, unless another value is specified in the same request. To\nhave no fill on a table cell, set this field to `NOT_RENDERED`. 
In this\ncase, any other fill fields set in the same request will be ignored.", - "enum": [ - "RENDERED", - "NOT_RENDERED", - "INHERIT" - ], + "layoutId": { + "description": "Layout ID: the object ID of one of the layouts in the presentation.", + "type": "string" + }, + "predefinedLayout": { "enumDescriptions": [ - "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", - "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", - "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." + "Unspecified layout.", + "Blank layout, with no placeholders.", + "Layout with a caption at the bottom.", + "Layout with a title and a subtitle.", + "Layout with a title and body.", + "Layout with a title and two columns.", + "Layout with only a title.", + "Layout with a section title.", + "Layout with a title and subtitle on one side and description on the other.", + "Layout with one title and one body, arranged in a single column.", + "Layout with a main point.", + "Layout with a big number heading." + ], + "enum": [ + "PREDEFINED_LAYOUT_UNSPECIFIED", + "BLANK", + "CAPTION_ONLY", + "TITLE", + "TITLE_AND_BODY", + "TITLE_AND_TWO_COLUMNS", + "TITLE_ONLY", + "SECTION_HEADER", + "SECTION_TITLE_AND_DESCRIPTION", + "ONE_COLUMN_TEXT", + "MAIN_POINT", + "BIG_NUMBER" ], + "description": "Predefined layout.", + "type": "string" + } + }, + "id": "LayoutReference" + }, + "TextRun": { + "description": "A TextElement kind that represents a run of text that all has the same\nstyling.", + "type": "object", + "properties": { + "content": { + "description": "The text of this run.", "type": "string" }, - "solidFill": { - "description": "Solid color fill.", - "$ref": "SolidFill" + "style": { + "$ref": "TextStyle", + "description": "The styling applied to this run." } }, - "id": "TableCellBackgroundFill" + "id": "TextRun" }, - "SolidFill": { - "description": "A solid color fill. The page or page element is filled entirely with the\nspecified color value.\n\nIf any field is unset, its value may be inherited from a parent placeholder\nif it exists.", + "TableRange": { + "description": "A table range represents a reference to a subset of a table.\n\nIt's important to note that the cells specified by a table range do not\nnecessarily form a rectangle. For example, let's say we have a 3 x 3 table\nwhere all the cells of the last row are merged together. The table looks\nlike this:\n\n \n [ ]\n\nA table range with location = (0, 0), row span = 3 and column span = 2\nspecifies the following cells:\n\n x x \n [ x ]", "type": "object", "properties": { - "color": { - "description": "The color value of the solid fill.", - "$ref": "OpaqueColor" + "location": { + "$ref": "TableCellLocation", + "description": "The starting location of the table range." 
}, - "alpha": { - "description": "The fraction of this `color` that should be applied to the pixel.\nThat is, the final pixel color is defined by the equation:\n\n pixel color = alpha * (color) + (1.0 - alpha) * (background color)\n\nThis means that a value of 1.0 corresponds to a solid color, whereas\na value of 0.0 corresponds to a completely transparent color.", - "type": "number", - "format": "float" + "rowSpan": { + "description": "The row span of the table range.", + "format": "int32", + "type": "integer" + }, + "columnSpan": { + "description": "The column span of the table range.", + "format": "int32", + "type": "integer" } }, - "id": "SolidFill" + "id": "TableRange" }, - "DuplicateObjectRequest": { - "description": "Duplicates a slide or page element.\n\nWhen duplicating a slide, the duplicate slide will be created immediately\nfollowing the specified slide. When duplicating a page element, the duplicate\nwill be placed on the same page at the same position as the original.", + "CreateTableResponse": { + "description": "The result of creating a table.", "type": "object", "properties": { "objectId": { - "description": "The ID of the object to duplicate.", + "description": "The object ID of the created table.", "type": "string" - }, - "objectIds": { - "description": "The object being duplicated may contain other objects, for example when\nduplicating a slide or a group page element. This map defines how the IDs\nof duplicated objects are generated: the keys are the IDs of the original\nobjects and its values are the IDs that will be assigned to the\ncorresponding duplicate object. The ID of the source object's duplicate\nmay be specified in this map as well, using the same value of the\n`object_id` field as a key and the newly desired ID as the value.\n\nAll keys must correspond to existing IDs in the presentation. All values\nmust be unique in the presentation and must start with an alphanumeric\ncharacter or an underscore (matches regex `[a-zA-Z0-9_]`); remaining\ncharacters may include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`). The length of the new ID must not be less than 5 or\ngreater than 50.\n\nIf any IDs of source objects are omitted from the map, a new random ID will\nbe assigned. If the map is empty or unset, all duplicate objects will\nreceive a new random ID.", - "additionalProperties": { - "type": "string" - }, - "type": "object" } }, - "id": "DuplicateObjectRequest" + "id": "CreateTableResponse" }, - "SheetsChart": { - "description": "A PageElement kind representing\na linked chart embedded from Google Sheets.", + "CreateTableRequest": { + "description": "Creates a new table.", "type": "object", "properties": { - "chartId": { - "description": "The ID of the specific chart in the Google Sheets spreadsheet that is\nembedded.", - "type": "integer", - "format": "int32" - }, - "spreadsheetId": { - "description": "The ID of the Google Sheets spreadsheet that contains the source chart.", + "objectId": { + "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. 
The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", "type": "string" }, - "sheetsChartProperties": { - "description": "The properties of the Sheets chart.", - "$ref": "SheetsChartProperties" + "columns": { + "description": "Number of columns in the table.", + "format": "int32", + "type": "integer" }, - "contentUrl": { - "description": "The URL of an image of the embedded chart, with a default lifetime of 30\nminutes. This URL is tagged with the account of the requester. Anyone with\nthe URL effectively accesses the image as the original requester. Access to\nthe image may be lost if the presentation's sharing settings change.", - "type": "string" + "elementProperties": { + "description": "The element properties for the table.\n\nThe table will be created at the provided size, subject to a minimum size.\nIf no size is provided, the table will be automatically sized.\n\nTable transforms must have a scale of 1 and no shear components. If no\ntransform is provided, the table will be centered on the page.", + "$ref": "PageElementProperties" + }, + "rows": { + "description": "Number of rows in the table.", + "format": "int32", + "type": "integer" } }, - "id": "SheetsChart" + "id": "CreateTableRequest" }, - "PageProperties": { - "description": "The properties of the Page.\n\nThe page will inherit properties from the parent page. Depending on the page\ntype the hierarchy is defined in either\nSlideProperties or\nLayoutProperties.", + "Table": { + "description": "A PageElement kind representing a\ntable.", "type": "object", "properties": { - "pageBackgroundFill": { - "description": "The background fill of the page. If unset, the background fill is inherited\nfrom a parent page if it exists. If the page has no parent, then the\nbackground fill defaults to the corresponding fill in the Slides editor.", - "$ref": "PageBackgroundFill" + "rows": { + "description": "Number of rows in the table.", + "format": "int32", + "type": "integer" }, - "colorScheme": { - "description": "The color scheme of the page. If unset, the color scheme is inherited from\na parent page. If the page has no parent, the color scheme uses a default\nSlides color scheme. This field is read-only.", - "$ref": "ColorScheme" + "tableColumns": { + "description": "Properties of each column.", + "type": "array", + "items": { + "$ref": "TableColumnProperties" + } + }, + "columns": { + "description": "Number of columns in the table.", + "format": "int32", + "type": "integer" + }, + "tableRows": { + "description": "Properties and contents of each row.\n\nCells that span multiple rows are contained in only one of these rows and\nhave a row_span greater\nthan 1.", + "type": "array", + "items": { + "$ref": "TableRow" + } } }, - "id": "PageProperties" + "id": "Table" }, - "Shadow": { - "description": "The shadow properties of a page element.\n\nIf these fields are unset, they may be inherited from a parent placeholder\nif it exists. 
If there is no parent, the fields will default to the value\nused for new page elements created in the Slides editor, which may depend on\nthe page element kind.", + "PageBackgroundFill": { + "description": "The page background fill.", "type": "object", "properties": { - "alignment": { - "description": "The alignment point of the shadow, that sets the origin for translate,\nscale and skew of the shadow.", - "enum": [ - "RECTANGLE_POSITION_UNSPECIFIED", - "TOP_LEFT", - "TOP_CENTER", - "TOP_RIGHT", - "LEFT_CENTER", - "CENTER", - "RIGHT_CENTER", - "BOTTOM_LEFT", - "BOTTOM_CENTER", - "BOTTOM_RIGHT" - ], + "propertyState": { + "description": "The background fill property state.\n\nUpdating the the fill on a page will implicitly update this field to\n`RENDERED`, unless another value is specified in the same request. To\nhave no fill on a page, set this field to `NOT_RENDERED`. In this case,\nany other fill fields set in the same request will be ignored.", + "type": "string", "enumDescriptions": [ - "Unspecified.", - "Top left.", - "Top center.", - "Top right.", - "Left center.", - "Center.", - "Right center.", - "Bottom left.", - "Bottom center.", - "Bottom right." + "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", + "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", + "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." ], - "type": "string" - }, - "rotateWithShape": { - "description": "Whether the shadow should rotate with the shape.", - "type": "boolean" + "enum": [ + "RENDERED", + "NOT_RENDERED", + "INHERIT" + ] }, - "transform": { - "description": "Transform that encodes the translate, scale, and skew of the shadow,\nrelative to the alignment position.", - "$ref": "AffineTransform" + "stretchedPictureFill": { + "description": "Stretched picture fill.", + "$ref": "StretchedPictureFill" }, - "color": { - "description": "The shadow color value.", - "$ref": "OpaqueColor" + "solidFill": { + "$ref": "SolidFill", + "description": "Solid color fill." + } + }, + "id": "PageBackgroundFill" + }, + "SheetsChart": { + "description": "A PageElement kind representing\na linked chart embedded from Google Sheets.", + "type": "object", + "properties": { + "chartId": { + "description": "The ID of the specific chart in the Google Sheets spreadsheet that is\nembedded.", + "format": "int32", + "type": "integer" }, - "blurRadius": { - "description": "The radius of the shadow blur. The larger the radius, the more diffuse the\nshadow becomes.", - "$ref": "Dimension" + "sheetsChartProperties": { + "$ref": "SheetsChartProperties", + "description": "The properties of the Sheets chart." 
}, - "propertyState": { - "description": "The shadow property state.\n\nUpdating the the shadow on a page element will implicitly update this field\nto `RENDERED`, unless another value is specified in the same request. To\nhave no shadow on a page element, set this field to `NOT_RENDERED`. In this\ncase, any other shadow fields set in the same request will be ignored.", - "enum": [ - "RENDERED", - "NOT_RENDERED", - "INHERIT" - ], - "enumDescriptions": [ - "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", - "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", - "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." - ], + "contentUrl": { + "description": "The URL of an image of the embedded chart, with a default lifetime of 30\nminutes. This URL is tagged with the account of the requester. Anyone with\nthe URL effectively accesses the image as the original requester. Access to\nthe image may be lost if the presentation's sharing settings change.", "type": "string" }, + "spreadsheetId": { + "description": "The ID of the Google Sheets spreadsheet that contains the source chart.", + "type": "string" + } + }, + "id": "SheetsChart" + }, + "SolidFill": { + "description": "A solid color fill. The page or page element is filled entirely with the\nspecified color value.\n\nIf any field is unset, its value may be inherited from a parent placeholder\nif it exists.", + "type": "object", + "properties": { "alpha": { - "description": "The alpha of the shadow's color, from 0.0 to 1.0.", - "type": "number", - "format": "float" + "description": "The fraction of this `color` that should be applied to the pixel.\nThat is, the final pixel color is defined by the equation:\n\n pixel color = alpha * (color) + (1.0 - alpha) * (background color)\n\nThis means that a value of 1.0 corresponds to a solid color, whereas\na value of 0.0 corresponds to a completely transparent color.", + "format": "float", + "type": "number" + }, + "color": { + "$ref": "OpaqueColor", + "description": "The color value of the solid fill." + } + }, + "id": "SolidFill" + }, + "ThemeColorPair": { + "description": "A pair mapping a theme color type to the concrete color it represents.", + "type": "object", + "properties": { + "color": { + "$ref": "RgbColor", + "description": "The concrete color corresponding to the theme color type above." }, "type": { - "description": "The type of the shadow.", - "enum": [ - "SHADOW_TYPE_UNSPECIFIED", - "OUTER" - ], "enumDescriptions": [ - "Unspecified shadow type.", - "Outer shadow." + "Unspecified theme color. 
This value should not be used.", + "Represents the first dark color.", + "Represents the first light color.", + "Represents the second dark color.", + "Represents the second light color.", + "Represents the first accent color.", + "Represents the second accent color.", + "Represents the third accent color.", + "Represents the fourth accent color.", + "Represents the fifth accent color.", + "Represents the sixth accent color.", + "Represents the color to use for hyperlinks.", + "Represents the color to use for visited hyperlinks.", + "Represents the first text color.", + "Represents the first background color.", + "Represents the second text color.", + "Represents the second background color." + ], + "enum": [ + "THEME_COLOR_TYPE_UNSPECIFIED", + "DARK1", + "LIGHT1", + "DARK2", + "LIGHT2", + "ACCENT1", + "ACCENT2", + "ACCENT3", + "ACCENT4", + "ACCENT5", + "ACCENT6", + "HYPERLINK", + "FOLLOWED_HYPERLINK", + "TEXT1", + "BACKGROUND1", + "TEXT2", + "BACKGROUND2" ], + "description": "The type of the theme color.", "type": "string" } }, - "id": "Shadow" + "id": "ThemeColorPair" }, - "LayoutReference": { - "description": "Slide layout reference. This may reference either:\n\n- A predefined layout\n- One of the layouts in the presentation.", + "OptionalColor": { + "description": "A color that can either be fully opaque or fully transparent.", "type": "object", "properties": { - "predefinedLayout": { - "description": "Predefined layout.", - "enum": [ - "PREDEFINED_LAYOUT_UNSPECIFIED", - "BLANK", - "CAPTION_ONLY", - "TITLE", - "TITLE_AND_BODY", - "TITLE_AND_TWO_COLUMNS", - "TITLE_ONLY", - "SECTION_HEADER", - "SECTION_TITLE_AND_DESCRIPTION", - "ONE_COLUMN_TEXT", - "MAIN_POINT", - "BIG_NUMBER" - ], - "enumDescriptions": [ - "Unspecified layout.", - "Blank layout, with no placeholders.", - "Layout with a caption at the bottom.", - "Layout with a title and a subtitle.", - "Layout with a title and body.", - "Layout with a title and two columns.", - "Layout with only a title.", - "Layout with a section title.", - "Layout with a title and subtitle on one side and description on the other.", - "Layout with one title and one body, arranged in a single column.", - "Layout with a main point.", - "Layout with a big number heading." - ], - "type": "string" - }, - "layoutId": { - "description": "Layout ID: the object ID of one of the layouts in the presentation.", - "type": "string" + "opaqueColor": { + "$ref": "OpaqueColor", + "description": "If set, this will be used as an opaque color. If unset, this represents\na transparent color." } }, - "id": "LayoutReference" + "id": "OptionalColor" }, - "CreateSheetsChartResponse": { - "description": "The result of creating an embedded Google Sheets chart.", + "PageElementProperties": { + "description": "Common properties for a page element.\n\nNote: When you initially create a\nPageElement, the API may modify\nthe values of both `size` and `transform`, but the\nvisual size will be unchanged.", "type": "object", "properties": { - "objectId": { - "description": "The object ID of the created chart.", + "size": { + "description": "The size of the element.", + "$ref": "Size" + }, + "transform": { + "$ref": "AffineTransform", + "description": "The transform for the element." 
+ }, + "pageObjectId": { + "description": "The object ID of the page where the element is located.", "type": "string" } }, - "id": "CreateSheetsChartResponse" + "id": "PageElementProperties" }, - "ReplaceAllTextResponse": { - "description": "The result of replacing text.", + "SheetsChartProperties": { + "description": "The properties of the SheetsChart.", "type": "object", "properties": { - "occurrencesChanged": { - "description": "The number of occurrences changed by replacing all text.", - "type": "integer", - "format": "int32" + "chartImageProperties": { + "description": "The properties of the embedded chart image.", + "$ref": "ImageProperties" } }, - "id": "ReplaceAllTextResponse" + "id": "SheetsChartProperties" }, - "LayoutProperties": { - "description": "The properties of Page are only\nrelevant for pages with page_type LAYOUT.", + "StretchedPictureFill": { + "description": "The stretched picture fill. The page or page element is filled entirely with\nthe specified picture. The picture is stretched to fit its container.", "type": "object", "properties": { - "displayName": { - "description": "The human readable name of the layout in the presentation's locale.", - "type": "string" - }, - "masterObjectId": { - "description": "The object ID of the master that this layout is based on.", + "contentUrl": { + "description": "Reading the content_url:\n\nAn URL to a picture with a default lifetime of 30 minutes.\nThis URL is tagged with the account of the requester. Anyone with the URL\neffectively accesses the picture as the original requester. Access to the\npicture may be lost if the presentation's sharing settings change.\n\nWriting the content_url:\n\nThe picture is fetched once at insertion time and a copy is stored for\ndisplay inside the presentation. Pictures must be less than 50MB in size,\ncannot exceed 25 megapixels, and must be in either in PNG, JPEG, or GIF\nformat.", "type": "string" }, - "name": { - "description": "The name of the layout.", - "type": "string" + "size": { + "$ref": "Size", + "description": "The original size of the picture fill. This field is read-only." } }, - "id": "LayoutProperties" + "id": "StretchedPictureFill" }, - "InsertTableColumnsRequest": { - "description": "Inserts columns into a table.\n\nOther columns in the table will be resized to fit the new column.", + "UpdateTextStyleRequest": { + "description": "Update the styling of text in a Shape or\nTable.", "type": "object", "properties": { - "tableObjectId": { - "description": "The table to insert columns into.", + "objectId": { + "description": "The object ID of the shape or table with the text to be styled.", "type": "string" }, - "insertRight": { - "description": "Whether to insert new columns to the right of the reference cell location.\n\n- `True`: insert to the right.\n- `False`: insert to the left.", - "type": "boolean" + "textRange": { + "description": "The range of text to style.\n\nThe range may be extended to include adjacent newlines.\n\nIf the range fully contains a paragraph belonging to a list, the\nparagraph's bullet is also updated with the matching text style.", + "$ref": "Range" }, "cellLocation": { - "description": "The reference table cell location from which columns will be inserted.\n\nA new column will be inserted to the left (or right) of the column where\nthe reference cell is. 
If the reference cell is a merged cell, a new\ncolumn will be inserted to the left (or right) of the merged cell.", - "$ref": "TableCellLocation" + "$ref": "TableCellLocation", + "description": "The location of the cell in the table containing the text to style. If\nobject_id refers to a table, cell_location must have a value. Otherwise, it\nmust not." }, - "number": { - "description": "The number of columns to be inserted. Maximum 20 per request.", - "type": "integer", - "format": "int32" + "style": { + "description": "The style(s) to set on the text.\n\nIf the value for a particular style matches that of the parent, that style\nwill be set to inherit.\n\nCertain text style changes may cause other changes meant to mirror the\nbehavior of the Slides editor. See the documentation of\nTextStyle for more information.", + "$ref": "TextStyle" + }, + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `style` is implied and\nshould not be specified. A single `\"*\"` can be used as short-hand for\nlisting every field.\n\nFor example, to update the text style to bold, set `fields` to `\"bold\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", + "type": "string" } }, - "id": "InsertTableColumnsRequest" + "id": "UpdateTextStyleRequest" }, "DeleteTableColumnRequest": { "description": "Deletes a column from a table.", "type": "object", "properties": { + "cellLocation": { + "$ref": "TableCellLocation", + "description": "The reference table cell location from which a column will be deleted.\n\nThe column this cell spans will be deleted. If this is a merged cell,\nmultiple columns will be deleted. If no columns remain in the table after\nthis deletion, the whole table is deleted." + }, "tableObjectId": { "description": "The table to delete columns from.", "type": "string" - }, - "cellLocation": { - "description": "The reference table cell location from which a column will be deleted.\n\nThe column this cell spans will be deleted. If this is a merged cell,\nmultiple columns will be deleted. If no columns remain in the table after\nthis deletion, the whole table is deleted.", - "$ref": "TableCellLocation" } }, "id": "DeleteTableColumnRequest" }, - "TableRow": { - "description": "Properties and contents of each row in a table.", + "List": { + "description": "A List describes the look and feel of bullets belonging to paragraphs\nassociated with a list. A paragraph that is part of a list has an implicit\nreference to that list's ID.", "type": "object", "properties": { - "rowHeight": { - "description": "Height of a row.", - "$ref": "Dimension" + "nestingLevel": { + "description": "A map of nesting levels to the properties of bullets at the associated\nlevel. A list has at most nine levels of nesting, so the possible values\nfor the keys of this map are 0 through 8, inclusive.", + "type": "object", + "additionalProperties": { + "$ref": "NestingLevel" + } + }, + "listId": { + "description": "The ID of the list.", + "type": "string" + } + }, + "id": "List" + }, + "PageElement": { + "description": "A visual element rendered on a page.", + "type": "object", + "properties": { + "video": { + "description": "A video page element.", + "$ref": "Video" + }, + "wordArt": { + "description": "A word art page element.", + "$ref": "WordArt" + }, + "table": { + "$ref": "Table", + "description": "A table page element." 
+ }, + "transform": { + "$ref": "AffineTransform", + "description": "The transform of the page element." + }, + "objectId": { + "description": "The object ID for this page element. Object IDs used by\ngoogle.apps.slides.v1.Page and\ngoogle.apps.slides.v1.PageElement share the same namespace.", + "type": "string" + }, + "shape": { + "description": "A generic shape.", + "$ref": "Shape" + }, + "line": { + "$ref": "Line", + "description": "A line page element." + }, + "description": { + "description": "The description of the page element. Combined with title to display alt\ntext.", + "type": "string" }, - "tableCells": { - "description": "Properties and contents of each cell.\n\nCells that span multiple columns are represented only once with a\ncolumn_span greater\nthan 1. As a result, the length of this collection does not always match\nthe number of columns of the entire table.", - "type": "array", - "items": { - "$ref": "TableCell" - } + "elementGroup": { + "$ref": "Group", + "description": "A collection of page elements joined as a single unit." + }, + "image": { + "description": "An image page element.", + "$ref": "Image" + }, + "size": { + "$ref": "Size", + "description": "The size of the page element." + }, + "sheetsChart": { + "description": "A linked chart embedded from Google Sheets. Unlinked charts are\nrepresented as images.", + "$ref": "SheetsChart" + }, + "title": { + "description": "The title of the page element. Combined with description to display alt\ntext.", + "type": "string" } }, - "id": "TableRow" + "id": "PageElement" }, - "AffineTransform": { - "description": "AffineTransform uses a 3x3 matrix with an implied last row of [ 0 0 1 ]\nto transform source coordinates (x,y) into destination coordinates (x', y')\naccording to:\n\n x' x = shear_y scale_y translate_y \n 1 [ 1 ]\n\nAfter transformation,\n\n x' = scale_x * x + shear_x * y + translate_x;\n y' = scale_y * y + shear_y * x + translate_y;\n\nThis message is therefore composed of these six matrix elements.", + "CreateImageRequest": { + "description": "Creates an image.", "type": "object", "properties": { - "unit": { - "description": "The units for translate elements.", - "enum": [ - "UNIT_UNSPECIFIED", - "EMU", - "PT" - ], - "enumDescriptions": [ - "The units are unknown.", - "An English Metric Unit (EMU) is defined as 1/360,000 of a centimeter\nand thus there are 914,400 EMUs per inch, and 12,700 EMUs per point.", - "A point, 1/72 of an inch." - ], + "objectId": { + "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. 
The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", "type": "string" }, - "shearY": { - "description": "The Y coordinate shearing element.", - "type": "number", - "format": "double" - }, - "translateX": { - "description": "The X coordinate translation element.", - "type": "number", - "format": "double" - }, - "shearX": { - "description": "The X coordinate shearing element.", - "type": "number", - "format": "double" - }, - "scaleY": { - "description": "The Y coordinate scaling element.", - "type": "number", - "format": "double" - }, - "scaleX": { - "description": "The X coordinate scaling element.", - "type": "number", - "format": "double" + "elementProperties": { + "$ref": "PageElementProperties", + "description": "The element properties for the image.\n\nWhen the aspect ratio of the provided size does not match the image aspect\nratio, the image is scaled and centered with respect to the size in order\nto maintain aspect ratio. The provided transform is applied after this\noperation." }, - "translateY": { - "description": "The Y coordinate translation element.", - "type": "number", - "format": "double" + "url": { + "description": "The image URL.\n\nThe image is fetched once at insertion time and a copy is stored for\ndisplay inside the presentation. Images must be less than 50MB in size,\ncannot exceed 25 megapixels, and must be in either in PNG, JPEG, or GIF\nformat.", + "type": "string" } }, - "id": "AffineTransform" + "id": "CreateImageRequest" }, - "CreateShapeRequest": { - "description": "Creates a new shape.", + "CreateParagraphBulletsRequest": { + "description": "Creates bullets for all of the paragraphs that overlap with the given\ntext index range.\n\nThe nesting level of each paragraph will be determined by counting leading\ntabs in front of each paragraph. To avoid excess space between the bullet and\nthe corresponding paragraph, these leading tabs are removed by this request.\nThis may change the indices of parts of the text.\n\nIf the paragraph immediately before paragraphs being updated is in a list\nwith a matching preset, the paragraphs being updated are added to that\npreceding list.", "type": "object", "properties": { "objectId": { - "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. 
The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\nIf empty, a unique identifier will be generated.", + "description": "The object ID of the shape or table containing the text to add bullets to.", "type": "string" }, - "shapeType": { - "description": "The shape type.", - "enum": [ - "TYPE_UNSPECIFIED", - "TEXT_BOX", - "RECTANGLE", - "ROUND_RECTANGLE", - "ELLIPSE", - "ARC", - "BENT_ARROW", - "BENT_UP_ARROW", - "BEVEL", - "BLOCK_ARC", - "BRACE_PAIR", - "BRACKET_PAIR", - "CAN", - "CHEVRON", - "CHORD", - "CLOUD", - "CORNER", - "CUBE", - "CURVED_DOWN_ARROW", - "CURVED_LEFT_ARROW", - "CURVED_RIGHT_ARROW", - "CURVED_UP_ARROW", - "DECAGON", - "DIAGONAL_STRIPE", - "DIAMOND", - "DODECAGON", - "DONUT", - "DOUBLE_WAVE", - "DOWN_ARROW", - "DOWN_ARROW_CALLOUT", - "FOLDED_CORNER", - "FRAME", - "HALF_FRAME", - "HEART", - "HEPTAGON", - "HEXAGON", - "HOME_PLATE", - "HORIZONTAL_SCROLL", - "IRREGULAR_SEAL_1", - "IRREGULAR_SEAL_2", - "LEFT_ARROW", - "LEFT_ARROW_CALLOUT", - "LEFT_BRACE", - "LEFT_BRACKET", - "LEFT_RIGHT_ARROW", - "LEFT_RIGHT_ARROW_CALLOUT", - "LEFT_RIGHT_UP_ARROW", - "LEFT_UP_ARROW", - "LIGHTNING_BOLT", - "MATH_DIVIDE", - "MATH_EQUAL", - "MATH_MINUS", - "MATH_MULTIPLY", - "MATH_NOT_EQUAL", - "MATH_PLUS", - "MOON", - "NO_SMOKING", - "NOTCHED_RIGHT_ARROW", - "OCTAGON", - "PARALLELOGRAM", - "PENTAGON", - "PIE", - "PLAQUE", - "PLUS", - "QUAD_ARROW", - "QUAD_ARROW_CALLOUT", - "RIBBON", - "RIBBON_2", - "RIGHT_ARROW", - "RIGHT_ARROW_CALLOUT", - "RIGHT_BRACE", - "RIGHT_BRACKET", - "ROUND_1_RECTANGLE", - "ROUND_2_DIAGONAL_RECTANGLE", - "ROUND_2_SAME_RECTANGLE", - "RIGHT_TRIANGLE", - "SMILEY_FACE", - "SNIP_1_RECTANGLE", - "SNIP_2_DIAGONAL_RECTANGLE", - "SNIP_2_SAME_RECTANGLE", - "SNIP_ROUND_RECTANGLE", - "STAR_10", - "STAR_12", - "STAR_16", - "STAR_24", - "STAR_32", - "STAR_4", - "STAR_5", - "STAR_6", - "STAR_7", - "STAR_8", - "STRIPED_RIGHT_ARROW", - "SUN", - "TRAPEZOID", - "TRIANGLE", - "UP_ARROW", - "UP_ARROW_CALLOUT", - "UP_DOWN_ARROW", - "UTURN_ARROW", - "VERTICAL_SCROLL", - "WAVE", - "WEDGE_ELLIPSE_CALLOUT", - "WEDGE_RECTANGLE_CALLOUT", - "WEDGE_ROUND_RECTANGLE_CALLOUT", - "FLOW_CHART_ALTERNATE_PROCESS", - "FLOW_CHART_COLLATE", - "FLOW_CHART_CONNECTOR", - "FLOW_CHART_DECISION", - "FLOW_CHART_DELAY", - "FLOW_CHART_DISPLAY", - "FLOW_CHART_DOCUMENT", - "FLOW_CHART_EXTRACT", - "FLOW_CHART_INPUT_OUTPUT", - "FLOW_CHART_INTERNAL_STORAGE", - "FLOW_CHART_MAGNETIC_DISK", - "FLOW_CHART_MAGNETIC_DRUM", - "FLOW_CHART_MAGNETIC_TAPE", - "FLOW_CHART_MANUAL_INPUT", - "FLOW_CHART_MANUAL_OPERATION", - "FLOW_CHART_MERGE", - "FLOW_CHART_MULTIDOCUMENT", - "FLOW_CHART_OFFLINE_STORAGE", - "FLOW_CHART_OFFPAGE_CONNECTOR", - "FLOW_CHART_ONLINE_STORAGE", - "FLOW_CHART_OR", - "FLOW_CHART_PREDEFINED_PROCESS", - "FLOW_CHART_PREPARATION", - "FLOW_CHART_PROCESS", - "FLOW_CHART_PUNCHED_CARD", - "FLOW_CHART_PUNCHED_TAPE", - "FLOW_CHART_SORT", - "FLOW_CHART_SUMMING_JUNCTION", - "FLOW_CHART_TERMINATOR", - "ARROW_EAST", - "ARROW_NORTH_EAST", - "ARROW_NORTH", - "SPEECH", - "STARBURST", - "TEARDROP", - "ELLIPSE_RIBBON", - "ELLIPSE_RIBBON_2", - "CLOUD_CALLOUT", - "CUSTOM" + "textRange": { + "description": "The range of text to apply the bullet presets to, based on TextElement indexes.", + "$ref": "Range" + }, + "bulletPreset": { + "description": "The kinds of bullet glyphs to be used. 
Defaults to the\n`BULLET_DISC_CIRCLE_SQUARE` preset.", + "type": "string", + "enumDescriptions": [ + "A bulleted list with a `DISC`, `CIRCLE` and `SQUARE` bullet glyph for the\nfirst 3 list nesting levels.", + "A bulleted list with a `DIAMONDX`, `ARROW3D` and `SQUARE` bullet glyph for\nthe first 3 list nesting levels.", + "A bulleted list with `CHECKBOX` bullet glyphs for all list nesting levels.", + "A bulleted list with a `ARROW`, `DIAMOND` and `DISC` bullet glyph for\nthe first 3 list nesting levels.", + "A bulleted list with a `STAR`, `CIRCLE` and `DISC` bullet glyph for\nthe first 3 list nesting levels.", + "A bulleted list with a `ARROW3D`, `CIRCLE` and `SQUARE` bullet glyph for\nthe first 3 list nesting levels.", + "A bulleted list with a `LEFTTRIANGLE`, `DIAMOND` and `DISC` bullet glyph\nfor the first 3 list nesting levels.", + "A bulleted list with a `DIAMONDX`, `HOLLOWDIAMOND` and `SQUARE` bullet\nglyph for the first 3 list nesting levels.", + "A bulleted list with a `DIAMOND`, `CIRCLE` and `SQUARE` bullet glyph\nfor the first 3 list nesting levels.", + "A numbered list with `DIGIT`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by periods.", + "A numbered list with `DIGIT`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by parenthesis.", + "A numbered list with `DIGIT` numeric glyphs separated by periods, where\neach nesting level uses the previous nesting level's glyph as a prefix.\nFor example: '1.', '1.1.', '2.', '2.2.'.", + "A numbered list with `UPPERALPHA`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by periods.", + "A numbered list with `UPPERROMAN`, `UPPERALPHA` and `DIGIT` numeric glyphs\nfor the first 3 list nesting levels, followed by periods.", + "A numbered list with `ZERODIGIT`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by periods." ], + "enum": [ + "BULLET_DISC_CIRCLE_SQUARE", + "BULLET_DIAMONDX_ARROW3D_SQUARE", + "BULLET_CHECKBOX", + "BULLET_ARROW_DIAMOND_DISC", + "BULLET_STAR_CIRCLE_SQUARE", + "BULLET_ARROW3D_CIRCLE_SQUARE", + "BULLET_LEFTTRIANGLE_DIAMOND_DISC", + "BULLET_DIAMONDX_HOLLOWDIAMOND_SQUARE", + "BULLET_DIAMOND_CIRCLE_SQUARE", + "NUMBERED_DIGIT_ALPHA_ROMAN", + "NUMBERED_DIGIT_ALPHA_ROMAN_PARENS", + "NUMBERED_DIGIT_NESTED", + "NUMBERED_UPPERALPHA_ALPHA_ROMAN", + "NUMBERED_UPPERROMAN_UPPERALPHA_DIGIT", + "NUMBERED_ZERODIGIT_ALPHA_ROMAN" + ] + }, + "cellLocation": { + "description": "The optional table cell location if the text to be modified is in a table\ncell. If present, the object_id must refer to a table.", + "$ref": "TableCellLocation" + } + }, + "id": "CreateParagraphBulletsRequest" + }, + "TextStyle": { + "description": "Represents the styling that can be applied to a TextRun.\n\nIf this text is contained in a shape with a parent placeholder, then these text styles may be\ninherited from the parent. Which text styles are inherited depend on the\nnesting level of lists:\n\n* A text run in a paragraph that is not in a list will inherit its text style\n from the the newline character in the paragraph at the 0 nesting level of\n the list inside the parent placeholder.\n* A text run in a paragraph that is in a list will inherit its text style\n from the newline character in the paragraph at its corresponding nesting\n level of the list inside the parent placeholder.\n\nInherited text styles are represented as unset fields in this message. 
If\ntext is contained in a shape without a parent placeholder, unsetting these\nfields will revert the style to a value matching the defaults in the Slides\neditor.", + "type": "object", + "properties": { + "fontFamily": { + "description": "The font family of the text.\n\nThe font family can be any font from the Font menu in Slides or from\n[Google Fonts] (https://fonts.google.com/). If the font name is\nunrecognized, the text is rendered in `Arial`.\n\nSome fonts can affect the weight of the text. If an update request\nspecifies values for both `font_family` and `bold`, the explicitly-set\n`bold` value is used.", + "type": "string" + }, + "strikethrough": { + "description": "Whether or not the text is struck through.", + "type": "boolean" + }, + "italic": { + "description": "Whether or not the text is italicized.", + "type": "boolean" + }, + "fontSize": { + "description": "The size of the text's font. When read, the `font_size` will specified in\npoints.", + "$ref": "Dimension" + }, + "smallCaps": { + "description": "Whether or not the text is in small capital letters.", + "type": "boolean" + }, + "baselineOffset": { + "description": "The text's vertical offset from its normal position.\n\nText with `SUPERSCRIPT` or `SUBSCRIPT` baseline offsets is automatically\nrendered in a smaller font size, computed based on the `font_size` field.\nThe `font_size` itself is not affected by changes in this field.", + "type": "string", "enumDescriptions": [ - "The shape type that is not predefined.", - "Text box shape.", - "Rectangle shape. Corresponds to ECMA-376 ST_ShapeType 'rect'.", - "Round corner rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'roundRect'", - "Ellipse shape. Corresponds to ECMA-376 ST_ShapeType 'ellipse'", - "Curved arc shape. Corresponds to ECMA-376 ST_ShapeType 'arc'", - "Bent arrow shape. Corresponds to ECMA-376 ST_ShapeType 'bentArrow'", - "Bent up arrow shape. Corresponds to ECMA-376 ST_ShapeType 'bentUpArrow'", - "Bevel shape. Corresponds to ECMA-376 ST_ShapeType 'bevel'", - "Block arc shape. Corresponds to ECMA-376 ST_ShapeType 'blockArc'", - "Brace pair shape. Corresponds to ECMA-376 ST_ShapeType 'bracePair'", - "Bracket pair shape. Corresponds to ECMA-376 ST_ShapeType 'bracketPair'", - "Can shape. Corresponds to ECMA-376 ST_ShapeType 'can'", - "Chevron shape. Corresponds to ECMA-376 ST_ShapeType 'chevron'", - "Chord shape. Corresponds to ECMA-376 ST_ShapeType 'chord'", - "Cloud shape. Corresponds to ECMA-376 ST_ShapeType 'cloud'", - "Corner shape. Corresponds to ECMA-376 ST_ShapeType 'corner'", - "Cube shape. Corresponds to ECMA-376 ST_ShapeType 'cube'", - "Curved down arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedDownArrow'", - "Curved left arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedLeftArrow'", - "Curved right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedRightArrow'", - "Curved up arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'curvedUpArrow'", - "Decagon shape. Corresponds to ECMA-376 ST_ShapeType 'decagon'", - "Diagonal stripe shape. Corresponds to ECMA-376 ST_ShapeType 'diagStripe'", - "Diamond shape. Corresponds to ECMA-376 ST_ShapeType 'diamond'", - "Dodecagon shape. Corresponds to ECMA-376 ST_ShapeType 'dodecagon'", - "Donut shape. Corresponds to ECMA-376 ST_ShapeType 'donut'", - "Double wave shape. Corresponds to ECMA-376 ST_ShapeType 'doubleWave'", - "Down arrow shape. Corresponds to ECMA-376 ST_ShapeType 'downArrow'", - "Callout down arrow shape. 
Corresponds to ECMA-376 ST_ShapeType\n'downArrowCallout'", - "Folded corner shape. Corresponds to ECMA-376 ST_ShapeType 'foldedCorner'", - "Frame shape. Corresponds to ECMA-376 ST_ShapeType 'frame'", - "Half frame shape. Corresponds to ECMA-376 ST_ShapeType 'halfFrame'", - "Heart shape. Corresponds to ECMA-376 ST_ShapeType 'heart'", - "Heptagon shape. Corresponds to ECMA-376 ST_ShapeType 'heptagon'", - "Hexagon shape. Corresponds to ECMA-376 ST_ShapeType 'hexagon'", - "Home plate shape. Corresponds to ECMA-376 ST_ShapeType 'homePlate'", - "Horizontal scroll shape. Corresponds to ECMA-376 ST_ShapeType\n'horizontalScroll'", - "Irregular seal 1 shape. Corresponds to ECMA-376 ST_ShapeType\n'irregularSeal1'", - "Irregular seal 2 shape. Corresponds to ECMA-376 ST_ShapeType\n'irregularSeal2'", - "Left arrow shape. Corresponds to ECMA-376 ST_ShapeType 'leftArrow'", - "Callout left arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftArrowCallout'", - "Left brace shape. Corresponds to ECMA-376 ST_ShapeType 'leftBrace'", - "Left bracket shape. Corresponds to ECMA-376 ST_ShapeType 'leftBracket'", - "Left right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftRightArrow'", - "Callout left right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftRightArrowCallout'", - "Left right up arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'leftRightUpArrow'", - "Left up arrow shape. Corresponds to ECMA-376 ST_ShapeType 'leftUpArrow'", - "Lightning bolt shape. Corresponds to ECMA-376 ST_ShapeType\n'lightningBolt'", - "Divide math shape. Corresponds to ECMA-376 ST_ShapeType 'mathDivide'", - "Equal math shape. Corresponds to ECMA-376 ST_ShapeType 'mathEqual'", - "Minus math shape. Corresponds to ECMA-376 ST_ShapeType 'mathMinus'", - "Multiply math shape. Corresponds to ECMA-376 ST_ShapeType 'mathMultiply'", - "Not equal math shape. Corresponds to ECMA-376 ST_ShapeType 'mathNotEqual'", - "Plus math shape. Corresponds to ECMA-376 ST_ShapeType 'mathPlus'", - "Moon shape. Corresponds to ECMA-376 ST_ShapeType 'moon'", - "No smoking shape. Corresponds to ECMA-376 ST_ShapeType 'noSmoking'", - "Notched right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'notchedRightArrow'", - "Octagon shape. Corresponds to ECMA-376 ST_ShapeType 'octagon'", - "Parallelogram shape. Corresponds to ECMA-376 ST_ShapeType 'parallelogram'", - "Pentagon shape. Corresponds to ECMA-376 ST_ShapeType 'pentagon'", - "Pie shape. Corresponds to ECMA-376 ST_ShapeType 'pie'", - "Plaque shape. Corresponds to ECMA-376 ST_ShapeType 'plaque'", - "Plus shape. Corresponds to ECMA-376 ST_ShapeType 'plus'", - "Quad-arrow shape. Corresponds to ECMA-376 ST_ShapeType 'quadArrow'", - "Callout quad-arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'quadArrowCallout'", - "Ribbon shape. Corresponds to ECMA-376 ST_ShapeType 'ribbon'", - "Ribbon 2 shape. Corresponds to ECMA-376 ST_ShapeType 'ribbon2'", - "Right arrow shape. Corresponds to ECMA-376 ST_ShapeType 'rightArrow'", - "Callout right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'rightArrowCallout'", - "Right brace shape. Corresponds to ECMA-376 ST_ShapeType 'rightBrace'", - "Right bracket shape. Corresponds to ECMA-376 ST_ShapeType 'rightBracket'", - "One round corner rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'round1Rect'", - "Two diagonal round corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'round2DiagRect'", - "Two same-side round corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'round2SameRect'", - "Right triangle shape. 
Corresponds to ECMA-376 ST_ShapeType 'rtTriangle'", - "Smiley face shape. Corresponds to ECMA-376 ST_ShapeType 'smileyFace'", - "One snip corner rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'snip1Rect'", - "Two diagonal snip corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'snip2DiagRect'", - "Two same-side snip corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'snip2SameRect'", - "One snip one round corner rectangle shape. Corresponds to ECMA-376\nST_ShapeType 'snipRoundRect'", - "Ten pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star10'", - "Twelve pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star12'", - "Sixteen pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star16'", - "Twenty four pointed star shape. Corresponds to ECMA-376 ST_ShapeType\n'star24'", - "Thirty two pointed star shape. Corresponds to ECMA-376 ST_ShapeType\n'star32'", - "Four pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star4'", - "Five pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star5'", - "Six pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star6'", - "Seven pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star7'", - "Eight pointed star shape. Corresponds to ECMA-376 ST_ShapeType 'star8'", - "Striped right arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'stripedRightArrow'", - "Sun shape. Corresponds to ECMA-376 ST_ShapeType 'sun'", - "Trapezoid shape. Corresponds to ECMA-376 ST_ShapeType 'trapezoid'", - "Triangle shape. Corresponds to ECMA-376 ST_ShapeType 'triangle'", - "Up arrow shape. Corresponds to ECMA-376 ST_ShapeType 'upArrow'", - "Callout up arrow shape. Corresponds to ECMA-376 ST_ShapeType\n'upArrowCallout'", - "Up down arrow shape. Corresponds to ECMA-376 ST_ShapeType 'upDownArrow'", - "U-turn arrow shape. Corresponds to ECMA-376 ST_ShapeType 'uturnArrow'", - "Vertical scroll shape. Corresponds to ECMA-376 ST_ShapeType\n'verticalScroll'", - "Wave shape. Corresponds to ECMA-376 ST_ShapeType 'wave'", - "Callout wedge ellipse shape. Corresponds to ECMA-376 ST_ShapeType\n'wedgeEllipseCallout'", - "Callout wedge rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'wedgeRectCallout'", - "Callout wedge round rectangle shape. Corresponds to ECMA-376 ST_ShapeType\n'wedgeRoundRectCallout'", - "Alternate process flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartAlternateProcess'", - "Collate flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartCollate'", - "Connector flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartConnector'", - "Decision flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartDecision'", - "Delay flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartDelay'", - "Display flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartDisplay'", - "Document flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartDocument'", - "Extract flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartExtract'", - "Input output flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartInputOutput'", - "Internal storage flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartInternalStorage'", - "Magnetic disk flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMagneticDisk'", - "Magnetic drum flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMagneticDrum'", - "Magnetic tape flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMagneticTape'", - "Manual input flow shape. 
Corresponds to ECMA-376 ST_ShapeType\n'flowChartManualInput'", - "Manual operation flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartManualOperation'", - "Merge flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartMerge'", - "Multi-document flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartMultidocument'", - "Offline storage flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartOfflineStorage'", - "Off-page connector flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartOffpageConnector'", - "Online storage flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartOnlineStorage'", - "Or flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartOr'", - "Predefined process flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPredefinedProcess'", - "Preparation flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPreparation'", - "Process flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartProcess'", - "Punched card flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPunchedCard'", - "Punched tape flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartPunchedTape'", - "Sort flow shape. Corresponds to ECMA-376 ST_ShapeType 'flowChartSort'", - "Summing junction flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartSummingJunction'", - "Terminator flow shape. Corresponds to ECMA-376 ST_ShapeType\n'flowChartTerminator'", - "East arrow shape.", - "Northeast arrow shape.", - "North arrow shape.", - "Speech shape.", - "Star burst shape.", - "Teardrop shape. Corresponds to ECMA-376 ST_ShapeType 'teardrop'", - "Ellipse ribbon shape. Corresponds to ECMA-376 ST_ShapeType\n'ellipseRibbon'", - "Ellipse ribbon 2 shape. Corresponds to ECMA-376 ST_ShapeType\n'ellipseRibbon2'", - "Callout cloud shape. Corresponds to ECMA-376 ST_ShapeType 'cloudCallout'", - "Custom shape." + "The text's baseline offset is inherited from the parent.", + "The text is not vertically offset.", + "The text is vertically offset upwards (superscript).", + "The text is vertically offset downwards (subscript)." ], + "enum": [ + "BASELINE_OFFSET_UNSPECIFIED", + "NONE", + "SUPERSCRIPT", + "SUBSCRIPT" + ] + }, + "backgroundColor": { + "description": "The background color of the text. If set, the color is either opaque or\ntransparent, depending on if the `opaque_color` field in it is set.", + "$ref": "OptionalColor" + }, + "link": { + "description": "The hyperlink destination of the text. If unset, there is no link. Links\nare not inherited from parent text.\n\nChanging the link in an update request causes some other changes to the\ntext style of the range:\n\n* When setting a link, the text foreground color will be set to\n ThemeColorType.HYPERLINK and the text will\n be underlined. If these fields are modified in the same\n request, those values will be used instead of the link defaults.\n* Setting a link on a text range that overlaps with an existing link will\n also update the existing link to point to the new URL.\n* Links are not settable on newline characters. As a result, setting a link\n on a text range that crosses a paragraph boundary, such as `\"ABC\\n123\"`,\n will separate the newline character(s) into their own text runs. 
The\n link will be applied separately to the runs before and after the newline.\n* Removing a link will update the text style of the range to match the\n style of the preceding text (or the default text styles if the preceding\n text is another link) unless different styles are being set in the same\n request.", + "$ref": "Link" + }, + "underline": { + "description": "Whether or not the text is underlined.", + "type": "boolean" + }, + "bold": { + "description": "Whether or not the text is rendered as bold.", + "type": "boolean" + }, + "foregroundColor": { + "description": "The color of the text itself. If set, the color is either opaque or\ntransparent, depending on if the `opaque_color` field in it is set.", + "$ref": "OptionalColor" + } + }, + "id": "TextStyle" + }, + "Size": { + "description": "A width and height.", + "type": "object", + "properties": { + "width": { + "$ref": "Dimension", + "description": "The width of the object." + }, + "height": { + "description": "The height of the object.", + "$ref": "Dimension" + } + }, + "id": "Size" + }, + "UpdateVideoPropertiesRequest": { + "description": "Update the properties of a Video.", + "type": "object", + "properties": { + "videoProperties": { + "$ref": "VideoProperties", + "description": "The video properties to update." + }, + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `videoProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the video outline color, set `fields` to\n`\"outline.outlineFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", + "type": "string" + }, + "objectId": { + "description": "The object ID of the video the updates are applied to.", + "type": "string" + } + }, + "id": "UpdateVideoPropertiesRequest" + }, + "Request": { + "description": "A single kind of update to apply to a presentation.", + "type": "object", + "properties": { + "updateLineProperties": { + "description": "Updates the properties of a Line.", + "$ref": "UpdateLinePropertiesRequest" + }, + "updateSlidesPosition": { + "$ref": "UpdateSlidesPositionRequest", + "description": "Updates the position of a set of slides in the presentation." + }, + "deleteTableRow": { + "description": "Deletes a row from a table.", + "$ref": "DeleteTableRowRequest" + }, + "updateShapeProperties": { + "$ref": "UpdateShapePropertiesRequest", + "description": "Updates the properties of a Shape." + }, + "insertText": { + "description": "Inserts text into a shape or table cell.", + "$ref": "InsertTextRequest" + }, + "deleteText": { + "$ref": "DeleteTextRequest", + "description": "Deletes text from a shape or a table cell." + }, + "updatePageProperties": { + "description": "Updates the properties of a Page.", + "$ref": "UpdatePagePropertiesRequest" + }, + "createShape": { + "description": "Creates a new shape.", + "$ref": "CreateShapeRequest" + }, + "deleteParagraphBullets": { + "$ref": "DeleteParagraphBulletsRequest", + "description": "Deletes bullets from paragraphs." 
+ }, + "insertTableColumns": { + "description": "Inserts columns into a table.", + "$ref": "InsertTableColumnsRequest" + }, + "refreshSheetsChart": { + "description": "Refreshes a Google Sheets chart.", + "$ref": "RefreshSheetsChartRequest" + }, + "updateTableCellProperties": { + "description": "Updates the properties of a TableCell.", + "$ref": "UpdateTableCellPropertiesRequest" + }, + "createTable": { + "description": "Creates a new table.", + "$ref": "CreateTableRequest" + }, + "deleteObject": { + "description": "Deletes a page or page element from the presentation.", + "$ref": "DeleteObjectRequest" + }, + "updateParagraphStyle": { + "$ref": "UpdateParagraphStyleRequest", + "description": "Updates the styling of paragraphs within a Shape or Table." + }, + "duplicateObject": { + "$ref": "DuplicateObjectRequest", + "description": "Duplicates a slide or page element." + }, + "deleteTableColumn": { + "$ref": "DeleteTableColumnRequest", + "description": "Deletes a column from a table." + }, + "createLine": { + "$ref": "CreateLineRequest", + "description": "Creates a line." + }, + "updateVideoProperties": { + "description": "Updates the properties of a Video.", + "$ref": "UpdateVideoPropertiesRequest" + }, + "createImage": { + "description": "Creates an image.", + "$ref": "CreateImageRequest" + }, + "createParagraphBullets": { + "$ref": "CreateParagraphBulletsRequest", + "description": "Creates bullets for paragraphs." + }, + "createVideo": { + "description": "Creates a video.", + "$ref": "CreateVideoRequest" + }, + "createSheetsChart": { + "description": "Creates an embedded Google Sheets chart.", + "$ref": "CreateSheetsChartRequest" + }, + "replaceAllShapesWithSheetsChart": { + "$ref": "ReplaceAllShapesWithSheetsChartRequest", + "description": "Replaces all shapes matching some criteria with a Google Sheets chart." + }, + "updatePageElementTransform": { + "$ref": "UpdatePageElementTransformRequest", + "description": "Updates the transform of a page element." + }, + "updateTextStyle": { + "$ref": "UpdateTextStyleRequest", + "description": "Updates the styling of text within a Shape or Table." + }, + "replaceAllShapesWithImage": { + "description": "Replaces all shapes matching some criteria with an image.", + "$ref": "ReplaceAllShapesWithImageRequest" + }, + "replaceAllText": { + "$ref": "ReplaceAllTextRequest", + "description": "Replaces all instances of specified text." + }, + "updateImageProperties": { + "$ref": "UpdateImagePropertiesRequest", + "description": "Updates the properties of an Image." + }, + "createSlide": { + "$ref": "CreateSlideRequest", + "description": "Creates a new slide." + }, + "insertTableRows": { + "description": "Inserts rows into a table.", + "$ref": "InsertTableRowsRequest" + } + }, + "id": "Request" + }, + "UpdateImagePropertiesRequest": { + "description": "Update the properties of an Image.", + "type": "object", + "properties": { + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `imageProperties` is\nimplied and should not be specified. 
A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the image outline color, set `fields` to\n`\"outline.outlineFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", "type": "string" }, - "elementProperties": { - "description": "The element properties for the shape.", - "$ref": "PageElementProperties" + "imageProperties": { + "$ref": "ImageProperties", + "description": "The image properties to update." + }, + "objectId": { + "description": "The object ID of the image the updates are applied to.", + "type": "string" } }, - "id": "CreateShapeRequest" + "id": "UpdateImagePropertiesRequest" }, - "ShapeBackgroundFill": { - "description": "The shape background fill.", + "ParagraphStyle": { + "description": "Styles that apply to a whole paragraph.\n\nIf this text is contained in a shape with a parent placeholder, then these paragraph styles may be\ninherited from the parent. Which paragraph styles are inherited depend on the\nnesting level of lists:\n\n* A paragraph not in a list will inherit its paragraph style from the\n paragraph at the 0 nesting level of the list inside the parent placeholder.\n* A paragraph in a list will inherit its paragraph style from the paragraph\n at its corresponding nesting level of the list inside the parent\n placeholder.\n\nInherited paragraph styles are represented as unset fields in this message.", "type": "object", "properties": { - "propertyState": { - "description": "The background fill property state.\n\nUpdating the the fill on a shape will implicitly update this field to\n`RENDERED`, unless another value is specified in the same request. To\nhave no fill on a shape, set this field to `NOT_RENDERED`. In this case,\nany other fill fields set in the same request will be ignored.", + "direction": { + "description": "The text direction of this paragraph. If unset, the value defaults to\nLEFT_TO_RIGHT\nsince text direction is not inherited.", + "type": "string", + "enumDescriptions": [ + "The text direction is inherited from the parent.", + "The text goes from left to right.", + "The text goes from right to left." + ], "enum": [ - "RENDERED", - "NOT_RENDERED", - "INHERIT" + "TEXT_DIRECTION_UNSPECIFIED", + "LEFT_TO_RIGHT", + "RIGHT_TO_LEFT" + ] + }, + "spacingMode": { + "enumDescriptions": [ + "The spacing mode is inherited from the parent.", + "Paragraph spacing is always rendered.", + "Paragraph spacing is skipped between list elements." + ], + "enum": [ + "SPACING_MODE_UNSPECIFIED", + "NEVER_COLLAPSE", + "COLLAPSE_LISTS" ], + "description": "The spacing mode for the paragraph.", + "type": "string" + }, + "indentEnd": { + "description": "The amount indentation for the paragraph on the side that corresponds to\nthe end of the text, based on the current text direction. If unset, the\nvalue is inherited from the parent.", + "$ref": "Dimension" + }, + "indentStart": { + "$ref": "Dimension", + "description": "The amount indentation for the paragraph on the side that corresponds to\nthe start of the text, based on the current text direction. If unset, the\nvalue is inherited from the parent." + }, + "spaceAbove": { + "$ref": "Dimension", + "description": "The amount of extra space above the paragraph. If unset, the value is\ninherited from the parent." 
+ }, + "alignment": { "enumDescriptions": [ - "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", - "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", - "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." + "The paragraph alignment is inherited from the parent.", + "The paragraph is aligned to the start of the line. Left-aligned for\nLTR text, right-aligned otherwise.", + "The paragraph is centered.", + "The paragraph is aligned to the end of the line. Right-aligned for\nLTR text, left-aligned otherwise.", + "The paragraph is justified." + ], + "enum": [ + "ALIGNMENT_UNSPECIFIED", + "START", + "CENTER", + "END", + "JUSTIFIED" ], + "description": "The text alignment for this paragraph.", "type": "string" }, - "solidFill": { - "description": "Solid color fill.", - "$ref": "SolidFill" + "lineSpacing": { + "description": "The amount of space between lines, as a percentage of normal, where normal\nis represented as 100.0. If unset, the value is inherited from the parent.", + "format": "float", + "type": "number" + }, + "indentFirstLine": { + "description": "The amount of indentation for the start of the first line of the paragraph.\nIf unset, the value is inherited from the parent.", + "$ref": "Dimension" + }, + "spaceBelow": { + "$ref": "Dimension", + "description": "The amount of extra space above the paragraph. If unset, the value is\ninherited from the parent." } }, - "id": "ShapeBackgroundFill" + "id": "ParagraphStyle" }, - "Line": { - "description": "A PageElement kind representing a\nline, curved connector, or bent connector.", + "ReplaceAllShapesWithSheetsChartResponse": { + "description": "The result of replacing shapes with a Google Sheets chart.", "type": "object", "properties": { - "lineProperties": { - "description": "The properties of the line.", - "$ref": "LineProperties" + "occurrencesChanged": { + "description": "The number of shapes replaced with charts.", + "format": "int32", + "type": "integer" + } + }, + "id": "ReplaceAllShapesWithSheetsChartResponse" + }, + "TableCellProperties": { + "description": "The properties of the TableCell.", + "type": "object", + "properties": { + "tableCellBackgroundFill": { + "$ref": "TableCellBackgroundFill", + "description": "The background fill of the table cell. The default fill matches the fill\nfor newly created table cells in the Slides editor." 
+ } + }, + "id": "TableCellProperties" + }, + "RefreshSheetsChartRequest": { + "description": "Refreshes an embedded Google Sheets chart by replacing it with the latest\nversion of the chart from Google Sheets.\n\nNOTE: Refreshing charts requires at least one of the spreadsheets.readonly,\nspreadsheets, drive.readonly, or drive OAuth scopes.", + "type": "object", + "properties": { + "objectId": { + "description": "The object ID of the chart to refresh.", + "type": "string" + } + }, + "id": "RefreshSheetsChartRequest" + }, + "Outline": { + "description": "The outline of a PageElement.\n\nIf these fields are unset, they may be inherited from a parent placeholder\nif it exists. If there is no parent, the fields will default to the value\nused for new page elements created in the Slides editor, which may depend on\nthe page element kind.", + "type": "object", + "properties": { + "outlineFill": { + "description": "The fill of the outline.", + "$ref": "OutlineFill" }, - "lineType": { - "description": "The type of the line.", + "weight": { + "$ref": "Dimension", + "description": "The thickness of the outline." + }, + "dashStyle": { + "enumDescriptions": [ + "Unspecified dash style.", + "Solid line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'solid'.\nThis is the default dash style.", + "Dotted line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dot'.", + "Dashed line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dash'.", + "Alternating dashes and dots. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'dashDot'.", + "Line with large dashes. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'lgDash'.", + "Alternating large dashes and dots. Corresponds to ECMA-376\nST_PresetLineDashVal value 'lgDashDot'." + ], "enum": [ - "TYPE_UNSPECIFIED", - "STRAIGHT_CONNECTOR_1", - "BENT_CONNECTOR_2", - "BENT_CONNECTOR_3", - "BENT_CONNECTOR_4", - "BENT_CONNECTOR_5", - "CURVED_CONNECTOR_2", - "CURVED_CONNECTOR_3", - "CURVED_CONNECTOR_4", - "CURVED_CONNECTOR_5" + "DASH_STYLE_UNSPECIFIED", + "SOLID", + "DOT", + "DASH", + "DASH_DOT", + "LONG_DASH", + "LONG_DASH_DOT" ], + "description": "The dash style of the outline.", + "type": "string" + }, + "propertyState": { + "description": "The outline property state.\n\nUpdating the the outline on a page element will implicitly update this\nfield to`RENDERED`, unless another value is specified in the same request.\nTo have no outline on a page element, set this field to `NOT_RENDERED`. In\nthis case, any other outline fields set in the same request will be\nignored.", + "type": "string", "enumDescriptions": [ - "An unspecified line type.", - "Straight connector 1 form. Corresponds to ECMA-376 ST_ShapeType\n'straightConnector1'.", - "Bent connector 2 form. Corresponds to ECMA-376 ST_ShapeType\n'bentConnector2'.", - "Bent connector 3 form. Corresponds to ECMA-376 ST_ShapeType\n'bentConnector3'.", - "Bent connector 4 form. Corresponds to ECMA-376 ST_ShapeType\n'bentConnector4'.", - "Bent connector 5 form. Corresponds to ECMA-376 ST_ShapeType\n'bentConnector5'.", - "Curved connector 2 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector2'.", - "Curved connector 3 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector3'.", - "Curved connector 4 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector4'.", - "Curved connector 5 form. Corresponds to ECMA-376 ST_ShapeType\n'curvedConnector5'." + "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. 
If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", + "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", + "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." ], - "type": "string" + "enum": [ + "RENDERED", + "NOT_RENDERED", + "INHERIT" + ] } }, - "id": "Line" + "id": "Outline" }, - "SheetsChartProperties": { - "description": "The properties of the SheetsChart.", + "NotesProperties": { + "description": "The properties of Page that are only\nrelevant for pages with page_type NOTES.", "type": "object", "properties": { - "chartImageProperties": { - "description": "The properties of the embedded chart image.", - "$ref": "ImageProperties" + "speakerNotesObjectId": { + "description": "The object ID of the shape on this notes page that contains the speaker\nnotes for the corresponding slide.\nThe actual shape may not always exist on the notes page. Inserting text\nusing this object ID will automatically create the shape. In this case, the\nactual shape may have different object ID. The `GetPresentation` or\n`GetPage` action will always return the latest object ID.", + "type": "string" } }, - "id": "SheetsChartProperties" + "id": "NotesProperties" }, - "AutoText": { - "description": "A TextElement kind that represents auto text.", + "ShapeProperties": { + "description": "The properties of a Shape.\n\nIf the shape is a placeholder shape as determined by the\nplaceholder field, then these\nproperties may be inherited from a parent placeholder shape.\nDetermining the rendered value of the property depends on the corresponding\nproperty_state field value.", "type": "object", "properties": { - "style": { - "description": "The styling applied to this auto text.", - "$ref": "TextStyle" + "outline": { + "$ref": "Outline", + "description": "The outline of the shape. If unset, the outline is inherited from a\nparent placeholder if it exists. If the shape has no parent, then the\ndefault outline depends on the shape type, matching the defaults for\nnew shapes created in the Slides editor." }, - "type": { - "description": "The type of this auto text.", - "enum": [ - "TYPE_UNSPECIFIED", - "SLIDE_NUMBER" - ], - "enumDescriptions": [ - "An unspecified autotext type.", - "Type for autotext that represents the current slide number." - ], - "type": "string" + "shapeBackgroundFill": { + "description": "The background fill of the shape. If unset, the background fill is\ninherited from a parent placeholder if it exists. If the shape has no\nparent, then the default background fill depends on the shape type,\nmatching the defaults for new shapes created in the Slides editor.", + "$ref": "ShapeBackgroundFill" }, - "content": { - "description": "The rendered content of this auto text, if available.", - "type": "string" + "shadow": { + "$ref": "Shadow", + "description": "The shadow properties of the shape. 
If unset, the shadow is inherited from\na parent placeholder if it exists. If the shape has no parent, then the\ndefault shadow matches the defaults for new shapes created in the Slides\neditor. This property is read-only." + }, + "link": { + "description": "The hyperlink destination of the shape. If unset, there is no link. Links\nare not inherited from parent placeholders.", + "$ref": "Link" } }, - "id": "AutoText" + "id": "ShapeProperties" }, - "TextElement": { - "description": "A TextElement describes the content of a range of indices in the text content\nof a Shape or TableCell.", + "TableColumnProperties": { + "description": "Properties of each column in a table.", "type": "object", "properties": { - "endIndex": { - "description": "The zero-based end index of this text element, exclusive, in Unicode code\nunits.", - "type": "integer", - "format": "int32" - }, - "textRun": { - "description": "A TextElement representing a run of text where all of the characters\nin the run have the same TextStyle.\n\nThe `start_index` and `end_index` of TextRuns will always be fully\ncontained in the index range of a single `paragraph_marker` TextElement.\nIn other words, a TextRun will never span multiple paragraphs.", - "$ref": "TextRun" - }, - "startIndex": { - "description": "The zero-based start index of this text element, in Unicode code units.", - "type": "integer", - "format": "int32" - }, - "paragraphMarker": { - "description": "A marker representing the beginning of a new paragraph.\n\nThe `start_index` and `end_index` of this TextElement represent the\nrange of the paragraph. Other TextElements with an index range contained\ninside this paragraph's range are considered to be part of this\nparagraph. The range of indices of two separate paragraphs will never\noverlap.", - "$ref": "ParagraphMarker" - }, - "autoText": { - "description": "A TextElement representing a spot in the text that is dynamically\nreplaced with content that can change over time.", - "$ref": "AutoText" + "columnWidth": { + "$ref": "Dimension", + "description": "Width of a column." } }, - "id": "TextElement" + "id": "TableColumnProperties" }, - "UpdateSlidesPositionRequest": { - "description": "Updates the position of slides in the presentation.", + "TableRow": { + "description": "Properties and contents of each row in a table.", "type": "object", "properties": { - "insertionIndex": { - "description": "The index where the slides should be inserted, based on the slide\narrangement before the move takes place. Must be between zero and the\nnumber of slides in the presentation, inclusive.", - "type": "integer", - "format": "int32" + "rowHeight": { + "description": "Height of a row.", + "$ref": "Dimension" }, - "slideObjectIds": { - "description": "The IDs of the slides in the presentation that should be moved.\nThe slides in this list must be in existing presentation order, without\nduplicates.", + "tableCells": { + "description": "Properties and contents of each cell.\n\nCells that span multiple columns are represented only once with a\ncolumn_span greater\nthan 1. 
As a result, the length of this collection does not always match\nthe number of columns of the entire table.", "type": "array", "items": { - "type": "string" + "$ref": "TableCell" } } }, - "id": "UpdateSlidesPositionRequest" + "id": "TableRow" }, - "ReplaceAllTextRequest": { - "description": "Replaces all instances of text matching a criteria with replace text.", + "UpdateTableCellPropertiesRequest": { + "description": "Update the properties of a TableCell.", "type": "object", "properties": { - "containsText": { - "description": "Finds text in a shape matching this substring.", - "$ref": "SubstringMatchCriteria" + "objectId": { + "description": "The object ID of the table.", + "type": "string" }, - "replaceText": { - "description": "The text that will replace the matched text.", + "tableRange": { + "$ref": "TableRange", + "description": "The table range representing the subset of the table to which the updates\nare applied. If a table range is not specified, the updates will apply to\nthe entire table." + }, + "tableCellProperties": { + "$ref": "TableCellProperties", + "description": "The table cell properties to update." + }, + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `tableCellProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the table cell background solid fill color, set\n`fields` to `\"tableCellBackgroundFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", "type": "string" } }, - "id": "ReplaceAllTextRequest" + "id": "UpdateTableCellPropertiesRequest" }, - "ShapeProperties": { - "description": "The properties of a Shape.\n\nIf the shape is a placeholder shape as determined by the\nplaceholder field, then these\nproperties may be inherited from a parent placeholder shape.\nDetermining the rendered value of the property depends on the corresponding\nproperty_state field value.", + "CreateSlideRequest": { + "description": "Creates a new slide.", "type": "object", "properties": { - "outline": { - "description": "The outline of the shape. If unset, the outline is inherited from a\nparent placeholder if it exists. If the shape has no parent, then the\ndefault outline depends on the shape type, matching the defaults for\nnew shapes created in the Slides editor.", - "$ref": "Outline" + "placeholderIdMappings": { + "description": "An optional list of object ID mappings from the placeholder(s) on the layout to the placeholder(s)\nthat will be created on the new slide from that specified layout. Can only\nbe used when `slide_layout_reference` is specified.", + "type": "array", + "items": { + "$ref": "LayoutPlaceholderIdMapping" + } }, - "link": { - "description": "The hyperlink destination of the shape. If unset, there is no link. 
Links\nare not inherited from parent placeholders.", - "$ref": "Link" + "slideLayoutReference": { + "$ref": "LayoutReference", + "description": "Layout reference of the slide to be inserted, based on the *current\nmaster*, which is one of the following:\n\n- The master of the previous slide index.\n- The master of the first slide, if the insertion_index is zero.\n- The first master in the presentation, if there are no slides.\n\nIf the LayoutReference is not found in the current master, a 400 bad\nrequest error is returned.\n\nIf you don't specify a layout reference, then the new slide will use the\npredefined layout `BLANK`." }, - "shapeBackgroundFill": { - "description": "The background fill of the shape. If unset, the background fill is\ninherited from a parent placeholder if it exists. If the shape has no\nparent, then the default background fill depends on the shape type,\nmatching the defaults for new shapes created in the Slides editor.", - "$ref": "ShapeBackgroundFill" + "objectId": { + "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", + "type": "string" }, - "shadow": { - "description": "The shadow properties of the shape. If unset, the shadow is inherited from\na parent placeholder if it exists. If the shape has no parent, then the\ndefault shadow matches the defaults for new shapes created in the Slides\neditor. This property is read-only.", - "$ref": "Shadow" + "insertionIndex": { + "description": "The optional zero-based index indicating where to insert the slides.\n\nIf you don't specify an index, the new slide is created at the end.", + "format": "int32", + "type": "integer" } }, - "id": "ShapeProperties" + "id": "CreateSlideRequest" }, - "CreateLineRequest": { - "description": "Creates a line.", + "BatchUpdatePresentationRequest": { + "description": "Request message for PresentationsService.BatchUpdatePresentation.", "type": "object", "properties": { - "objectId": { - "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", - "type": "string" - }, - "elementProperties": { - "description": "The element properties for the line.", - "$ref": "PageElementProperties" + "requests": { + "description": "A list of updates to apply to the presentation.", + "type": "array", + "items": { + "$ref": "Request" + } + } + }, + "id": "BatchUpdatePresentationRequest" + }, + "TextContent": { + "description": "The general text content. The text must reside in a compatible shape (e.g.\ntext box or rectangle) or a table cell in a page.", + "type": "object", + "properties": { + "textElements": { + "description": "The text contents broken down into its component parts, including styling\ninformation. 
This property is read-only.", + "type": "array", + "items": { + "$ref": "TextElement" + } }, - "lineCategory": { - "description": "The category of line to be created.", - "enum": [ - "STRAIGHT", - "BENT", - "CURVED" - ], - "enumDescriptions": [ - "Straight connectors, including straight connector 1. The is the default\ncategory when one is not specified.", - "Bent connectors, including bent connector 2 to 5.", - "Curved connectors, including curved connector 2 to 5." - ], - "type": "string" + "lists": { + "description": "The bulleted lists contained in this text, keyed by list ID.", + "type": "object", + "additionalProperties": { + "$ref": "List" + } } }, - "id": "CreateLineRequest" + "id": "TextContent" }, - "CreateShapeResponse": { - "description": "The result of creating a shape.", + "CreateSheetsChartResponse": { + "description": "The result of creating an embedded Google Sheets chart.", "type": "object", "properties": { "objectId": { - "description": "The object ID of the created shape.", + "description": "The object ID of the created chart.", "type": "string" } }, - "id": "CreateShapeResponse" + "id": "CreateSheetsChartResponse" }, - "CreateSlideResponse": { - "description": "The result of creating a slide.", + "DeleteParagraphBulletsRequest": { + "description": "Deletes bullets from all of the paragraphs that overlap with the given text\nindex range.\n\nThe nesting level of each paragraph will be visually preserved by adding\nindent to the start of the corresponding paragraph.", "type": "object", "properties": { "objectId": { - "description": "The object ID of the created slide.", + "description": "The object ID of the shape or table containing the text to delete bullets\nfrom.", "type": "string" + }, + "textRange": { + "description": "The range of text to delete bullets from, based on TextElement indexes.", + "$ref": "Range" + }, + "cellLocation": { + "description": "The optional table cell location if the text to be modified is in a table\ncell. If present, the object_id must refer to a table.", + "$ref": "TableCellLocation" } }, - "id": "CreateSlideResponse" + "id": "DeleteParagraphBulletsRequest" }, - "UpdateImagePropertiesRequest": { - "description": "Update the properties of an Image.", + "ParagraphMarker": { + "description": "A TextElement kind that represents the beginning of a new paragraph.", "type": "object", "properties": { - "objectId": { - "description": "The object ID of the image the updates are applied to.", - "type": "string" + "style": { + "description": "The paragraph's style", + "$ref": "ParagraphStyle" }, - "fields": { - "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `imageProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the image outline color, set `fields` to\n`\"outline.outlineFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", - "type": "string", - "format": "google-fieldmask" + "bullet": { + "$ref": "Bullet", + "description": "The bullet for this paragraph. If not present, the paragraph does not\nbelong to a list." + } + }, + "id": "ParagraphMarker" + }, + "InsertTableColumnsRequest": { + "description": "Inserts columns into a table.\n\nOther columns in the table will be resized to fit the new column.", + "type": "object", + "properties": { + "number": { + "description": "The number of columns to be inserted. 
Maximum 20 per request.", + "format": "int32", + "type": "integer" }, - "imageProperties": { - "description": "The image properties to update.", - "$ref": "ImageProperties" + "cellLocation": { + "description": "The reference table cell location from which columns will be inserted.\n\nA new column will be inserted to the left (or right) of the column where\nthe reference cell is. If the reference cell is a merged cell, a new\ncolumn will be inserted to the left (or right) of the merged cell.", + "$ref": "TableCellLocation" + }, + "insertRight": { + "description": "Whether to insert new columns to the right of the reference cell location.\n\n- `True`: insert to the right.\n- `False`: insert to the left.", + "type": "boolean" + }, + "tableObjectId": { + "description": "The table to insert columns into.", + "type": "string" } }, - "id": "UpdateImagePropertiesRequest" + "id": "InsertTableColumnsRequest" }, - "CreateVideoRequest": { - "description": "Creates a video.", + "LayoutPlaceholderIdMapping": { + "description": "The user-specified ID mapping for a placeholder that will be created on a\nslide from a specified layout.", "type": "object", "properties": { "objectId": { - "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", - "type": "string" - }, - "source": { - "description": "The video source.", - "enum": [ - "SOURCE_UNSPECIFIED", - "YOUTUBE" - ], - "enumDescriptions": [ - "The video source is unspecified.", - "The video source is YouTube." - ], + "description": "A user-supplied object ID for the placeholder identified above that to be\ncreated onto a slide.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", "type": "string" }, - "elementProperties": { - "description": "The element properties for the video.", - "$ref": "PageElementProperties" + "layoutPlaceholder": { + "description": "The placeholder on a layout that will be applied to a slide. Only type and index are needed. For example, a\npredefined `TITLE_AND_BODY` layout may usually have a TITLE placeholder\nwith index 0 and a BODY placeholder with index 0.", + "$ref": "Placeholder" }, - "id": { - "description": "The video source's unique identifier for this video.\n\ne.g. 
For YouTube video https://www.youtube.com/watch?v=7U3axjORYZ0,\nthe ID is 7U3axjORYZ0.", + "layoutPlaceholderObjectId": { + "description": "The object ID of the placeholder on a layout that will be applied\nto a slide.", "type": "string" } }, - "id": "CreateVideoRequest" + "id": "LayoutPlaceholderIdMapping" }, - "CreateTableRequest": { - "description": "Creates a new table.", + "UpdateShapePropertiesRequest": { + "description": "Update the properties of a Shape.", "type": "object", "properties": { - "columns": { - "description": "Number of columns in the table.", - "type": "integer", - "format": "int32" - }, "objectId": { - "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", + "description": "The object ID of the shape the updates are applied to.", "type": "string" }, - "rows": { - "description": "Number of rows in the table.", - "type": "integer", - "format": "int32" + "shapeProperties": { + "$ref": "ShapeProperties", + "description": "The shape properties to update." }, - "elementProperties": { - "description": "The element properties for the table.\n\nThe table will be created at the provided size, subject to a minimum size.\nIf no size is provided, the table will be automatically sized.\n\nTable transforms must have a scale of 1 and no shear components. If no\ntransform is provided, the table will be centered on the page.", - "$ref": "PageElementProperties" + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `shapeProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the shape background solid fill color, set `fields`\nto `\"shapeBackgroundFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", + "type": "string" } }, - "id": "CreateTableRequest" + "id": "UpdateShapePropertiesRequest" }, - "OptionalColor": { - "description": "A color that can either be fully opaque or fully transparent.", + "WordArt": { + "description": "A PageElement kind representing\nword art.", "type": "object", "properties": { - "opaqueColor": { - "description": "If set, this will be used as an opaque color. If unset, this represents\na transparent color.", - "$ref": "OpaqueColor" + "renderedText": { + "description": "The text rendered as word art.", + "type": "string" } }, - "id": "OptionalColor" + "id": "WordArt" }, - "TextRun": { - "description": "A TextElement kind that represents a run of text that all has the same\nstyling.", + "Recolor": { + "description": "A recolor effect applied on an image.", "type": "object", "properties": { - "style": { - "description": "The styling applied to this run.", - "$ref": "TextStyle" + "recolorStops": { + "description": "The recolor effect is represented by a gradient, which is a list of color\nstops.\n\nThe colors in the gradient will replace the corresponding colors at\nthe same position in the color palette and apply to the image. 
This\nproperty is read-only.", + "type": "array", + "items": { + "$ref": "ColorStop" + } }, - "content": { - "description": "The text of this run.", - "type": "string" + "name": { + "description": "The name of the recolor effect.\n\nThe name is determined from the `recolor_stops` by matching the gradient\nagainst the colors in the page's current color scheme. This property is\nread-only.", + "type": "string", + "enumDescriptions": [ + "No recolor effect. The default value.", + "A recolor effect that lightens the image using the page's first available\ncolor from its color scheme.", + "A recolor effect that lightens the image using the page's second\navailable color from its color scheme.", + "A recolor effect that lightens the image using the page's third available\ncolor from its color scheme.", + "A recolor effect that lightens the image using the page's forth available\ncolor from its color scheme.", + "A recolor effect that lightens the image using the page's fifth available\ncolor from its color scheme.", + "A recolor effect that lightens the image using the page's sixth available\ncolor from its color scheme.", + "A recolor effect that lightens the image using the page's seventh\navailable color from its color scheme.e.", + "A recolor effect that lightens the image using the page's eighth\navailable color from its color scheme.", + "A recolor effect that lightens the image using the page's ninth available\ncolor from its color scheme.", + "A recolor effect that lightens the image using the page's tenth available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's first available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's second available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's third available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's fourth available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's fifth available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's sixth available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's seventh\navailable color from its color scheme.", + "A recolor effect that darkens the image using the page's eighth available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's ninth available\ncolor from its color scheme.", + "A recolor effect that darkens the image using the page's tenth available\ncolor from its color scheme.", + "A recolor effect that recolors the image to grayscale.", + "A recolor effect that recolors the image to negative grayscale.", + "A recolor effect that recolors the image using the sepia color.", + "Custom recolor effect. Refer to `recolor_stops` for the concrete\ngradient." 
+ ], + "enum": [ + "NONE", + "LIGHT1", + "LIGHT2", + "LIGHT3", + "LIGHT4", + "LIGHT5", + "LIGHT6", + "LIGHT7", + "LIGHT8", + "LIGHT9", + "LIGHT10", + "DARK1", + "DARK2", + "DARK3", + "DARK4", + "DARK5", + "DARK6", + "DARK7", + "DARK8", + "DARK9", + "DARK10", + "GRAYSCALE", + "NEGATIVE", + "SEPIA", + "CUSTOM" + ] } }, - "id": "TextRun" + "id": "Recolor" }, - "Shape": { - "description": "A PageElement kind representing a\ngeneric shape that does not have a more specific classification.", + "Link": { + "description": "A hypertext link.", "type": "object", "properties": { - "text": { - "description": "The text content of the shape.", - "$ref": "TextContent" + "pageObjectId": { + "description": "If set, indicates this is a link to the specific page in this\npresentation with this ID. A page with this ID may not exist.", + "type": "string" }, - "shapeProperties": { - "description": "The properties of the shape.", - "$ref": "ShapeProperties" + "url": { + "description": "If set, indicates this is a link to the external web page at this URL.", + "type": "string" }, - "shapeType": { - "description": "The type of the shape.", + "relativeLink": { + "description": "If set, indicates this is a link to a slide in this presentation,\naddressed by its position.", + "type": "string", + "enumDescriptions": [ + "An unspecified relative slide link.", + "A link to the next slide.", + "A link to the previous slide.", + "A link to the first slide in the presentation.", + "A link to the last slide in the presentation." + ], "enum": [ - "TYPE_UNSPECIFIED", - "TEXT_BOX", - "RECTANGLE", - "ROUND_RECTANGLE", - "ELLIPSE", - "ARC", - "BENT_ARROW", - "BENT_UP_ARROW", - "BEVEL", - "BLOCK_ARC", - "BRACE_PAIR", - "BRACKET_PAIR", - "CAN", - "CHEVRON", - "CHORD", - "CLOUD", - "CORNER", - "CUBE", - "CURVED_DOWN_ARROW", - "CURVED_LEFT_ARROW", - "CURVED_RIGHT_ARROW", - "CURVED_UP_ARROW", - "DECAGON", - "DIAGONAL_STRIPE", - "DIAMOND", - "DODECAGON", - "DONUT", - "DOUBLE_WAVE", - "DOWN_ARROW", - "DOWN_ARROW_CALLOUT", - "FOLDED_CORNER", - "FRAME", - "HALF_FRAME", - "HEART", - "HEPTAGON", - "HEXAGON", - "HOME_PLATE", - "HORIZONTAL_SCROLL", - "IRREGULAR_SEAL_1", - "IRREGULAR_SEAL_2", - "LEFT_ARROW", - "LEFT_ARROW_CALLOUT", - "LEFT_BRACE", - "LEFT_BRACKET", - "LEFT_RIGHT_ARROW", - "LEFT_RIGHT_ARROW_CALLOUT", - "LEFT_RIGHT_UP_ARROW", - "LEFT_UP_ARROW", - "LIGHTNING_BOLT", - "MATH_DIVIDE", - "MATH_EQUAL", - "MATH_MINUS", - "MATH_MULTIPLY", - "MATH_NOT_EQUAL", - "MATH_PLUS", - "MOON", - "NO_SMOKING", - "NOTCHED_RIGHT_ARROW", - "OCTAGON", - "PARALLELOGRAM", - "PENTAGON", - "PIE", - "PLAQUE", - "PLUS", - "QUAD_ARROW", - "QUAD_ARROW_CALLOUT", - "RIBBON", - "RIBBON_2", - "RIGHT_ARROW", - "RIGHT_ARROW_CALLOUT", - "RIGHT_BRACE", - "RIGHT_BRACKET", - "ROUND_1_RECTANGLE", - "ROUND_2_DIAGONAL_RECTANGLE", - "ROUND_2_SAME_RECTANGLE", - "RIGHT_TRIANGLE", - "SMILEY_FACE", - "SNIP_1_RECTANGLE", - "SNIP_2_DIAGONAL_RECTANGLE", - "SNIP_2_SAME_RECTANGLE", - "SNIP_ROUND_RECTANGLE", - "STAR_10", - "STAR_12", - "STAR_16", - "STAR_24", - "STAR_32", - "STAR_4", - "STAR_5", - "STAR_6", - "STAR_7", - "STAR_8", - "STRIPED_RIGHT_ARROW", - "SUN", - "TRAPEZOID", - "TRIANGLE", - "UP_ARROW", - "UP_ARROW_CALLOUT", - "UP_DOWN_ARROW", - "UTURN_ARROW", - "VERTICAL_SCROLL", - "WAVE", - "WEDGE_ELLIPSE_CALLOUT", - "WEDGE_RECTANGLE_CALLOUT", - "WEDGE_ROUND_RECTANGLE_CALLOUT", - "FLOW_CHART_ALTERNATE_PROCESS", - "FLOW_CHART_COLLATE", - "FLOW_CHART_CONNECTOR", - "FLOW_CHART_DECISION", - "FLOW_CHART_DELAY", - "FLOW_CHART_DISPLAY", - "FLOW_CHART_DOCUMENT", - "FLOW_CHART_EXTRACT", 
- "FLOW_CHART_INPUT_OUTPUT", - "FLOW_CHART_INTERNAL_STORAGE", - "FLOW_CHART_MAGNETIC_DISK", - "FLOW_CHART_MAGNETIC_DRUM", - "FLOW_CHART_MAGNETIC_TAPE", - "FLOW_CHART_MANUAL_INPUT", - "FLOW_CHART_MANUAL_OPERATION", - "FLOW_CHART_MERGE", - "FLOW_CHART_MULTIDOCUMENT", - "FLOW_CHART_OFFLINE_STORAGE", - "FLOW_CHART_OFFPAGE_CONNECTOR", - "FLOW_CHART_ONLINE_STORAGE", - "FLOW_CHART_OR", - "FLOW_CHART_PREDEFINED_PROCESS", - "FLOW_CHART_PREPARATION", - "FLOW_CHART_PROCESS", - "FLOW_CHART_PUNCHED_CARD", - "FLOW_CHART_PUNCHED_TAPE", - "FLOW_CHART_SORT", - "FLOW_CHART_SUMMING_JUNCTION", - "FLOW_CHART_TERMINATOR", - "ARROW_EAST", - "ARROW_NORTH_EAST", - "ARROW_NORTH", - "SPEECH", - "STARBURST", - "TEARDROP", - "ELLIPSE_RIBBON", - "ELLIPSE_RIBBON_2", - "CLOUD_CALLOUT", - "CUSTOM" + "RELATIVE_SLIDE_LINK_UNSPECIFIED", + "NEXT_SLIDE", + "PREVIOUS_SLIDE", + "FIRST_SLIDE", + "LAST_SLIDE" + ] + }, + "slideIndex": { + "description": "If set, indicates this is a link to the slide at this zero-based index\nin the presentation. There may not be a slide at this index.", + "format": "int32", + "type": "integer" + } + }, + "id": "Link" + }, + "RgbColor": { + "description": "An RGB color.", + "type": "object", + "properties": { + "red": { + "description": "The red component of the color, from 0.0 to 1.0.", + "format": "float", + "type": "number" + }, + "green": { + "description": "The green component of the color, from 0.0 to 1.0.", + "format": "float", + "type": "number" + }, + "blue": { + "description": "The blue component of the color, from 0.0 to 1.0.", + "format": "float", + "type": "number" + } + }, + "id": "RgbColor" + }, + "CreateShapeResponse": { + "description": "The result of creating a shape.", + "type": "object", + "properties": { + "objectId": { + "description": "The object ID of the created shape.", + "type": "string" + } + }, + "id": "CreateShapeResponse" + }, + "CreateLineRequest": { + "description": "Creates a line.", + "type": "object", + "properties": { + "objectId": { + "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", + "type": "string" + }, + "elementProperties": { + "$ref": "PageElementProperties", + "description": "The element properties for the line." + }, + "lineCategory": { + "enumDescriptions": [ + "Straight connectors, including straight connector 1. The is the default\ncategory when one is not specified.", + "Bent connectors, including bent connector 2 to 5.", + "Curved connectors, including curved connector 2 to 5." 
+ ], + "enum": [ + "STRAIGHT", + "BENT", + "CURVED" ], + "description": "The category of line to be created.", + "type": "string" + } + }, + "id": "CreateLineRequest" + }, + "CreateSlideResponse": { + "description": "The result of creating a slide.", + "type": "object", + "properties": { + "objectId": { + "description": "The object ID of the created slide.", + "type": "string" + } + }, + "id": "CreateSlideResponse" + }, + "CreateShapeRequest": { + "description": "Creates a new shape.", + "type": "object", + "properties": { + "objectId": { + "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\nIf empty, a unique identifier will be generated.", + "type": "string" + }, + "shapeType": { + "description": "The shape type.", + "type": "string", "enumDescriptions": [ "The shape type that is not predefined.", "Text box shape.", @@ -2891,665 +2747,1027 @@ "Callout cloud shape. Corresponds to ECMA-376 ST_ShapeType 'cloudCallout'", "Custom shape." ], - "type": "string" - }, - "placeholder": { - "description": "Placeholders are shapes that are inherit from corresponding placeholders on\nlayouts and masters.\n\nIf set, the shape is a placeholder shape and any inherited properties\ncan be resolved by looking at the parent placeholder identified by the\nPlaceholder.parent_object_id field.", - "$ref": "Placeholder" - } - }, - "id": "Shape" - }, - "BatchUpdatePresentationResponse": { - "description": "Response message from a batch update.", - "type": "object", - "properties": { - "presentationId": { - "description": "The presentation the updates were applied to.", - "type": "string" - }, - "replies": { - "description": "The reply of the updates. This maps 1:1 with the updates, although\nreplies to some requests may be empty.", - "type": "array", - "items": { - "$ref": "Response" - } - } - }, - "id": "BatchUpdatePresentationResponse" - }, - "ImageProperties": { - "description": "The properties of the Image.", - "type": "object", - "properties": { - "cropProperties": { - "description": "The crop properties of the image. If not set, the image is not cropped.\nThis property is read-only.", - "$ref": "CropProperties" - }, - "outline": { - "description": "The outline of the image. If not set, the the image has no outline.", - "$ref": "Outline" - }, - "shadow": { - "description": "The shadow of the image. If not set, the image has no shadow. This property\nis read-only.", - "$ref": "Shadow" - }, - "transparency": { - "description": "The transparency effect of the image. The value should be in the interval\n[0.0, 1.0], where 0 means no effect and 1 means completely transparent.\nThis property is read-only.", - "type": "number", - "format": "float" - }, - "contrast": { - "description": "The contrast effect of the image. The value should be in the interval\n[-1.0, 1.0], where 0 means no effect. This property is read-only.", - "type": "number", - "format": "float" - }, - "link": { - "description": "The hyperlink destination of the image. If unset, there is no link.", - "$ref": "Link" - }, - "recolor": { - "description": "The recolor effect of the image. 
If not set, the image is not recolored.\nThis property is read-only.", - "$ref": "Recolor" + "enum": [ + "TYPE_UNSPECIFIED", + "TEXT_BOX", + "RECTANGLE", + "ROUND_RECTANGLE", + "ELLIPSE", + "ARC", + "BENT_ARROW", + "BENT_UP_ARROW", + "BEVEL", + "BLOCK_ARC", + "BRACE_PAIR", + "BRACKET_PAIR", + "CAN", + "CHEVRON", + "CHORD", + "CLOUD", + "CORNER", + "CUBE", + "CURVED_DOWN_ARROW", + "CURVED_LEFT_ARROW", + "CURVED_RIGHT_ARROW", + "CURVED_UP_ARROW", + "DECAGON", + "DIAGONAL_STRIPE", + "DIAMOND", + "DODECAGON", + "DONUT", + "DOUBLE_WAVE", + "DOWN_ARROW", + "DOWN_ARROW_CALLOUT", + "FOLDED_CORNER", + "FRAME", + "HALF_FRAME", + "HEART", + "HEPTAGON", + "HEXAGON", + "HOME_PLATE", + "HORIZONTAL_SCROLL", + "IRREGULAR_SEAL_1", + "IRREGULAR_SEAL_2", + "LEFT_ARROW", + "LEFT_ARROW_CALLOUT", + "LEFT_BRACE", + "LEFT_BRACKET", + "LEFT_RIGHT_ARROW", + "LEFT_RIGHT_ARROW_CALLOUT", + "LEFT_RIGHT_UP_ARROW", + "LEFT_UP_ARROW", + "LIGHTNING_BOLT", + "MATH_DIVIDE", + "MATH_EQUAL", + "MATH_MINUS", + "MATH_MULTIPLY", + "MATH_NOT_EQUAL", + "MATH_PLUS", + "MOON", + "NO_SMOKING", + "NOTCHED_RIGHT_ARROW", + "OCTAGON", + "PARALLELOGRAM", + "PENTAGON", + "PIE", + "PLAQUE", + "PLUS", + "QUAD_ARROW", + "QUAD_ARROW_CALLOUT", + "RIBBON", + "RIBBON_2", + "RIGHT_ARROW", + "RIGHT_ARROW_CALLOUT", + "RIGHT_BRACE", + "RIGHT_BRACKET", + "ROUND_1_RECTANGLE", + "ROUND_2_DIAGONAL_RECTANGLE", + "ROUND_2_SAME_RECTANGLE", + "RIGHT_TRIANGLE", + "SMILEY_FACE", + "SNIP_1_RECTANGLE", + "SNIP_2_DIAGONAL_RECTANGLE", + "SNIP_2_SAME_RECTANGLE", + "SNIP_ROUND_RECTANGLE", + "STAR_10", + "STAR_12", + "STAR_16", + "STAR_24", + "STAR_32", + "STAR_4", + "STAR_5", + "STAR_6", + "STAR_7", + "STAR_8", + "STRIPED_RIGHT_ARROW", + "SUN", + "TRAPEZOID", + "TRIANGLE", + "UP_ARROW", + "UP_ARROW_CALLOUT", + "UP_DOWN_ARROW", + "UTURN_ARROW", + "VERTICAL_SCROLL", + "WAVE", + "WEDGE_ELLIPSE_CALLOUT", + "WEDGE_RECTANGLE_CALLOUT", + "WEDGE_ROUND_RECTANGLE_CALLOUT", + "FLOW_CHART_ALTERNATE_PROCESS", + "FLOW_CHART_COLLATE", + "FLOW_CHART_CONNECTOR", + "FLOW_CHART_DECISION", + "FLOW_CHART_DELAY", + "FLOW_CHART_DISPLAY", + "FLOW_CHART_DOCUMENT", + "FLOW_CHART_EXTRACT", + "FLOW_CHART_INPUT_OUTPUT", + "FLOW_CHART_INTERNAL_STORAGE", + "FLOW_CHART_MAGNETIC_DISK", + "FLOW_CHART_MAGNETIC_DRUM", + "FLOW_CHART_MAGNETIC_TAPE", + "FLOW_CHART_MANUAL_INPUT", + "FLOW_CHART_MANUAL_OPERATION", + "FLOW_CHART_MERGE", + "FLOW_CHART_MULTIDOCUMENT", + "FLOW_CHART_OFFLINE_STORAGE", + "FLOW_CHART_OFFPAGE_CONNECTOR", + "FLOW_CHART_ONLINE_STORAGE", + "FLOW_CHART_OR", + "FLOW_CHART_PREDEFINED_PROCESS", + "FLOW_CHART_PREPARATION", + "FLOW_CHART_PROCESS", + "FLOW_CHART_PUNCHED_CARD", + "FLOW_CHART_PUNCHED_TAPE", + "FLOW_CHART_SORT", + "FLOW_CHART_SUMMING_JUNCTION", + "FLOW_CHART_TERMINATOR", + "ARROW_EAST", + "ARROW_NORTH_EAST", + "ARROW_NORTH", + "SPEECH", + "STARBURST", + "TEARDROP", + "ELLIPSE_RIBBON", + "ELLIPSE_RIBBON_2", + "CLOUD_CALLOUT", + "CUSTOM" + ] }, - "brightness": { - "description": "The brightness effect of the image. The value should be in the interval\n[-1.0, 1.0], where 0 means no effect. This property is read-only.", - "type": "number", - "format": "float" - } - }, - "id": "ImageProperties" - }, - "Group": { - "description": "A PageElement kind representing a\njoined collection of PageElements.", - "type": "object", - "properties": { - "children": { - "description": "The collection of elements in the group. 
The minimum size of a group is 2.", - "type": "array", - "items": { - "$ref": "PageElement" - } + "elementProperties": { + "description": "The element properties for the shape.", + "$ref": "PageElementProperties" } }, - "id": "Group" + "id": "CreateShapeRequest" }, - "Outline": { - "description": "The outline of a PageElement.\n\nIf these fields are unset, they may be inherited from a parent placeholder\nif it exists. If there is no parent, the fields will default to the value\nused for new page elements created in the Slides editor, which may depend on\nthe page element kind.", + "Video": { + "description": "A PageElement kind representing a\nvideo.", "type": "object", "properties": { - "weight": { - "description": "The thickness of the outline.", - "$ref": "Dimension" + "videoProperties": { + "description": "The properties of the video.", + "$ref": "VideoProperties" }, - "dashStyle": { - "description": "The dash style of the outline.", - "enum": [ - "DASH_STYLE_UNSPECIFIED", - "SOLID", - "DOT", - "DASH", - "DASH_DOT", - "LONG_DASH", - "LONG_DASH_DOT" - ], + "source": { "enumDescriptions": [ - "Unspecified dash style.", - "Solid line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'solid'.\nThis is the default dash style.", - "Dotted line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dot'.", - "Dashed line. Corresponds to ECMA-376 ST_PresetLineDashVal value 'dash'.", - "Alternating dashes and dots. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'dashDot'.", - "Line with large dashes. Corresponds to ECMA-376 ST_PresetLineDashVal\nvalue 'lgDash'.", - "Alternating large dashes and dots. Corresponds to ECMA-376\nST_PresetLineDashVal value 'lgDashDot'." + "The video source is unspecified.", + "The video source is YouTube." ], - "type": "string" - }, - "propertyState": { - "description": "The outline property state.\n\nUpdating the the outline on a page element will implicitly update this\nfield to`RENDERED`, unless another value is specified in the same request.\nTo have no outline on a page element, set this field to `NOT_RENDERED`. In\nthis case, any other outline fields set in the same request will be\nignored.", "enum": [ - "RENDERED", - "NOT_RENDERED", - "INHERIT" - ], - "enumDescriptions": [ - "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", - "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", - "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." + "SOURCE_UNSPECIFIED", + "YOUTUBE" ], + "description": "The video source.", "type": "string" }, - "outlineFill": { - "description": "The fill of the outline.", - "$ref": "OutlineFill" + "url": { + "description": "An URL to a video. 
The URL is valid as long as the source video\nexists and sharing settings do not change.", + "type": "string" + }, + "id": { + "description": "The video source's unique identifier for this video.", + "type": "string" } }, - "id": "Outline" + "id": "Video" }, - "TableCell": { - "description": "Properties and contents of each table cell.", + "PageProperties": { + "description": "The properties of the Page.\n\nThe page will inherit properties from the parent page. Depending on the page\ntype the hierarchy is defined in either\nSlideProperties or\nLayoutProperties.", "type": "object", "properties": { - "text": { - "description": "The text content of the cell.", - "$ref": "TextContent" - }, - "location": { - "description": "The location of the cell within the table.", - "$ref": "TableCellLocation" - }, - "rowSpan": { - "description": "Row span of the cell.", - "type": "integer", - "format": "int32" - }, - "tableCellProperties": { - "description": "The properties of the table cell.", - "$ref": "TableCellProperties" + "colorScheme": { + "description": "The color scheme of the page. If unset, the color scheme is inherited from\na parent page. If the page has no parent, the color scheme uses a default\nSlides color scheme. This field is read-only.", + "$ref": "ColorScheme" }, - "columnSpan": { - "description": "Column span of the cell.", - "type": "integer", - "format": "int32" + "pageBackgroundFill": { + "description": "The background fill of the page. If unset, the background fill is inherited\nfrom a parent page if it exists. If the page has no parent, then the\nbackground fill defaults to the corresponding fill in the Slides editor.", + "$ref": "PageBackgroundFill" } }, - "id": "TableCell" + "id": "PageProperties" }, - "ReplaceAllShapesWithImageResponse": { - "description": "The result of replacing shapes with an image.", + "NestingLevel": { + "description": "Contains properties describing the look and feel of a list bullet at a given\nlevel of nesting.", "type": "object", "properties": { - "occurrencesChanged": { - "description": "The number of shapes replaced with images.", - "type": "integer", - "format": "int32" + "bulletStyle": { + "$ref": "TextStyle", + "description": "The style of a bullet at this level of nesting." } }, - "id": "ReplaceAllShapesWithImageResponse" + "id": "NestingLevel" }, - "CreateSlideRequest": { - "description": "Creates a new slide.", + "TableCell": { + "description": "Properties and contents of each table cell.", "type": "object", "properties": { - "objectId": { - "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", - "type": "string" + "location": { + "$ref": "TableCellLocation", + "description": "The location of the cell within the table." 
}, - "insertionIndex": { - "description": "The optional zero-based index indicating where to insert the slides.\n\nIf you don't specify an index, the new slide is created at the end.", - "type": "integer", - "format": "int32" + "rowSpan": { + "description": "Row span of the cell.", + "format": "int32", + "type": "integer" }, - "slideLayoutReference": { - "description": "Layout reference of the slide to be inserted, based on the *current\nmaster*, which is one of the following:\n\n- The master of the previous slide index.\n- The master of the first slide, if the insertion_index is zero.\n- The first master in the presentation, if there are no slides.\n\nIf the LayoutReference is not found in the current master, a 400 bad\nrequest error is returned.\n\nIf you don't specify a layout reference, then the new slide will use the\npredefined layout `BLANK`.", - "$ref": "LayoutReference" - } - }, - "id": "CreateSlideRequest" - }, - "TableCellProperties": { - "description": "The properties of the TableCell.", - "type": "object", - "properties": { - "tableCellBackgroundFill": { - "description": "The background fill of the table cell. The default fill matches the fill\nfor newly created table cells in the Slides editor.", - "$ref": "TableCellBackgroundFill" + "columnSpan": { + "description": "Column span of the cell.", + "format": "int32", + "type": "integer" + }, + "text": { + "description": "The text content of the cell.", + "$ref": "TextContent" + }, + "tableCellProperties": { + "description": "The properties of the table cell.", + "$ref": "TableCellProperties" } }, - "id": "TableCellProperties" + "id": "TableCell" }, - "CreateTableResponse": { - "description": "The result of creating a table.", + "UpdateLinePropertiesRequest": { + "description": "Updates the properties of a Line.", "type": "object", "properties": { - "objectId": { - "description": "The object ID of the created table.", + "lineProperties": { + "description": "The line properties to update.", + "$ref": "LineProperties" + }, + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `lineProperties` is\nimplied and should not be specified. 
A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the line solid fill color, set `fields` to\n`\"lineFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", "type": "string" - } - }, - "id": "CreateTableResponse" - }, - "Size": { - "description": "A width and height.", - "type": "object", - "properties": { - "width": { - "description": "The width of the object.", - "$ref": "Dimension" }, - "height": { - "description": "The height of the object.", - "$ref": "Dimension" + "objectId": { + "description": "The object ID of the line the update is applied to.", + "type": "string" } }, - "id": "Size" + "id": "UpdateLinePropertiesRequest" }, - "ColorScheme": { - "description": "The palette of predefined colors for a page.", + "UpdateSlidesPositionRequest": { + "description": "Updates the position of slides in the presentation.", "type": "object", "properties": { - "colors": { - "description": "The ThemeColorType and corresponding concrete color pairs.", + "slideObjectIds": { + "description": "The IDs of the slides in the presentation that should be moved.\nThe slides in this list must be in existing presentation order, without\nduplicates.", "type": "array", "items": { - "$ref": "ThemeColorPair" + "type": "string" } - } - }, - "id": "ColorScheme" - }, - "ParagraphMarker": { - "description": "A TextElement kind that represents the beginning of a new paragraph.", - "type": "object", - "properties": { - "style": { - "description": "The paragraph's style", - "$ref": "ParagraphStyle" }, - "bullet": { - "description": "The bullet for this paragraph. If not present, the paragraph does not\nbelong to a list.", - "$ref": "Bullet" + "insertionIndex": { + "description": "The index where the slides should be inserted, based on the slide\narrangement before the move takes place. Must be between zero and the\nnumber of slides in the presentation, inclusive.", + "format": "int32", + "type": "integer" } }, - "id": "ParagraphMarker" + "id": "UpdateSlidesPositionRequest" }, - "CreateParagraphBulletsRequest": { - "description": "Creates bullets for all of the paragraphs that overlap with the given\ntext index range.\n\nThe nesting level of each paragraph will be determined by counting leading\ntabs in front of each paragraph. To avoid excess space between the bullet and\nthe corresponding paragraph, these leading tabs are removed by this request.\nThis may change the indices of parts of the text.\n\nIf the paragraph immediately before paragraphs being updated is in a list\nwith a matching preset, the paragraphs being updated are added to that\npreceding list.", - "type": "object", - "properties": { - "objectId": { - "description": "The object ID of the shape or table containing the text to add bullets to.", - "type": "string" - }, - "textRange": { - "description": "The range of text to apply the bullet presets to, based on TextElement indexes.", - "$ref": "Range" - }, - "bulletPreset": { - "description": "The kinds of bullet glyphs to be used. 
Defaults to the\n`BULLET_DISC_CIRCLE_SQUARE` preset.", - "enum": [ - "BULLET_DISC_CIRCLE_SQUARE", - "BULLET_DIAMONDX_ARROW3D_SQUARE", - "BULLET_CHECKBOX", - "BULLET_ARROW_DIAMOND_DISC", - "BULLET_STAR_CIRCLE_SQUARE", - "BULLET_ARROW3D_CIRCLE_SQUARE", - "BULLET_LEFTTRIANGLE_DIAMOND_DISC", - "BULLET_DIAMONDX_HOLLOWDIAMOND_SQUARE", - "BULLET_DIAMOND_CIRCLE_SQUARE", - "NUMBERED_DIGIT_ALPHA_ROMAN", - "NUMBERED_DIGIT_ALPHA_ROMAN_PARENS", - "NUMBERED_DIGIT_NESTED", - "NUMBERED_UPPERALPHA_ALPHA_ROMAN", - "NUMBERED_UPPERROMAN_UPPERALPHA_DIGIT", - "NUMBERED_ZERODIGIT_ALPHA_ROMAN" - ], - "enumDescriptions": [ - "A bulleted list with a `DISC`, `CIRCLE` and `SQUARE` bullet glyph for the\nfirst 3 list nesting levels.", - "A bulleted list with a `DIAMONDX`, `ARROW3D` and `SQUARE` bullet glyph for\nthe first 3 list nesting levels.", - "A bulleted list with `CHECKBOX` bullet glyphs for all list nesting levels.", - "A bulleted list with a `ARROW`, `DIAMOND` and `DISC` bullet glyph for\nthe first 3 list nesting levels.", - "A bulleted list with a `STAR`, `CIRCLE` and `DISC` bullet glyph for\nthe first 3 list nesting levels.", - "A bulleted list with a `ARROW3D`, `CIRCLE` and `SQUARE` bullet glyph for\nthe first 3 list nesting levels.", - "A bulleted list with a `LEFTTRIANGLE`, `DIAMOND` and `DISC` bullet glyph\nfor the first 3 list nesting levels.", - "A bulleted list with a `DIAMONDX`, `HOLLOWDIAMOND` and `SQUARE` bullet\nglyph for the first 3 list nesting levels.", - "A bulleted list with a `DIAMOND`, `CIRCLE` and `SQUARE` bullet glyph\nfor the first 3 list nesting levels.", - "A numbered list with `DIGIT`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by periods.", - "A numbered list with `DIGIT`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by parenthesis.", - "A numbered list with `DIGIT` numeric glyphs separated by periods, where\neach nesting level uses the previous nesting level's glyph as a prefix.\nFor example: '1.', '1.1.', '2.', '2.2.'.", - "A numbered list with `UPPERALPHA`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by periods.", - "A numbered list with `UPPERROMAN`, `UPPERALPHA` and `DIGIT` numeric glyphs\nfor the first 3 list nesting levels, followed by periods.", - "A numbered list with `ZERODIGIT`, `ALPHA` and `ROMAN` numeric glyphs for\nthe first 3 list nesting levels, followed by periods." + "TableCellBackgroundFill": { + "description": "The table cell background fill.", + "type": "object", + "properties": { + "propertyState": { + "enumDescriptions": [ + "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", + "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", + "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. 
Elements that do\nnot inherit will never have an INHERIT property state." + ], + "enum": [ + "RENDERED", + "NOT_RENDERED", + "INHERIT" ], + "description": "The background fill property state.\n\nUpdating the the fill on a table cell will implicitly update this field\nto `RENDERED`, unless another value is specified in the same request. To\nhave no fill on a table cell, set this field to `NOT_RENDERED`. In this\ncase, any other fill fields set in the same request will be ignored.", "type": "string" }, - "cellLocation": { - "description": "The optional table cell location if the text to be modified is in a table\ncell. If present, the object_id must refer to a table.", - "$ref": "TableCellLocation" + "solidFill": { + "description": "Solid color fill.", + "$ref": "SolidFill" } }, - "id": "CreateParagraphBulletsRequest" + "id": "TableCellBackgroundFill" }, - "SubstringMatchCriteria": { - "description": "A criteria that matches a specific string of text in a shape or table.", + "UpdatePagePropertiesRequest": { + "description": "Updates the properties of a Page.", "type": "object", "properties": { - "text": { - "description": "The text to search for in the shape or table.", + "objectId": { + "description": "The object ID of the page the update is applied to.", "type": "string" }, - "matchCase": { - "description": "Indicates whether the search should respect case:\n\n- `True`: the search is case sensitive.\n- `False`: the search is case insensitive.", - "type": "boolean" + "pageProperties": { + "$ref": "PageProperties", + "description": "The page properties to update." + }, + "fields": { + "description": "The fields that should be updated.\n\nAt least one field must be specified. The root `pageProperties` is\nimplied and should not be specified. A single `\"*\"` can be used as\nshort-hand for listing every field.\n\nFor example to update the page background solid fill color, set `fields`\nto `\"pageBackgroundFill.solidFill.color\"`.\n\nTo reset a property to its default value, include its field name in the\nfield mask but leave the field itself unset.", + "format": "google-fieldmask", + "type": "string" } }, - "id": "SubstringMatchCriteria" + "id": "UpdatePagePropertiesRequest" }, - "WordArt": { - "description": "A PageElement kind representing\nword art.", + "Group": { + "description": "A PageElement kind representing a\njoined collection of PageElements.", "type": "object", "properties": { - "renderedText": { - "description": "The text rendered as word art.", - "type": "string" + "children": { + "description": "The collection of elements in the group. The minimum size of a group is 2.", + "type": "array", + "items": { + "$ref": "PageElement" + } } }, - "id": "WordArt" + "id": "Group" }, - "Range": { - "description": "Specifies a contiguous range of an indexed collection, such as characters in\ntext.", + "Placeholder": { + "description": "The placeholder information that uniquely identifies a placeholder shape.", "type": "object", "properties": { - "endIndex": { - "description": "The optional zero-based index of the end of the collection.\nRequired for `SPECIFIC_RANGE` delete mode.", - "type": "integer", - "format": "int32" - }, - "startIndex": { - "description": "The optional zero-based index of the beginning of the collection.\nRequired for `SPECIFIC_RANGE` and `FROM_START_INDEX` ranges.", - "type": "integer", - "format": "int32" + "index": { + "description": "The index of the placeholder. 
If the same placeholder types are the present\nin the same page, they would have different index values.", + "format": "int32", + "type": "integer" }, "type": { - "description": "The type of range.", - "enum": [ - "RANGE_TYPE_UNSPECIFIED", - "FIXED_RANGE", - "FROM_START_INDEX", - "ALL" - ], + "description": "The type of the placeholder.", + "type": "string", "enumDescriptions": [ - "Unspecified range type. This value must not be used.", - "A fixed range. Both the `start_index` and\n`end_index` must be specified.", - "Starts the range at `start_index` and continues until the\nend of the collection. The `end_index` must not be specified.", - "Sets the range to be the whole length of the collection. Both the\n`start_index` and the `end_index` must not be\nspecified." + "Default value, signifies it is not a placeholder.", + "Body text.", + "Chart or graph.", + "Clip art image.", + "Title centered.", + "Diagram.", + "Date and time.", + "Footer text.", + "Header text.", + "Multimedia.", + "Any content type.", + "Picture.", + "Number of a slide.", + "Subtitle.", + "Table.", + "Slide title.", + "Slide image." ], + "enum": [ + "NONE", + "BODY", + "CHART", + "CLIP_ART", + "CENTERED_TITLE", + "DIAGRAM", + "DATE_AND_TIME", + "FOOTER", + "HEADER", + "MEDIA", + "OBJECT", + "PICTURE", + "SLIDE_NUMBER", + "SUBTITLE", + "TABLE", + "TITLE", + "SLIDE_IMAGE" + ] + }, + "parentObjectId": { + "description": "The object ID of this shape's parent placeholder.\nIf unset, the parent placeholder shape does not exist, so the shape does\nnot inherit properties from any other shape.", "type": "string" } }, - "id": "Range" + "id": "Placeholder" }, - "TableColumnProperties": { - "description": "Properties of each column in a table.", + "DuplicateObjectRequest": { + "description": "Duplicates a slide or page element.\n\nWhen duplicating a slide, the duplicate slide will be created immediately\nfollowing the specified slide. When duplicating a page element, the duplicate\nwill be placed on the same page at the same position as the original.", "type": "object", "properties": { - "columnWidth": { - "description": "Width of a column.", - "$ref": "Dimension" + "objectId": { + "description": "The ID of the object to duplicate.", + "type": "string" + }, + "objectIds": { + "additionalProperties": { + "type": "string" + }, + "description": "The object being duplicated may contain other objects, for example when\nduplicating a slide or a group page element. This map defines how the IDs\nof duplicated objects are generated: the keys are the IDs of the original\nobjects and its values are the IDs that will be assigned to the\ncorresponding duplicate object. The ID of the source object's duplicate\nmay be specified in this map as well, using the same value of the\n`object_id` field as a key and the newly desired ID as the value.\n\nAll keys must correspond to existing IDs in the presentation. All values\nmust be unique in the presentation and must start with an alphanumeric\ncharacter or an underscore (matches regex `[a-zA-Z0-9_]`); remaining\ncharacters may include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`). The length of the new ID must not be less than 5 or\ngreater than 50.\n\nIf any IDs of source objects are omitted from the map, a new random ID will\nbe assigned. 
If the map is empty or unset, all duplicate objects will\nreceive a new random ID.", + "type": "object" } }, - "id": "TableColumnProperties" + "id": "DuplicateObjectRequest" }, - "Request": { - "description": "A single kind of update to apply to a presentation.", + "ReplaceAllTextRequest": { + "description": "Replaces all instances of text matching a criteria with replace text.", "type": "object", "properties": { - "createParagraphBullets": { - "description": "Creates bullets for paragraphs.", - "$ref": "CreateParagraphBulletsRequest" - }, - "insertTableColumns": { - "description": "Inserts columns into a table.", - "$ref": "InsertTableColumnsRequest" - }, - "createTable": { - "description": "Creates a new table.", - "$ref": "CreateTableRequest" - }, - "deleteText": { - "description": "Deletes text from a shape or a table cell.", - "$ref": "DeleteTextRequest" - }, - "replaceAllText": { - "description": "Replaces all instances of specified text.", - "$ref": "ReplaceAllTextRequest" - }, - "updateVideoProperties": { - "description": "Updates the properties of a Video.", - "$ref": "UpdateVideoPropertiesRequest" + "replaceText": { + "description": "The text that will replace the matched text.", + "type": "string" }, - "insertText": { - "description": "Inserts text into a shape or table cell.", - "$ref": "InsertTextRequest" + "containsText": { + "description": "Finds text in a shape matching this substring.", + "$ref": "SubstringMatchCriteria" + } + }, + "id": "ReplaceAllTextRequest" + }, + "Page": { + "description": "A page in a presentation.", + "type": "object", + "properties": { + "objectId": { + "description": "The object ID for this page. Object IDs used by\nPage and\nPageElement share the same namespace.", + "type": "string" }, - "deleteTableRow": { - "description": "Deletes a row from a table.", - "$ref": "DeleteTableRowRequest" + "layoutProperties": { + "$ref": "LayoutProperties", + "description": "Layout specific properties. Only set if page_type = LAYOUT." }, - "createLine": { - "description": "Creates a line.", - "$ref": "CreateLineRequest" + "notesProperties": { + "description": "Notes specific properties. Only set if page_type = NOTES.", + "$ref": "NotesProperties" }, - "updateTextStyle": { - "description": "Updates the styling of text within a Shape or Table.", - "$ref": "UpdateTextStyleRequest" + "pageType": { + "description": "The type of the page.", + "type": "string", + "enumDescriptions": [ + "A slide page.", + "A master slide page.", + "A layout page.", + "A notes page.", + "A notes master page." + ], + "enum": [ + "SLIDE", + "MASTER", + "LAYOUT", + "NOTES", + "NOTES_MASTER" + ] }, - "insertTableRows": { - "description": "Inserts rows into a table.", - "$ref": "InsertTableRowsRequest" + "pageElements": { + "description": "The page elements rendered on the page.", + "type": "array", + "items": { + "$ref": "PageElement" + } }, - "updateTableCellProperties": { - "description": "Updates the properties of a TableCell.", - "$ref": "UpdateTableCellPropertiesRequest" + "slideProperties": { + "$ref": "SlideProperties", + "description": "Slide specific properties. Only set if page_type = SLIDE." }, - "refreshSheetsChart": { - "description": "Refreshes a Google Sheets chart.", - "$ref": "RefreshSheetsChartRequest" + "pageProperties": { + "$ref": "PageProperties", + "description": "The properties of the page." 
+ } + }, + "id": "Page" + }, + "ShapeBackgroundFill": { + "description": "The shape background fill.", + "type": "object", + "properties": { + "propertyState": { + "description": "The background fill property state.\n\nUpdating the the fill on a shape will implicitly update this field to\n`RENDERED`, unless another value is specified in the same request. To\nhave no fill on a shape, set this field to `NOT_RENDERED`. In this case,\nany other fill fields set in the same request will be ignored.", + "type": "string", + "enumDescriptions": [ + "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", + "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", + "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." + ], + "enum": [ + "RENDERED", + "NOT_RENDERED", + "INHERIT" + ] }, - "createSheetsChart": { - "description": "Creates an embedded Google Sheets chart.", - "$ref": "CreateSheetsChartRequest" + "solidFill": { + "$ref": "SolidFill", + "description": "Solid color fill." + } + }, + "id": "ShapeBackgroundFill" + }, + "CropProperties": { + "description": "The crop properties of an object enclosed in a container. For example, an\nImage.\n\nThe crop properties is represented by the offsets of four edges which define\na crop rectangle. 
The offsets are measured in percentage from the\ncorresponding edges of the object's original bounding rectangle towards\ninside, relative to the object's original dimensions.\n\n- If the offset is in the interval (0, 1), the corresponding edge of crop\nrectangle is positioned inside of the object's original bounding rectangle.\n- If the offset is negative or greater than 1, the corresponding edge of crop\nrectangle is positioned outside of the object's original bounding rectangle.\n- If the left edge of the crop rectangle is on the right side of its right\nedge, the object will be flipped horizontally.\n- If the top edge of the crop rectangle is below its bottom edge, the object\nwill be flipped vertically.\n- If all offsets and rotation angle is 0, the object is not cropped.\n\nAfter cropping, the content in the crop rectangle will be stretched to fit\nits container.", + "type": "object", + "properties": { + "angle": { + "description": "The rotation angle of the crop window around its center, in radians.\nRotation angle is applied after the offset.", + "format": "float", + "type": "number" }, - "updatePageProperties": { - "description": "Updates the properties of a Page.", - "$ref": "UpdatePagePropertiesRequest" + "topOffset": { + "description": "The offset specifies the top edge of the crop rectangle that is located\nbelow the original bounding rectangle top edge, relative to the object's\noriginal height.", + "format": "float", + "type": "number" }, - "updateShapeProperties": { - "description": "Updates the properties of a Shape.", - "$ref": "UpdateShapePropertiesRequest" + "leftOffset": { + "description": "The offset specifies the left edge of the crop rectangle that is located to\nthe right of the original bounding rectangle left edge, relative to the\nobject's original width.", + "format": "float", + "type": "number" }, - "createSlide": { - "description": "Creates a new slide.", - "$ref": "CreateSlideRequest" + "rightOffset": { + "description": "The offset specifies the right edge of the crop rectangle that is located\nto the left of the original bounding rectangle right edge, relative to the\nobject's original width.", + "format": "float", + "type": "number" }, - "deleteObject": { - "description": "Deletes a page or page element from the presentation.", - "$ref": "DeleteObjectRequest" + "bottomOffset": { + "description": "The offset specifies the bottom edge of the crop rectangle that is located\nabove the original bounding rectangle bottom edge, relative to the object's\noriginal height.", + "format": "float", + "type": "number" + } + }, + "id": "CropProperties" + }, + "ReplaceAllShapesWithSheetsChartRequest": { + "description": "Replaces all shapes that match the given criteria with the provided Google\nSheets chart. The chart will be scaled and centered to fit within the bounds\nof the original shape.\n\nNOTE: Replacing shapes with a chart requires at least one of the\nspreadsheets.readonly, spreadsheets, drive.readonly, or drive OAuth scopes.", + "type": "object", + "properties": { + "containsText": { + "description": "The criteria that the shapes must match in order to be replaced. 
The\nrequest will replace all of the shapes that contain the given text.", + "$ref": "SubstringMatchCriteria" }, - "createShape": { - "description": "Creates a new shape.", - "$ref": "CreateShapeRequest" + "chartId": { + "description": "The ID of the specific chart in the Google Sheets spreadsheet.", + "format": "int32", + "type": "integer" }, - "updatePageElementTransform": { - "description": "Updates the transform of a page element.", - "$ref": "UpdatePageElementTransformRequest" + "spreadsheetId": { + "description": "The ID of the Google Sheets spreadsheet that contains the chart.", + "type": "string" }, - "updateSlidesPosition": { - "description": "Updates the position of a set of slides in the presentation.", - "$ref": "UpdateSlidesPositionRequest" + "linkingMode": { + "enumDescriptions": [ + "The chart is not associated with the source spreadsheet and cannot be\nupdated. A chart that is not linked will be inserted as an image.", + "Linking the chart allows it to be updated, and other collaborators will\nsee a link to the spreadsheet." + ], + "enum": [ + "NOT_LINKED_IMAGE", + "LINKED" + ], + "description": "The mode with which the chart is linked to the source spreadsheet. When\nnot specified, the chart will be an image that is not linked.", + "type": "string" + } + }, + "id": "ReplaceAllShapesWithSheetsChartRequest" + }, + "ColorStop": { + "description": "A color and position in a gradient band.", + "type": "object", + "properties": { + "alpha": { + "description": "The alpha value of this color in the gradient band. Defaults to 1.0,\nfully opaque.", + "format": "float", + "type": "number" }, - "replaceAllShapesWithImage": { - "description": "Replaces all shapes matching some criteria with an image.", - "$ref": "ReplaceAllShapesWithImageRequest" + "position": { + "description": "The relative position of the color stop in the gradient band measured\nin percentage. The value should be in the interval [0.0, 1.0].", + "format": "float", + "type": "number" }, - "updateImageProperties": { - "description": "Updates the properties of an Image.", - "$ref": "UpdateImagePropertiesRequest" + "color": { + "$ref": "OpaqueColor", + "description": "The color of the gradient stop." + } + }, + "id": "ColorStop" + }, + "Range": { + "description": "Specifies a contiguous range of an indexed collection, such as characters in\ntext.", + "type": "object", + "properties": { + "type": { + "enumDescriptions": [ + "Unspecified range type. This value must not be used.", + "A fixed range. Both the `start_index` and\n`end_index` must be specified.", + "Starts the range at `start_index` and continues until the\nend of the collection. The `end_index` must not be specified.", + "Sets the range to be the whole length of the collection. Both the\n`start_index` and the `end_index` must not be\nspecified." 
+ ], + "enum": [ + "RANGE_TYPE_UNSPECIFIED", + "FIXED_RANGE", + "FROM_START_INDEX", + "ALL" + ], + "description": "The type of range.", + "type": "string" }, - "createVideo": { - "description": "Creates a video.", - "$ref": "CreateVideoRequest" + "startIndex": { + "description": "The optional zero-based index of the beginning of the collection.\nRequired for `SPECIFIC_RANGE` and `FROM_START_INDEX` ranges.", + "format": "int32", + "type": "integer" }, - "createImage": { - "description": "Creates an image.", - "$ref": "CreateImageRequest" + "endIndex": { + "description": "The optional zero-based index of the end of the collection.\nRequired for `SPECIFIC_RANGE` delete mode.", + "format": "int32", + "type": "integer" + } + }, + "id": "Range" + }, + "CreateVideoRequest": { + "description": "Creates a video.", + "type": "object", + "properties": { + "objectId": { + "description": "A user-supplied object ID.\n\nIf you specify an ID, it must be unique among all pages and page elements\nin the presentation. The ID must start with an alphanumeric character or an\nunderscore (matches regex `[a-zA-Z0-9_]`); remaining characters\nmay include those as well as a hyphen or colon (matches regex\n`[a-zA-Z0-9_-:]`).\nThe length of the ID must not be less than 5 or greater than 50.\n\nIf you don't specify an ID, a unique one is generated.", + "type": "string" }, - "duplicateObject": { - "description": "Duplicates a slide or page element.", - "$ref": "DuplicateObjectRequest" + "source": { + "description": "The video source.", + "type": "string", + "enumDescriptions": [ + "The video source is unspecified.", + "The video source is YouTube." + ], + "enum": [ + "SOURCE_UNSPECIFIED", + "YOUTUBE" + ] }, - "deleteTableColumn": { - "description": "Deletes a column from a table.", - "$ref": "DeleteTableColumnRequest" + "elementProperties": { + "$ref": "PageElementProperties", + "description": "The element properties for the video." }, - "updateLineProperties": { - "description": "Updates the properties of a Line.", - "$ref": "UpdateLinePropertiesRequest" + "id": { + "description": "The video source's unique identifier for this video.\n\ne.g. For YouTube video https://www.youtube.com/watch?v=7U3axjORYZ0,\nthe ID is 7U3axjORYZ0.", + "type": "string" } }, - "id": "Request" + "id": "CreateVideoRequest" }, - "LineFill": { - "description": "The fill of the line.", + "DuplicateObjectResponse": { + "description": "The response of duplicating an object.", "type": "object", "properties": { - "solidFill": { - "description": "Solid color fill.", - "$ref": "SolidFill" + "objectId": { + "description": "The ID of the new duplicate object.", + "type": "string" } }, - "id": "LineFill" + "id": "DuplicateObjectResponse" }, - "OutlineFill": { - "description": "The fill of the outline.", + "ReplaceAllShapesWithImageRequest": { + "description": "Replaces all shapes that match the given criteria with the provided image.", "type": "object", "properties": { - "solidFill": { - "description": "Solid color fill.", - "$ref": "SolidFill" + "imageUrl": { + "description": "The image URL.\n\nThe image is fetched once at insertion time and a copy is stored for\ndisplay inside the presentation. 
Images must be less than 50MB in size,\ncannot exceed 25 megapixels, and must be in either in PNG, JPEG, or GIF\nformat.", + "type": "string" + }, + "replaceMethod": { + "description": "The replace method.", + "type": "string", + "enumDescriptions": [ + "Scales and centers the image to fit within the bounds of the original\nshape and maintains the image's aspect ratio. The rendered size of the\nimage may be smaller than the size of the shape. This is the default\nmethod when one is not specified.", + "Scales and centers the image to fill the bounds of the original shape.\nThe image may be cropped in order to fill the shape. The rendered size of\nthe image will be the same as that of the original shape." + ], + "enum": [ + "CENTER_INSIDE", + "CENTER_CROP" + ] + }, + "containsText": { + "description": "If set, this request will replace all of the shapes that contain the\ngiven text.", + "$ref": "SubstringMatchCriteria" } }, - "id": "OutlineFill" + "id": "ReplaceAllShapesWithImageRequest" }, - "Bullet": { - "description": "Describes the bullet of a paragraph.", + "Shadow": { + "description": "The shadow properties of a page element.\n\nIf these fields are unset, they may be inherited from a parent placeholder\nif it exists. If there is no parent, the fields will default to the value\nused for new page elements created in the Slides editor, which may depend on\nthe page element kind.", "type": "object", "properties": { - "nestingLevel": { - "description": "The nesting level of this paragraph in the list.", - "type": "integer", - "format": "int32" + "rotateWithShape": { + "description": "Whether the shadow should rotate with the shape.", + "type": "boolean" + }, + "propertyState": { + "description": "The shadow property state.\n\nUpdating the the shadow on a page element will implicitly update this field\nto `RENDERED`, unless another value is specified in the same request. To\nhave no shadow on a page element, set this field to `NOT_RENDERED`. In this\ncase, any other shadow fields set in the same request will be ignored.", + "type": "string", + "enumDescriptions": [ + "If a property's state is RENDERED, then the element has the corresponding\nproperty when rendered on a page. If the element is a placeholder shape as\ndetermined by the placeholder\nfield, and it inherits from a placeholder shape, the corresponding field\nmay be unset, meaning that the property value is inherited from a parent\nplaceholder. If the element does not inherit, then the field will contain\nthe rendered value. This is the default value.", + "If a property's state is NOT_RENDERED, then the element does not have the\ncorresponding property when rendered on a page. However, the field may\nstill be set so it can be inherited by child shapes. To remove a property\nfrom a rendered element, set its property_state to NOT_RENDERED.", + "If a property's state is INHERIT, then the property state uses the value of\ncorresponding `property_state` field on the parent shape. Elements that do\nnot inherit will never have an INHERIT property state." + ], + "enum": [ + "RENDERED", + "NOT_RENDERED", + "INHERIT" + ] + }, + "blurRadius": { + "description": "The radius of the shadow blur. The larger the radius, the more diffuse the\nshadow becomes.", + "$ref": "Dimension" + }, + "type": { + "description": "The type of the shadow.", + "type": "string", + "enumDescriptions": [ + "Unspecified shadow type.", + "Outer shadow." 
+ ], + "enum": [ + "SHADOW_TYPE_UNSPECIFIED", + "OUTER" + ] + }, + "transform": { + "$ref": "AffineTransform", + "description": "Transform that encodes the translate, scale, and skew of the shadow,\nrelative to the alignment position." + }, + "alignment": { + "description": "The alignment point of the shadow, that sets the origin for translate,\nscale and skew of the shadow.", + "type": "string", + "enumDescriptions": [ + "Unspecified.", + "Top left.", + "Top center.", + "Top right.", + "Left center.", + "Center.", + "Right center.", + "Bottom left.", + "Bottom center.", + "Bottom right." + ], + "enum": [ + "RECTANGLE_POSITION_UNSPECIFIED", + "TOP_LEFT", + "TOP_CENTER", + "TOP_RIGHT", + "LEFT_CENTER", + "CENTER", + "RIGHT_CENTER", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT" + ] + }, + "alpha": { + "description": "The alpha of the shadow's color, from 0.0 to 1.0.", + "format": "float", + "type": "number" + }, + "color": { + "description": "The shadow color value.", + "$ref": "OpaqueColor" + } + }, + "id": "Shadow" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "canonicalName": "Slides", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/presentations": { + "description": "View and manage your Google Slides presentations" + }, + "https://www.googleapis.com/auth/presentations.readonly": { + "description": "View your Google Slides presentations" + }, + "https://www.googleapis.com/auth/spreadsheets.readonly": { + "description": "View your Google Spreadsheets" }, - "glyph": { - "description": "The rendered bullet glyph for this paragraph.", - "type": "string" + "https://www.googleapis.com/auth/drive": { + "description": "View and manage the files in your Google Drive" }, - "bulletStyle": { - "description": "The paragraph specific text style applied to this bullet.", - "$ref": "TextStyle" + "https://www.googleapis.com/auth/drive.readonly": { + "description": "View the files in your Google Drive" }, - "listId": { - "description": "The ID of the list this paragraph belongs to.", - "type": "string" + "https://www.googleapis.com/auth/spreadsheets": { + "description": "View and manage your spreadsheets in Google Drive" } - }, - "id": "Bullet" + } } }, - "revision": "20170110", - "basePath": "", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "version_module": "True", - "canonicalName": "Slides", - "discoveryVersion": "v1", - "baseUrl": "https://slides.googleapis.com/", + "rootUrl": "https://slides.googleapis.com/", + "ownerDomain": "google.com", "name": "slides", + "batchPath": "batch", + "title": "Google Slides API", + "ownerName": "Google", + "resources": { + "presentations": { + "methods": { + "get": { + "description": "Gets the latest version of the specified presentation.", + "response": { + "$ref": "Presentation" + }, + "parameterOrder": [ + "presentationId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/presentations", + "https://www.googleapis.com/auth/presentations.readonly" + ], + "parameters": { + "presentationId": { + "location": "path", + "description": "The ID of the presentation to retrieve.", + "required": true, + "type": "string", + "pattern": "^[^/]+$" + } + }, + "flatPath": 
"v1/presentations/{presentationsId}", + "path": "v1/presentations/{+presentationId}", + "id": "slides.presentations.get" + }, + "create": { + "response": { + "$ref": "Presentation" + }, + "parameterOrder": [], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/presentations" + ], + "parameters": {}, + "flatPath": "v1/presentations", + "path": "v1/presentations", + "id": "slides.presentations.create", + "description": "Creates a new presentation using the title given in the request. Other\nfields in the request are ignored.\nReturns the created presentation.", + "request": { + "$ref": "Presentation" + } + }, + "batchUpdate": { + "response": { + "$ref": "BatchUpdatePresentationResponse" + }, + "parameterOrder": [ + "presentationId" + ], + "httpMethod": "POST", + "parameters": { + "presentationId": { + "description": "The presentation to apply the updates to.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/presentations", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/spreadsheets.readonly" + ], + "flatPath": "v1/presentations/{presentationId}:batchUpdate", + "path": "v1/presentations/{presentationId}:batchUpdate", + "id": "slides.presentations.batchUpdate", + "request": { + "$ref": "BatchUpdatePresentationRequest" + }, + "description": "Applies one or more updates to the presentation.\n\nEach request is validated before\nbeing applied. If any request is not valid, then the entire request will\nfail and nothing will be applied.\n\nSome requests have replies to\ngive you some information about how they are applied. Other requests do\nnot need to return information; these each return an empty reply.\nThe order of replies matches that of the requests.\n\nFor example, suppose you call batchUpdate with four updates, and only the\nthird one returns information. The response would have two empty replies:\nthe reply to the third request, and another empty reply, in that order.\n\nBecause other users may be editing the presentation, the presentation\nmight not exactly reflect your changes: your changes may\nbe altered with respect to collaborator changes. If there are no\ncollaborators, the presentation should reflect your changes. In any case,\nthe updates in your request are guaranteed to be applied together\natomically." 
+ } + }, + "resources": { + "pages": { + "methods": { + "get": { + "flatPath": "v1/presentations/{presentationId}/pages/{pageObjectId}", + "id": "slides.presentations.pages.get", + "path": "v1/presentations/{presentationId}/pages/{pageObjectId}", + "description": "Gets the latest version of the specified page in the presentation.", + "httpMethod": "GET", + "response": { + "$ref": "Page" + }, + "parameterOrder": [ + "presentationId", + "pageObjectId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/presentations", + "https://www.googleapis.com/auth/presentations.readonly" + ], + "parameters": { + "presentationId": { + "description": "The ID of the presentation to retrieve.", + "required": true, + "type": "string", + "location": "path" + }, + "pageObjectId": { + "location": "path", + "description": "The object ID of the page to retrieve.", + "required": true, + "type": "string" + } + } + } + } + } + } + } + }, "parameters": { - "access_token": { - "description": "OAuth access token.", + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "type": "string", "location": "query" }, "prettyPrint": { "description": "Returns response with indentations and line breaks.", - "default": "true", "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", "default": "true", - "type": "boolean", "location": "query" }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, "fields": { + "location": "query", "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "$.xgafv": { + "description": "V1 error format.", "type": "string", - "location": "query" + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" }, "alt": { - "description": "Data format for response.", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], "location": "query", + "description": "Data format for response.", + "default": "json", "enum": [ "json", "media", "proto" ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], "type": "string" }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], + "access_token": { + "description": "OAuth access token.", "type": "string", "location": "query" }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" }, "oauth_token": { + "location": "query", "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" + "type": "string" }, "bearer_token": { "description": "OAuth bearer token.", "type": "string", "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", - "type": "string", - "location": "query" } - }, - "documentationLink": "https://developers.google.com/slides/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1", - "rootUrl": "https://slides.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/slides/v1/slides-gen.go b/vendor/google.golang.org/api/slides/v1/slides-gen.go index c85b1c225..ac29b84fb 100644 --- a/vendor/google.golang.org/api/slides/v1/slides-gen.go +++ b/vendor/google.golang.org/api/slides/v1/slides-gen.go @@ -76,9 +76,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Presentations *PresentationsService } @@ -90,6 +91,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewPresentationsService(s *Service) *PresentationsService { rs := &PresentationsService{s: s} rs.Pages = NewPresentationsPagesService(s) @@ -1235,6 +1240,13 @@ type CreateSlideRequest struct { // If you don't specify an ID, a unique one is generated. ObjectId string `json:"objectId,omitempty"` + // PlaceholderIdMappings: An optional list of object ID mappings from + // the placeholder(s) on the layout to the placeholder(s) + // that will be created on the new slide from that specified layout. Can + // only + // be used when `slide_layout_reference` is specified. + PlaceholderIdMappings []*LayoutPlaceholderIdMapping `json:"placeholderIdMappings,omitempty"` + // SlideLayoutReference: Layout reference of the slide to be inserted, // based on the *current // master*, which is one of the following: @@ -1623,6 +1635,51 @@ func (s *DeleteObjectRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DeleteParagraphBulletsRequest: Deletes bullets from all of the +// paragraphs that overlap with the given text +// index range. +// +// The nesting level of each paragraph will be visually preserved by +// adding +// indent to the start of the corresponding paragraph. +type DeleteParagraphBulletsRequest struct { + // CellLocation: The optional table cell location if the text to be + // modified is in a table + // cell. If present, the object_id must refer to a table. + CellLocation *TableCellLocation `json:"cellLocation,omitempty"` + + // ObjectId: The object ID of the shape or table containing the text to + // delete bullets + // from. + ObjectId string `json:"objectId,omitempty"` + + // TextRange: The range of text to delete bullets from, based on + // TextElement indexes. + TextRange *Range `json:"textRange,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CellLocation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CellLocation") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DeleteParagraphBulletsRequest) MarshalJSON() ([]byte, error) { + type noMethod DeleteParagraphBulletsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DeleteTableColumnRequest: Deletes a column from a table. type DeleteTableColumnRequest struct { // CellLocation: The reference table cell location from which a column @@ -2225,6 +2282,63 @@ func (s *InsertTextRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LayoutPlaceholderIdMapping: The user-specified ID mapping for a +// placeholder that will be created on a +// slide from a specified layout. +type LayoutPlaceholderIdMapping struct { + // LayoutPlaceholder: The placeholder on a layout that will be applied + // to a slide. Only type and index are needed. For example, a + // predefined `TITLE_AND_BODY` layout may usually have a TITLE + // placeholder + // with index 0 and a BODY placeholder with index 0. + LayoutPlaceholder *Placeholder `json:"layoutPlaceholder,omitempty"` + + // LayoutPlaceholderObjectId: The object ID of the placeholder on a + // layout that will be applied + // to a slide. + LayoutPlaceholderObjectId string `json:"layoutPlaceholderObjectId,omitempty"` + + // ObjectId: A user-supplied object ID for the placeholder identified + // above that to be + // created onto a slide. + // + // If you specify an ID, it must be unique among all pages and page + // elements + // in the presentation. The ID must start with an alphanumeric character + // or an + // underscore (matches regex `[a-zA-Z0-9_]`); remaining characters + // may include those as well as a hyphen or colon (matches + // regex + // `[a-zA-Z0-9_-:]`). + // The length of the ID must not be less than 5 or greater than 50. + // + // If you don't specify an ID, a unique one is generated. + ObjectId string `json:"objectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LayoutPlaceholder") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LayoutPlaceholder") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *LayoutPlaceholderIdMapping) MarshalJSON() ([]byte, error) { + type noMethod LayoutPlaceholderIdMapping + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LayoutProperties: The properties of Page are only // relevant for pages with page_type LAYOUT. type LayoutProperties struct { @@ -2631,6 +2745,46 @@ func (s *NestingLevel) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NotesProperties: The properties of Page that are only +// relevant for pages with page_type NOTES. +type NotesProperties struct { + // SpeakerNotesObjectId: The object ID of the shape on this notes page + // that contains the speaker + // notes for the corresponding slide. + // The actual shape may not always exist on the notes page. Inserting + // text + // using this object ID will automatically create the shape. In this + // case, the + // actual shape may have different object ID. The `GetPresentation` + // or + // `GetPage` action will always return the latest object ID. + SpeakerNotesObjectId string `json:"speakerNotesObjectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SpeakerNotesObjectId") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SpeakerNotesObjectId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NotesProperties) MarshalJSON() ([]byte, error) { + type noMethod NotesProperties + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // OpaqueColor: A themeable solid color value. type OpaqueColor struct { // RgbColor: An opaque RGB color. @@ -2848,6 +3002,10 @@ type Page struct { // LAYOUT. LayoutProperties *LayoutProperties `json:"layoutProperties,omitempty"` + // NotesProperties: Notes specific properties. Only set if page_type = + // NOTES. + NotesProperties *NotesProperties `json:"notesProperties,omitempty"` + // ObjectId: The object ID for this page. Object IDs used by // Page and // PageElement share the same namespace. @@ -2865,6 +3023,8 @@ type Page struct { // "SLIDE" - A slide page. // "MASTER" - A master slide page. // "LAYOUT" - A layout page. + // "NOTES" - A notes page. + // "NOTES_MASTER" - A notes master page. PageType string `json:"pageType,omitempty"` // SlideProperties: Slide specific properties. Only set if page_type = @@ -3179,8 +3339,7 @@ func (s *ParagraphMarker) MarshalJSON() ([]byte, error) { // Inherited paragraph styles are represented as unset fields in this // message. type ParagraphStyle struct { - // Alignment: The text alignment for this paragraph. This property is - // read-only. + // Alignment: The text alignment for this paragraph. 
// // Possible values: // "ALIGNMENT_UNSPECIFIED" - The paragraph alignment is inherited from @@ -3195,8 +3354,10 @@ type ParagraphStyle struct { // "JUSTIFIED" - The paragraph is justified. Alignment string `json:"alignment,omitempty"` - // Direction: The text direction of this paragraph. This property is - // read-only. + // Direction: The text direction of this paragraph. If unset, the value + // defaults to + // LEFT_TO_RIGHT + // since text direction is not inherited. // // Possible values: // "TEXT_DIRECTION_UNSPECIFIED" - The text direction is inherited from @@ -3209,42 +3370,38 @@ type ParagraphStyle struct { // corresponds to // the end of the text, based on the current text direction. If unset, // the - // value is inherited from the parent. This property is read-only. + // value is inherited from the parent. IndentEnd *Dimension `json:"indentEnd,omitempty"` // IndentFirstLine: The amount of indentation for the start of the first // line of the paragraph. - // If unset, the value is inherited from the parent. This property - // is - // read-only. + // If unset, the value is inherited from the parent. IndentFirstLine *Dimension `json:"indentFirstLine,omitempty"` // IndentStart: The amount indentation for the paragraph on the side // that corresponds to // the start of the text, based on the current text direction. If unset, // the - // value is inherited from the parent. This property is read-only. + // value is inherited from the parent. IndentStart *Dimension `json:"indentStart,omitempty"` // LineSpacing: The amount of space between lines, as a percentage of // normal, where normal // is represented as 100.0. If unset, the value is inherited from the // parent. - // This property is read-only. LineSpacing float64 `json:"lineSpacing,omitempty"` // SpaceAbove: The amount of extra space above the paragraph. If unset, // the value is - // inherited from the parent. This property is read-only. + // inherited from the parent. SpaceAbove *Dimension `json:"spaceAbove,omitempty"` // SpaceBelow: The amount of extra space above the paragraph. If unset, // the value is - // inherited from the parent. This property is read-only. + // inherited from the parent. SpaceBelow *Dimension `json:"spaceBelow,omitempty"` - // SpacingMode: The spacing mode for the paragraph. This property is - // read-only. + // SpacingMode: The spacing mode for the paragraph. // // Possible values: // "SPACING_MODE_UNSPECIFIED" - The spacing mode is inherited from the @@ -3381,6 +3538,25 @@ type Presentation struct { // master, regardless of their layout. Masters []*Page `json:"masters,omitempty"` + // NotesMaster: The notes master in the presentation. It serves three + // purposes: + // + // - Placeholder shapes on a notes master contain the default text + // styles and + // shape properties of all placeholder shapes on notes pages. + // Specifically, + // a SLIDE_IMAGE placeholder shape is defined to contain the slide + // thumbnail, and a BODY placeholder shape is defined to contain the + // speaker + // notes. + // - The notes master page properties define the common page properties + // inherited by all notes pages. + // - Any other shapes on the notes master will appear on all notes + // pages. + // + // The notes master is read-only. + NotesMaster *Page `json:"notesMaster,omitempty"` + // PageSize: The size of pages in the presentation. PageSize *Size `json:"pageSize,omitempty"` @@ -3477,12 +3653,99 @@ func (s *Range) MarshalJSON() ([]byte, error) { // Recolor: A recolor effect applied on an image. 
type Recolor struct { + // Name: The name of the recolor effect. + // + // The name is determined from the `recolor_stops` by matching the + // gradient + // against the colors in the page's current color scheme. This property + // is + // read-only. + // + // Possible values: + // "NONE" - No recolor effect. The default value. + // "LIGHT1" - A recolor effect that lightens the image using the + // page's first available + // color from its color scheme. + // "LIGHT2" - A recolor effect that lightens the image using the + // page's second + // available color from its color scheme. + // "LIGHT3" - A recolor effect that lightens the image using the + // page's third available + // color from its color scheme. + // "LIGHT4" - A recolor effect that lightens the image using the + // page's forth available + // color from its color scheme. + // "LIGHT5" - A recolor effect that lightens the image using the + // page's fifth available + // color from its color scheme. + // "LIGHT6" - A recolor effect that lightens the image using the + // page's sixth available + // color from its color scheme. + // "LIGHT7" - A recolor effect that lightens the image using the + // page's seventh + // available color from its color scheme.e. + // "LIGHT8" - A recolor effect that lightens the image using the + // page's eighth + // available color from its color scheme. + // "LIGHT9" - A recolor effect that lightens the image using the + // page's ninth available + // color from its color scheme. + // "LIGHT10" - A recolor effect that lightens the image using the + // page's tenth available + // color from its color scheme. + // "DARK1" - A recolor effect that darkens the image using the page's + // first available + // color from its color scheme. + // "DARK2" - A recolor effect that darkens the image using the page's + // second available + // color from its color scheme. + // "DARK3" - A recolor effect that darkens the image using the page's + // third available + // color from its color scheme. + // "DARK4" - A recolor effect that darkens the image using the page's + // fourth available + // color from its color scheme. + // "DARK5" - A recolor effect that darkens the image using the page's + // fifth available + // color from its color scheme. + // "DARK6" - A recolor effect that darkens the image using the page's + // sixth available + // color from its color scheme. + // "DARK7" - A recolor effect that darkens the image using the page's + // seventh + // available color from its color scheme. + // "DARK8" - A recolor effect that darkens the image using the page's + // eighth available + // color from its color scheme. + // "DARK9" - A recolor effect that darkens the image using the page's + // ninth available + // color from its color scheme. + // "DARK10" - A recolor effect that darkens the image using the page's + // tenth available + // color from its color scheme. + // "GRAYSCALE" - A recolor effect that recolors the image to + // grayscale. + // "NEGATIVE" - A recolor effect that recolors the image to negative + // grayscale. + // "SEPIA" - A recolor effect that recolors the image using the sepia + // color. + // "CUSTOM" - Custom recolor effect. Refer to `recolor_stops` for the + // concrete + // gradient. + Name string `json:"name,omitempty"` + // RecolorStops: The recolor effect is represented by a gradient, which // is a list of color - // stops. This property is read-only. + // stops. 
+ // + // The colors in the gradient will replace the corresponding colors + // at + // the same position in the color palette and apply to the image. + // This + // property is read-only. RecolorStops []*ColorStop `json:"recolorStops,omitempty"` - // ForceSendFields is a list of field names (e.g. "RecolorStops") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -3490,10 +3753,10 @@ type Recolor struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RecolorStops") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -3628,6 +3891,96 @@ func (s *ReplaceAllShapesWithImageResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ReplaceAllShapesWithSheetsChartRequest: Replaces all shapes that +// match the given criteria with the provided Google +// Sheets chart. The chart will be scaled and centered to fit within the +// bounds +// of the original shape. +// +// NOTE: Replacing shapes with a chart requires at least one of +// the +// spreadsheets.readonly, spreadsheets, drive.readonly, or drive OAuth +// scopes. +type ReplaceAllShapesWithSheetsChartRequest struct { + // ChartId: The ID of the specific chart in the Google Sheets + // spreadsheet. + ChartId int64 `json:"chartId,omitempty"` + + // ContainsText: The criteria that the shapes must match in order to be + // replaced. The + // request will replace all of the shapes that contain the given text. + ContainsText *SubstringMatchCriteria `json:"containsText,omitempty"` + + // LinkingMode: The mode with which the chart is linked to the source + // spreadsheet. When + // not specified, the chart will be an image that is not linked. + // + // Possible values: + // "NOT_LINKED_IMAGE" - The chart is not associated with the source + // spreadsheet and cannot be + // updated. A chart that is not linked will be inserted as an image. + // "LINKED" - Linking the chart allows it to be updated, and other + // collaborators will + // see a link to the spreadsheet. + LinkingMode string `json:"linkingMode,omitempty"` + + // SpreadsheetId: The ID of the Google Sheets spreadsheet that contains + // the chart. + SpreadsheetId string `json:"spreadsheetId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ChartId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChartId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReplaceAllShapesWithSheetsChartRequest) MarshalJSON() ([]byte, error) { + type noMethod ReplaceAllShapesWithSheetsChartRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReplaceAllShapesWithSheetsChartResponse: The result of replacing +// shapes with a Google Sheets chart. +type ReplaceAllShapesWithSheetsChartResponse struct { + // OccurrencesChanged: The number of shapes replaced with charts. + OccurrencesChanged int64 `json:"occurrencesChanged,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OccurrencesChanged") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OccurrencesChanged") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ReplaceAllShapesWithSheetsChartResponse) MarshalJSON() ([]byte, error) { + type noMethod ReplaceAllShapesWithSheetsChartResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ReplaceAllTextRequest: Replaces all instances of text matching a // criteria with replace text. type ReplaceAllTextRequest struct { @@ -3719,6 +4072,9 @@ type Request struct { // DeleteObject: Deletes a page or page element from the presentation. DeleteObject *DeleteObjectRequest `json:"deleteObject,omitempty"` + // DeleteParagraphBullets: Deletes bullets from paragraphs. + DeleteParagraphBullets *DeleteParagraphBulletsRequest `json:"deleteParagraphBullets,omitempty"` + // DeleteTableColumn: Deletes a column from a table. DeleteTableColumn *DeleteTableColumnRequest `json:"deleteTableColumn,omitempty"` @@ -3747,6 +4103,10 @@ type Request struct { // with an image. ReplaceAllShapesWithImage *ReplaceAllShapesWithImageRequest `json:"replaceAllShapesWithImage,omitempty"` + // ReplaceAllShapesWithSheetsChart: Replaces all shapes matching some + // criteria with a Google Sheets chart. + ReplaceAllShapesWithSheetsChart *ReplaceAllShapesWithSheetsChartRequest `json:"replaceAllShapesWithSheetsChart,omitempty"` + // ReplaceAllText: Replaces all instances of specified text. ReplaceAllText *ReplaceAllTextRequest `json:"replaceAllText,omitempty"` @@ -3762,6 +4122,10 @@ type Request struct { // UpdatePageProperties: Updates the properties of a Page. 
UpdatePageProperties *UpdatePagePropertiesRequest `json:"updatePageProperties,omitempty"` + // UpdateParagraphStyle: Updates the styling of paragraphs within a + // Shape or Table. + UpdateParagraphStyle *UpdateParagraphStyleRequest `json:"updateParagraphStyle,omitempty"` + // UpdateShapeProperties: Updates the properties of a Shape. UpdateShapeProperties *UpdateShapePropertiesRequest `json:"updateShapeProperties,omitempty"` @@ -3832,6 +4196,11 @@ type Response struct { // image. ReplaceAllShapesWithImage *ReplaceAllShapesWithImageResponse `json:"replaceAllShapesWithImage,omitempty"` + // ReplaceAllShapesWithSheetsChart: The result of replacing all shapes + // matching some criteria with a Google + // Sheets chart. + ReplaceAllShapesWithSheetsChart *ReplaceAllShapesWithSheetsChartResponse `json:"replaceAllShapesWithSheetsChart,omitempty"` + // ReplaceAllText: The result of replacing text. ReplaceAllText *ReplaceAllTextResponse `json:"replaceAllText,omitempty"` @@ -4659,6 +5028,22 @@ type SlideProperties struct { // on. MasterObjectId string `json:"masterObjectId,omitempty"` + // NotesPage: The notes page that this slide is associated with. It + // defines the visual + // appearance of a notes page when printing or exporting slides with + // speaker + // notes. A notes page inherits properties from the + // notes mater. + // The placeholder shape with type BODY on the notes page contains the + // speaker + // notes for this slide. The ID of this shape is identified by + // the + // speaker notes object id field. + // The notes page is read-only except for the text content and styles of + // the + // speaker notes shape. + NotesPage *Page `json:"notesPage,omitempty"` + // ForceSendFields is a list of field names (e.g. "LayoutObjectId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -5344,7 +5729,7 @@ type TextStyle struct { // "SUBSCRIPT" - The text is vertically offset downwards (subscript). BaselineOffset string `json:"baselineOffset,omitempty"` - // Bold: Whether or not the text is bold. + // Bold: Whether or not the text is rendered as bold. Bold bool `json:"bold,omitempty"` // FontFamily: The font family of the text. @@ -5682,6 +6067,67 @@ func (s *UpdatePagePropertiesRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UpdateParagraphStyleRequest: Updates the styling for all of the +// paragraphs within a Shape or Table that +// overlap with the given text index range. +type UpdateParagraphStyleRequest struct { + // CellLocation: The location of the cell in the table containing the + // paragraph(s) to + // style. If object_id refers to a table, cell_location must have a + // value. + // Otherwise, it must not. + CellLocation *TableCellLocation `json:"cellLocation,omitempty"` + + // Fields: The fields that should be updated. + // + // At least one field must be specified. The root `style` is implied + // and + // should not be specified. A single "*" can be used as short-hand + // for + // listing every field. + // + // For example, to update the paragraph alignment, set `fields` + // to + // "alignment". + // + // To reset a property to its default value, include its field name in + // the + // field mask but leave the field itself unset. + Fields string `json:"fields,omitempty"` + + // ObjectId: The object ID of the shape or table with the text to be + // styled. 
+ ObjectId string `json:"objectId,omitempty"` + + // Style: The paragraph's style. + Style *ParagraphStyle `json:"style,omitempty"` + + // TextRange: The range of text containing the paragraph(s) to style. + TextRange *Range `json:"textRange,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CellLocation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CellLocation") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UpdateParagraphStyleRequest) MarshalJSON() ([]byte, error) { + type noMethod UpdateParagraphStyleRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // UpdateShapePropertiesRequest: Update the properties of a Shape. type UpdateShapePropertiesRequest struct { // Fields: The fields that should be updated. @@ -5831,9 +6277,11 @@ func (s *UpdateTableCellPropertiesRequest) MarshalJSON() ([]byte, error) { // or // Table. type UpdateTextStyleRequest struct { - // CellLocation: The optional table cell location if the text to be - // styled is in a table - // cell. If present, the object_id must refer to a table. + // CellLocation: The location of the cell in the table containing the + // text to style. If + // object_id refers to a table, cell_location must have a value. + // Otherwise, it + // must not. CellLocation *TableCellLocation `json:"cellLocation,omitempty"` // Fields: The fields that should be updated. @@ -5844,12 +6292,12 @@ type UpdateTextStyleRequest struct { // for // listing every field. // - // For example to update the text style to bold, set `fields` to + // For example, to update the text style to bold, set `fields` to // "bold". // - // To reset a property to its default value, - // include its field name in the field mask but leave the field itself - // unset. + // To reset a property to its default value, include its field name in + // the + // field mask but leave the field itself unset. 
Fields string `json:"fields,omitempty"` // ObjectId: The object ID of the shape or table with the text to be @@ -6130,6 +6578,7 @@ func (c *PresentationsBatchUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchupdatepresentationrequest) if err != nil { @@ -6269,6 +6718,7 @@ func (c *PresentationsCreateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.presentation) if err != nil { @@ -6401,6 +6851,7 @@ func (c *PresentationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6546,6 +6997,7 @@ func (c *PresentationsPagesGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-api.json b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-api.json new file mode 100644 index 000000000..d8e630b0a --- /dev/null +++ b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-api.json @@ -0,0 +1,727 @@ +{ + "revision": "20170213", + "documentationLink": "https://cloud.google.com/eap/cloud-repositories/cloud-sourcerepo-api", + "id": "sourcerepo:v1", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "Binding": { + "properties": { + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. 
For example, `google.com` or `example.com`.\n\n", + "type": "array", + "items": { + "type": "string" + } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + } + }, + "id": "Binding", + "description": "Associates `members` with a `role`.", + "type": "object" + }, + "MirrorConfig": { + "description": "Configuration to automatically mirror a repository from another\nhosting service, for example GitHub or BitBucket.", + "type": "object", + "properties": { + "deployKeyId": { + "description": "ID of the SSH deploy key at the other hosting service.\nRemoving this key from the other service would deauthorize\nGoogle Cloud Source Repositories from mirroring.", + "type": "string" + }, + "url": { + "description": "URL of the main repository at the other hosting service.", + "type": "string" + }, + "webhookId": { + "description": "ID of the webhook listening to updates to trigger mirroring.\nRemoving this webook from the other hosting service will stop\nGoogle Cloud Source Repositories from receiving notifications,\nand thereby disabling mirroring.", + "type": "string" + } + }, + "id": "MirrorConfig" + }, + "Empty": { + "properties": {}, + "id": "Empty", + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object" + }, + "Repo": { + "description": "A repository (or repo) is a Git repository storing versioned source content.", + "type": "object", + "properties": { + "name": { + "description": "Resource name of the repository, of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "type": "string" + }, + "mirrorConfig": { + "description": "How this repository mirrors a repository managed by another service.", + "$ref": "MirrorConfig" + }, + "url": { + "description": "URL to clone the repository from Google Cloud Source Repositories.", + "type": "string" + }, + "size": { + "description": "The size in bytes of the repo.", + "format": "int64", + "type": "string" + } + }, + "id": "Repo" + }, + "Condition": { + "description": "A condition to be met.", + "type": "object", + "properties": { + "op": { + "enum": [ + "NO_OP", + "EQUALS", + "NOT_EQUALS", + "IN", + "NOT_IN", + "DISCHARGED" + ], + "description": "An operator to apply the subject with.", + "type": "string", + "enumDescriptions": [ + "Default no-op.", + "DEPRECATED. Use IN instead.", + "DEPRECATED. Use NOT_IN instead.", + "Set-inclusion check.", + "Set-exclusion check.", + "Subject is discharged" + ] + }, + "svc": { + "description": "Trusted attributes discharged by the service.", + "type": "string" + }, + "value": { + "description": "DEPRECATED. Use 'values' instead.", + "type": "string" + }, + "sys": { + "enumDescriptions": [ + "Default non-attribute type", + "Region of the resource", + "Service name", + "Resource name", + "IP address of the caller" + ], + "enum": [ + "NO_ATTR", + "REGION", + "SERVICE", + "NAME", + "IP" + ], + "description": "Trusted attributes supplied by any service that owns resources and uses\nthe IAM system for access control.", + "type": "string" + }, + "values": { + "description": "The objects of the condition. 
This is mutually exclusive with 'value'.", + "type": "array", + "items": { + "type": "string" + } + }, + "iam": { + "enumDescriptions": [ + "Default non-attribute.", + "Either principal or (if present) authority selector.", + "The principal (even if an authority selector is present), which\nmust only be used for attribution, not authorization.", + "An approver (distinct from the requester) that has authorized this\nrequest.\nWhen used with IN, the condition indicates that one of the approvers\nassociated with the request matches the specified principal, or is a\nmember of the specified group. Approvers can only grant additional\naccess, and are thus only used in a strictly positive context\n(e.g. ALLOW/IN or DENY/NOT_IN).\nSee: go/rpc-security-policy-dynamicauth." + ], + "enum": [ + "NO_ATTR", + "AUTHORITY", + "ATTRIBUTION", + "APPROVER" + ], + "description": "Trusted attributes supplied by the IAM system.", + "type": "string" + } + }, + "id": "Condition" + }, + "ListReposResponse": { + "description": "Response for ListRepos.", + "type": "object", + "properties": { + "repos": { + "description": "The listed repos.", + "type": "array", + "items": { + "$ref": "Repo" + } + } + }, + "id": "ListReposResponse" + }, + "TestIamPermissionsResponse": { + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse", + "description": "Response message for `TestIamPermissions` method.", + "type": "object" + }, + "CounterOptions": { + "properties": { + "metric": { + "description": "The metric to update.", + "type": "string" + }, + "field": { + "description": "The field value to attribute.", + "type": "string" + } + }, + "id": "CounterOptions", + "description": "Options for counters", + "type": "object" + }, + "AuditLogConfig": { + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", + "type": "object", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "type": "array", + "items": { + "type": "string" + } + }, + "logType": { + "enumDescriptions": [ + "Default case. Should never be this.", + "Admin reads. Example: CloudIAM getIamPolicy", + "Data writes. Example: CloudSQL Users create", + "Data reads. 
Example: CloudSQL Users list" + ], + "enum": [ + "LOG_TYPE_UNSPECIFIED", + "ADMIN_READ", + "DATA_WRITE", + "DATA_READ" + ], + "description": "The log type that this config enables.", + "type": "string" + } + }, + "id": "AuditLogConfig" + }, + "Rule": { + "properties": { + "notIn": { + "description": "If one or more 'not_in' clauses are specified, the rule matches\nif the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.\nThe format for in and not_in entries is the same as for members in a\nBinding (see google/iam/v1/policy.proto).", + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "description": "Human-readable description of the rule.", + "type": "string" + }, + "conditions": { + "description": "Additional restrictions that must be met", + "type": "array", + "items": { + "$ref": "Condition" + } + }, + "logConfig": { + "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries\nthat match the LOG action.", + "type": "array", + "items": { + "$ref": "LogConfig" + } + }, + "in": { + "description": "If one or more 'in' clauses are specified, the rule matches if\nthe PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", + "type": "array", + "items": { + "type": "string" + } + }, + "permissions": { + "description": "A permission is a string of form '\u003cservice\u003e.\u003cresource type\u003e.\u003cverb\u003e'\n(e.g., 'storage.buckets.list'). A value of '*' matches all permissions,\nand a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", + "type": "array", + "items": { + "type": "string" + } + }, + "action": { + "enumDescriptions": [ + "Default no action.", + "Matching 'Entries' grant access.", + "Matching 'Entries' grant access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' deny access.", + "Matching 'Entries' deny access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' tell IAM.Check callers to generate logs." + ], + "enum": [ + "NO_ACTION", + "ALLOW", + "ALLOW_WITH_LOG", + "DENY", + "DENY_WITH_LOG", + "LOG" + ], + "description": "Required", + "type": "string" + } + }, + "id": "Rule", + "description": "A rule to be applied in a Policy.", + "type": "object" + }, + "LogConfig": { + "description": "Specifies what kind of log the caller must write\nIncrement a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only,\nand end in \"_count\". 
Field names should not contain an initial slash.\nThe actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are\ntheir respective values.\n\nAt present the only supported field names are\n - \"iam_principal\", corresponding to IAMContext.principal;\n - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples:\n counter { metric: \"/debug_access_count\" field: \"iam_principal\" }\n ==\u003e increment counter /iam/policy/backend_debug_access_count\n {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support:\n* multiple field names (though this may be supported in the future)\n* decrementing the counter\n* incrementing it by anything other than 1", + "type": "object", + "properties": { + "cloudAudit": { + "description": "Cloud audit options.", + "$ref": "CloudAuditOptions" + }, + "counter": { + "description": "Counter options.", + "$ref": "CounterOptions" + }, + "dataAccess": { + "$ref": "DataAccessOptions", + "description": "Data access options." + } + }, + "id": "LogConfig" + }, + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsRequest" + }, + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "type": "object", + "properties": { + "iamOwned": { + "type": "boolean" + }, + "rules": { + "description": "If more than one rule is specified, the rules are applied in the following\nmanner:\n- All matching LOG rules are always applied.\n- If any DENY/DENY_WITH_LOG rule matches, permission is denied.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is\n granted.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if no rule applies, permission is denied.", + "type": "array", + "items": { + "$ref": "Rule" + } + }, + "version": { + "description": "Version of the `Policy`. 
The default version is 0.", + "format": "int32", + "type": "integer" + }, + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", + "type": "array", + "items": { + "$ref": "AuditConfig" + } + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } + }, + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + } + }, + "id": "Policy" + }, + "DataAccessOptions": { + "properties": {}, + "id": "DataAccessOptions", + "description": "Write a Data Access (Gin) log", + "type": "object" + }, + "AuditConfig": { + "description": "Specifies the audit configuration for a service.\nIt consists of which permission types are logged, and what identities, if\nany, are exempted from logging.\nAn AuditConifg must have one or more AuditLogConfigs.", + "type": "object", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that are exempted from \"data access\" audit\nlogging for the `service` specified above.\nFollows the same format of Binding.members.\nThis field is deprecated in favor of per-permission-type exemptions.", + "type": "array", + "items": { + "type": "string" + } + }, + "service": { + "description": "Specifies a service that will be enabled for audit logging.\nFor example, `resourcemanager`, `storage`, `compute`.\n`allServices` is a special value that covers all services.", + "type": "string" + }, + "auditLogConfigs": { + "description": "The configuration for logging of each type of permission.\nNext ID: 4", + "type": "array", + "items": { + "$ref": "AuditLogConfig" + } + } + }, + "id": "AuditConfig" + }, + "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "type": "object", + "properties": { + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + }, + "updateMask": { + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. 
If no mask is provided, a default\nmask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", + "format": "google-fieldmask", + "type": "string" + } + }, + "id": "SetIamPolicyRequest" + }, + "CloudAuditOptions": { + "properties": {}, + "id": "CloudAuditOptions", + "description": "Write a Cloud Audit log", + "type": "object" + } + }, + "protocol": "rest", + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "canonicalName": "Cloud Source Repositories", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://sourcerepo.googleapis.com/", + "ownerDomain": "google.com", + "name": "sourcerepo", + "batchPath": "batch", + "title": "Cloud Source Repositories API", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "repos": { + "methods": { + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/repos/.+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/repos/{reposId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "sourcerepo.projects.repos.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "request": { + "$ref": "TestIamPermissionsRequest" + } + }, + "delete": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "parameters": { + "name": { + "description": "The name of the repo to delete. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/repos/.+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/repos/{reposId}", + "path": "v1/{+name}", + "id": "sourcerepo.projects.repos.delete", + "description": "Deletes a repo." + }, + "list": { + "description": "Returns all repos belonging to a project.", + "response": { + "$ref": "ListReposResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The project ID whose repos should be listed. Values are of the form\n`projects/\u003cproject\u003e`.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/repos", + "path": "v1/{+name}/repos", + "id": "sourcerepo.projects.repos.list" + }, + "setIamPolicy": { + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.", + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/repos/.+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/repos/{reposId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "sourcerepo.projects.repos.setIamPolicy" + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "Repo" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "parent": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The project in which to create the repo. Values are of the form\n`projects/\u003cproject\u003e`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/repos", + "id": "sourcerepo.projects.repos.create", + "path": "v1/{+parent}/repos", + "description": "Creates a repo in the given project with the given name..\n\nIf the named repository already exists, `CreateRepo` returns\n`ALREADY_EXISTS`.", + "request": { + "$ref": "Repo" + } + }, + "getIamPolicy": { + "response": { + "$ref": "Policy" + }, + "httpMethod": "GET", + "parameterOrder": [ + "resource" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "pattern": "^projects/[^/]+/repos/.+$", + "location": "path", + "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/repos/{reposId}:getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "id": "sourcerepo.projects.repos.getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." + }, + "get": { + "response": { + "$ref": "Repo" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "description": "The name of the requested repository. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/repos/.+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/repos/{reposId}", + "path": "v1/{+name}", + "id": "sourcerepo.projects.repos.get", + "description": "Returns information about a repo." + } + } + } + } + } + }, + "parameters": { + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string", + "location": "query" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + } + }, + "version": "v1", + "baseUrl": "https://sourcerepo.googleapis.com/", + "kind": "discovery#restDescription", + "description": "Access source code repositories hosted by Google.", + "servicePath": "", + "basePath": "" +} diff --git a/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go new file mode 100644 index 000000000..c7e70c286 --- /dev/null +++ b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go @@ -0,0 +1,1860 @@ +// Package sourcerepo provides access to the Cloud Source Repositories API. +// +// See https://cloud.google.com/eap/cloud-repositories/cloud-sourcerepo-api +// +// Usage example: +// +// import "google.golang.org/api/sourcerepo/v1" +// ... +// sourcerepoService, err := sourcerepo.New(oauthHttpClient) +package sourcerepo // import "google.golang.org/api/sourcerepo/v1" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "sourcerepo:v1" +const apiName = "sourcerepo" +const apiVersion = "v1" +const basePath = "https://sourcerepo.googleapis.com/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Repos = NewProjectsReposService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Repos *ProjectsReposService +} + +func NewProjectsReposService(s *Service) *ProjectsReposService { + rs := &ProjectsReposService{s: s} + return rs +} + +type ProjectsReposService struct { + s *Service +} + +// AuditConfig: Specifies the audit configuration for a service. +// It consists of which permission types are logged, and what +// identities, if +// any, are exempted from logging. +// An AuditConifg must have one or more AuditLogConfigs. +type AuditConfig struct { + // AuditLogConfigs: The configuration for logging of each type of + // permission. + // Next ID: 4 + AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` + + // ExemptedMembers: Specifies the identities that are exempted from + // "data access" audit + // logging for the `service` specified above. + // Follows the same format of Binding.members. + // This field is deprecated in favor of per-permission-type exemptions. + ExemptedMembers []string `json:"exemptedMembers,omitempty"` + + // Service: Specifies a service that will be enabled for audit + // logging. + // For example, `resourcemanager`, `storage`, `compute`. + // `allServices` is a special value that covers all services. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditLogConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuditConfig) MarshalJSON() ([]byte, error) { + type noMethod AuditConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuditLogConfig: Provides the configuration for logging a type of +// permissions. 
+// Example: +// +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:foo@gmail.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE", +// } +// ] +// } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while +// exempting +// foo@gmail.com from DATA_READ logging. +type AuditLogConfig struct { + // ExemptedMembers: Specifies the identities that do not cause logging + // for this type of + // permission. + // Follows the same format of Binding.members. + ExemptedMembers []string `json:"exemptedMembers,omitempty"` + + // LogType: The log type that this config enables. + // + // Possible values: + // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. + // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy + // "DATA_WRITE" - Data writes. Example: CloudSQL Users create + // "DATA_READ" - Data reads. Example: CloudSQL Users list + LogType string `json:"logType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExemptedMembers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { + type noMethod AuditLogConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Binding: Associates `members` with a `role`. +type Binding struct { + // Members: Specifies the identities requesting access for a Cloud + // Platform resource. + // `members` can have the following values: + // + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents + // anyone + // who is authenticated with a Google account or a service + // account. + // + // * `user:{emailid}`: An email address that represents a specific + // Google + // account. For example, `alice@gmail.com` or `joe@example.com`. + // + // + // * `serviceAccount:{emailid}`: An email address that represents a + // service + // account. For example, + // `my-other-app@appspot.gserviceaccount.com`. + // + // * `group:{emailid}`: An email address that represents a Google + // group. + // For example, `admins@example.com`. + // + // * `domain:{domain}`: A Google Apps domain name that represents all + // the + // users of that domain. For example, `google.com` or + // `example.com`. + // + // + Members []string `json:"members,omitempty"` + + // Role: Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or + // `roles/owner`. + // Required + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Members") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Members") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Binding) MarshalJSON() ([]byte, error) { + type noMethod Binding + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CloudAuditOptions: Write a Cloud Audit log +type CloudAuditOptions struct { +} + +// Condition: A condition to be met. +type Condition struct { + // Iam: Trusted attributes supplied by the IAM system. + // + // Possible values: + // "NO_ATTR" - Default non-attribute. + // "AUTHORITY" - Either principal or (if present) authority selector. + // "ATTRIBUTION" - The principal (even if an authority selector is + // present), which + // must only be used for attribution, not authorization. + // "APPROVER" - An approver (distinct from the requester) that has + // authorized this + // request. + // When used with IN, the condition indicates that one of the + // approvers + // associated with the request matches the specified principal, or is + // a + // member of the specified group. Approvers can only grant + // additional + // access, and are thus only used in a strictly positive context + // (e.g. ALLOW/IN or DENY/NOT_IN). + // See: go/rpc-security-policy-dynamicauth. + Iam string `json:"iam,omitempty"` + + // Op: An operator to apply the subject with. + // + // Possible values: + // "NO_OP" - Default no-op. + // "EQUALS" - DEPRECATED. Use IN instead. + // "NOT_EQUALS" - DEPRECATED. Use NOT_IN instead. + // "IN" - Set-inclusion check. + // "NOT_IN" - Set-exclusion check. + // "DISCHARGED" - Subject is discharged + Op string `json:"op,omitempty"` + + // Svc: Trusted attributes discharged by the service. + Svc string `json:"svc,omitempty"` + + // Sys: Trusted attributes supplied by any service that owns resources + // and uses + // the IAM system for access control. + // + // Possible values: + // "NO_ATTR" - Default non-attribute type + // "REGION" - Region of the resource + // "SERVICE" - Service name + // "NAME" - Resource name + // "IP" - IP address of the caller + Sys string `json:"sys,omitempty"` + + // Value: DEPRECATED. Use 'values' instead. + Value string `json:"value,omitempty"` + + // Values: The objects of the condition. This is mutually exclusive with + // 'value'. + Values []string `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Iam") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Iam") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Condition) MarshalJSON() ([]byte, error) { + type noMethod Condition + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CounterOptions: Options for counters +type CounterOptions struct { + // Field: The field value to attribute. + Field string `json:"field,omitempty"` + + // Metric: The metric to update. + Metric string `json:"metric,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CounterOptions) MarshalJSON() ([]byte, error) { + type noMethod CounterOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DataAccessOptions: Write a Data Access (Gin) log +type DataAccessOptions struct { +} + +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated +// empty messages in your APIs. A typical example is to use it as the +// request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + +// ListReposResponse: Response for ListRepos. +type ListReposResponse struct { + // Repos: The listed repos. + Repos []*Repo `json:"repos,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Repos") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Repos") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListReposResponse) MarshalJSON() ([]byte, error) { + type noMethod ListReposResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LogConfig: Specifies what kind of log the caller must write +// Increment a streamz counter with the specified metric and field +// names. +// +// Metric names should start with a '/', generally be +// lowercase-only, +// and end in "_count". Field names should not contain an initial +// slash. +// The actual exported metric names will have "/iam/policy" +// prepended. +// +// Field names correspond to IAM request parameters and field values +// are +// their respective values. +// +// At present the only supported field names are +// - "iam_principal", corresponding to IAMContext.principal; +// - "" (empty string), resulting in one aggretated counter with no +// field. +// +// Examples: +// counter { metric: "/debug_access_count" field: "iam_principal" } +// ==> increment counter /iam/policy/backend_debug_access_count +// {iam_principal=[value of +// IAMContext.principal]} +// +// At this time we do not support: +// * multiple field names (though this may be supported in the future) +// * decrementing the counter +// * incrementing it by anything other than 1 +type LogConfig struct { + // CloudAudit: Cloud audit options. + CloudAudit *CloudAuditOptions `json:"cloudAudit,omitempty"` + + // Counter: Counter options. + Counter *CounterOptions `json:"counter,omitempty"` + + // DataAccess: Data access options. + DataAccess *DataAccessOptions `json:"dataAccess,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CloudAudit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CloudAudit") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LogConfig) MarshalJSON() ([]byte, error) { + type noMethod LogConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MirrorConfig: Configuration to automatically mirror a repository from +// another +// hosting service, for example GitHub or BitBucket. +type MirrorConfig struct { + // DeployKeyId: ID of the SSH deploy key at the other hosting + // service. + // Removing this key from the other service would deauthorize + // Google Cloud Source Repositories from mirroring. + DeployKeyId string `json:"deployKeyId,omitempty"` + + // Url: URL of the main repository at the other hosting service. 
+ Url string `json:"url,omitempty"` + + // WebhookId: ID of the webhook listening to updates to trigger + // mirroring. + // Removing this webook from the other hosting service will stop + // Google Cloud Source Repositories from receiving notifications, + // and thereby disabling mirroring. + WebhookId string `json:"webhookId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DeployKeyId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeployKeyId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MirrorConfig) MarshalJSON() ([]byte, error) { + type noMethod MirrorConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Policy: Defines an Identity and Access Management (IAM) policy. It is +// used to +// specify access control policies for Cloud Platform resources. +// +// +// A `Policy` consists of a list of `bindings`. A `Binding` binds a list +// of +// `members` to a `role`, where the members can be user accounts, Google +// groups, +// Google domains, and service accounts. A `role` is a named list of +// permissions +// defined by IAM. +// +// **Example** +// +// { +// "bindings": [ +// { +// "role": "roles/owner", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", +// ] +// }, +// { +// "role": "roles/viewer", +// "members": ["user:sean@example.com"] +// } +// ] +// } +// +// For a description of IAM and its features, see the +// [IAM developer's guide](https://cloud.google.com/iam). +type Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` + + // Bindings: Associates a list of `members` to a `role`. + // Multiple `bindings` must not be specified for the same + // `role`. + // `bindings` with no members will result in an error. + Bindings []*Binding `json:"bindings,omitempty"` + + // Etag: `etag` is used for optimistic concurrency control as a way to + // help + // prevent simultaneous updates of a policy from overwriting each + // other. + // It is strongly suggested that systems make use of the `etag` in + // the + // read-modify-write cycle to perform policy updates in order to avoid + // race + // conditions: An `etag` is returned in the response to `getIamPolicy`, + // and + // systems are expected to put that etag in the request to + // `setIamPolicy` to + // ensure that their change will be applied to the same version of the + // policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the + // existing + // policy is overwritten blindly. 
+ Etag string `json:"etag,omitempty"` + + IamOwned bool `json:"iamOwned,omitempty"` + + // Rules: If more than one rule is specified, the rules are applied in + // the following + // manner: + // - All matching LOG rules are always applied. + // - If any DENY/DENY_WITH_LOG rule matches, permission is denied. + // Logging will be applied if one or more matching rule requires + // logging. + // - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is + // granted. + // Logging will be applied if one or more matching rule requires + // logging. + // - Otherwise, if no rule applies, permission is denied. + Rules []*Rule `json:"rules,omitempty"` + + // Version: Version of the `Policy`. The default version is 0. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Policy) MarshalJSON() ([]byte, error) { + type noMethod Policy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Repo: A repository (or repo) is a Git repository storing versioned +// source content. +type Repo struct { + // MirrorConfig: How this repository mirrors a repository managed by + // another service. + MirrorConfig *MirrorConfig `json:"mirrorConfig,omitempty"` + + // Name: Resource name of the repository, of the + // form + // `projects//repos/`. + Name string `json:"name,omitempty"` + + // Size: The size in bytes of the repo. + Size int64 `json:"size,omitempty,string"` + + // Url: URL to clone the repository from Google Cloud Source + // Repositories. + Url string `json:"url,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "MirrorConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MirrorConfig") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Repo) MarshalJSON() ([]byte, error) { + type noMethod Repo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Rule: A rule to be applied in a Policy. +type Rule struct { + // Action: Required + // + // Possible values: + // "NO_ACTION" - Default no action. + // "ALLOW" - Matching 'Entries' grant access. + // "ALLOW_WITH_LOG" - Matching 'Entries' grant access and the caller + // promises to log + // the request per the returned log_configs. + // "DENY" - Matching 'Entries' deny access. + // "DENY_WITH_LOG" - Matching 'Entries' deny access and the caller + // promises to log + // the request per the returned log_configs. + // "LOG" - Matching 'Entries' tell IAM.Check callers to generate logs. + Action string `json:"action,omitempty"` + + // Conditions: Additional restrictions that must be met + Conditions []*Condition `json:"conditions,omitempty"` + + // Description: Human-readable description of the rule. + Description string `json:"description,omitempty"` + + // In: If one or more 'in' clauses are specified, the rule matches + // if + // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. + In []string `json:"in,omitempty"` + + // LogConfig: The config returned to callers of tech.iam.IAM.CheckPolicy + // for any entries + // that match the LOG action. + LogConfig []*LogConfig `json:"logConfig,omitempty"` + + // NotIn: If one or more 'not_in' clauses are specified, the rule + // matches + // if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries. + // The format for in and not_in entries is the same as for members in + // a + // Binding (see google/iam/v1/policy.proto). + NotIn []string `json:"notIn,omitempty"` + + // Permissions: A permission is a string of form '..' + // (e.g., 'storage.buckets.list'). A value of '*' matches all + // permissions, + // and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Rule) MarshalJSON() ([]byte, error) { + type noMethod Rule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SetIamPolicyRequest: Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + // Policy: REQUIRED: The complete policy to be applied to the + // `resource`. The size of + // the policy is limited to a few 10s of KB. An empty policy is a + // valid policy but certain Cloud Platform services (such as + // Projects) + // might reject them. 
+ Policy *Policy `json:"policy,omitempty"` + + // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the + // policy to modify. Only + // the fields in the mask will be modified. If no mask is provided, a + // default + // mask is used: + // paths: "bindings, etag" + // This field is only used by Cloud IAM. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Policy") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type noMethod SetIamPolicyRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsRequest: Request message for `TestIamPermissions` +// method. +type TestIamPermissionsRequest struct { + // Permissions: The set of permissions to check for the `resource`. + // Permissions with + // wildcards (such as '*' or 'storage.*') are not allowed. For + // more + // information see + // [IAM + // Overview](https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { + type noMethod TestIamPermissionsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsResponse: Response message for `TestIamPermissions` +// method. +type TestIamPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type noMethod TestIamPermissionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "sourcerepo.projects.repos.create": + +type ProjectsReposCreateCall struct { + s *Service + parent string + repo *Repo + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a repo in the given project with the given name.. +// +// If the named repository already exists, `CreateRepo` +// returns +// `ALREADY_EXISTS`. +func (r *ProjectsReposService) Create(parent string, repo *Repo) *ProjectsReposCreateCall { + c := &ProjectsReposCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.repo = repo + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsReposCreateCall) Fields(s ...googleapi.Field) *ProjectsReposCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsReposCreateCall) Context(ctx context.Context) *ProjectsReposCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsReposCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsReposCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.repo) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/repos") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sourcerepo.projects.repos.create" call. +// Exactly one of *Repo or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Repo.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsReposCreateCall) Do(opts ...googleapi.CallOption) (*Repo, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Repo{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a repo in the given project with the given name..\n\nIf the named repository already exists, `CreateRepo` returns\n`ALREADY_EXISTS`.", + // "flatPath": "v1/projects/{projectsId}/repos", + // "httpMethod": "POST", + // "id": "sourcerepo.projects.repos.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The project in which to create the repo. Values are of the form\n`projects/\u003cproject\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/repos", + // "request": { + // "$ref": "Repo" + // }, + // "response": { + // "$ref": "Repo" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sourcerepo.projects.repos.delete": + +type ProjectsReposDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a repo. +func (r *ProjectsReposService) Delete(name string) *ProjectsReposDeleteCall { + c := &ProjectsReposDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsReposDeleteCall) Fields(s ...googleapi.Field) *ProjectsReposDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsReposDeleteCall) Context(ctx context.Context) *ProjectsReposDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsReposDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsReposDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sourcerepo.projects.repos.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsReposDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a repo.", + // "flatPath": "v1/projects/{projectsId}/repos/{reposId}", + // "httpMethod": "DELETE", + // "id": "sourcerepo.projects.repos.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the repo to delete. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/repos/.+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sourcerepo.projects.repos.get": + +type ProjectsReposGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns information about a repo. +func (r *ProjectsReposService) Get(name string) *ProjectsReposGetCall { + c := &ProjectsReposGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsReposGetCall) Fields(s ...googleapi.Field) *ProjectsReposGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsReposGetCall) IfNoneMatch(entityTag string) *ProjectsReposGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsReposGetCall) Context(ctx context.Context) *ProjectsReposGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsReposGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsReposGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sourcerepo.projects.repos.get" call. +// Exactly one of *Repo or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Repo.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsReposGetCall) Do(opts ...googleapi.CallOption) (*Repo, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Repo{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns information about a repo.", + // "flatPath": "v1/projects/{projectsId}/repos/{reposId}", + // "httpMethod": "GET", + // "id": "sourcerepo.projects.repos.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the requested repository. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/repos/.+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Repo" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sourcerepo.projects.repos.getIamPolicy": + +type ProjectsReposGetIamPolicyCall struct { + s *Service + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. +// Returns an empty policy if the resource exists and does not have a +// policy +// set. 
+func (r *ProjectsReposService) GetIamPolicy(resource string) *ProjectsReposGetIamPolicyCall { + c := &ProjectsReposGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsReposGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsReposGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsReposGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsReposGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsReposGetIamPolicyCall) Context(ctx context.Context) *ProjectsReposGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsReposGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsReposGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sourcerepo.projects.repos.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsReposGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "flatPath": "v1/projects/{projectsId}/repos/{reposId}:getIamPolicy", + // "httpMethod": "GET", + // "id": "sourcerepo.projects.repos.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/repos/.+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sourcerepo.projects.repos.list": + +type ProjectsReposListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns all repos belonging to a project. +func (r *ProjectsReposService) List(name string) *ProjectsReposListCall { + c := &ProjectsReposListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsReposListCall) Fields(s ...googleapi.Field) *ProjectsReposListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsReposListCall) IfNoneMatch(entityTag string) *ProjectsReposListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsReposListCall) Context(ctx context.Context) *ProjectsReposListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsReposListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsReposListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/repos") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sourcerepo.projects.repos.list" call. +// Exactly one of *ListReposResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListReposResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsReposListCall) Do(opts ...googleapi.CallOption) (*ListReposResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListReposResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns all repos belonging to a project.", + // "flatPath": "v1/projects/{projectsId}/repos", + // "httpMethod": "GET", + // "id": "sourcerepo.projects.repos.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The project ID whose repos should be listed. Values are of the form\n`projects/\u003cproject\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}/repos", + // "response": { + // "$ref": "ListReposResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sourcerepo.projects.repos.setIamPolicy": + +type ProjectsReposSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any +// existing policy. +func (r *ProjectsReposService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsReposSetIamPolicyCall { + c := &ProjectsReposSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsReposSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsReposSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsReposSetIamPolicyCall) Context(ctx context.Context) *ProjectsReposSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsReposSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsReposSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sourcerepo.projects.repos.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsReposSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + // "flatPath": "v1/projects/{projectsId}/repos/{reposId}:setIamPolicy", + // "httpMethod": "POST", + // "id": "sourcerepo.projects.repos.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being specified.\n`resource` is usually specified as a path. 
For example, a Project\nresource is specified as `projects/{project}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/repos/.+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:setIamPolicy", + // "request": { + // "$ref": "SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "sourcerepo.projects.repos.testIamPermissions": + +type ProjectsReposTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// If the resource does not exist, this will return an empty set +// of +// permissions, not a NOT_FOUND error. +func (r *ProjectsReposService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsReposTestIamPermissionsCall { + c := &ProjectsReposTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsReposTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsReposTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsReposTestIamPermissionsCall) Context(ctx context.Context) *ProjectsReposTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsReposTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsReposTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sourcerepo.projects.repos.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
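+//
+// As a minimal, illustrative sketch only (the resource path, the permission
+// string, and the reposService/ctx variables below are placeholders assumed
+// to be in scope, not values defined in this file), the call shown above is
+// typically chained like:
+//
+//   req := &TestIamPermissionsRequest{
+//       // field name follows the standard IAM TestIamPermissions request
+//       // shape; verify against the generated type in this package
+//       Permissions: []string{"source.repos.get"}, // placeholder permission
+//   }
+//   call := reposService.TestIamPermissions("projects/my-project/repos/my-repo", req)
+//   resp, err := call.Context(ctx).Do()
+//   if err != nil {
+//       // handle the error
+//   }
+//   _ = resp // *TestIamPermissionsResponse, as documented above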
+func (c *ProjectsReposTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "flatPath": "v1/projects/{projectsId}/repos/{reposId}:testIamPermissions", + // "httpMethod": "POST", + // "id": "sourcerepo.projects.repos.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy detail is being requested.\n`resource` is usually specified as a path. For example, a Project\nresource is specified as `projects/{project}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/repos/.+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/spanner/v1/spanner-api.json b/vendor/google.golang.org/api/spanner/v1/spanner-api.json new file mode 100644 index 000000000..ddc860c5c --- /dev/null +++ b/vendor/google.golang.org/api/spanner/v1/spanner-api.json @@ -0,0 +1,2607 @@ +{ + "version_module": "True", + "schemas": { + "Database": { + "id": "Database", + "description": "A Cloud Spanner database.", + "type": "object", + "properties": { + "state": { + "enumDescriptions": [ + "Not specified.", + "The database is still being created. Operations on the database may fail\nwith `FAILED_PRECONDITION` in this state.", + "The database is fully created and ready for use." + ], + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "READY" + ], + "description": "Output only. The current database state.", + "type": "string" + }, + "name": { + "description": "Required. The name of the database. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`,\nwhere `\u003cdatabase\u003e` is as specified in the `CREATE DATABASE`\nstatement. 
This name can be passed to other API methods to\nidentify the database.", + "type": "string" + } + } + }, + "ListDatabasesResponse": { + "description": "The response for ListDatabases.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "`next_page_token` can be sent in a subsequent\nListDatabases call to fetch more\nof the matching databases.", + "type": "string" + }, + "databases": { + "description": "Databases that matched the request.", + "type": "array", + "items": { + "$ref": "Database" + } + } + }, + "id": "ListDatabasesResponse" + }, + "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "type": "object", + "properties": { + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + }, + "updateMask": { + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, a default\nmask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", + "format": "google-fieldmask", + "type": "string" + } + }, + "id": "SetIamPolicyRequest" + }, + "Instance": { + "properties": { + "displayName": { + "type": "string", + "description": "Required. The descriptive name for this instance as it appears in UIs.\nMust be unique per project and between 4 and 30 characters in length." + }, + "nodeCount": { + "description": "Required. The number of nodes allocated to this instance.", + "format": "int32", + "type": "integer" + }, + "labels": { + "description": "Cloud Labels are a flexible and lightweight mechanism for organizing cloud\nresources into groups that reflect a customer's organizational needs and\ndeployment strategies. Cloud Labels can be used to filter collections of\nresources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. route,\nfirewall, load balancing, etc.).\n\n * Label keys must be between 1 and 63 characters long and must conform to\n the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.\n * Label values must be between 0 and 63 characters long and must conform\n to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.\n * No more than 64 labels can be associated with a given resource.\n\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\n\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. And so you are advised to use an\ninternal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: name + \"_\" + value would prove problematic if we were to\nallow \"_\" in a future release.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "config": { + "description": "Required. The name of the instance's configuration. Values are of the form\n`projects/\u003cproject\u003e/instanceConfigs/\u003cconfiguration\u003e`. See\nalso InstanceConfig and\nListInstanceConfigs.", + "type": "string" + }, + "state": { + "enumDescriptions": [ + "Not specified.", + "The instance is still being created. 
Resources may not be\navailable yet, and operations such as database creation may not\nwork.", + "The instance is fully created and ready to do work such as\ncreating databases." + ], + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "READY" + ], + "description": "Output only. The current instance state. For\nCreateInstance, the state must be\neither omitted or set to `CREATING`. For\nUpdateInstance, the state must be\neither omitted or set to `READY`.", + "type": "string" + }, + "name": { + "description": "Required. A unique identifier for the instance, which cannot be changed\nafter the instance is created. Values are of the form\n`projects/\u003cproject\u003e/instances/a-z*[a-z0-9]`. The final\nsegment of the name must be between 6 and 30 characters in length.", + "type": "string" + } + }, + "id": "Instance", + "description": "An isolated set of Cloud Spanner resources on which databases can be hosted.", + "type": "object" + }, + "RollbackRequest": { + "description": "The request for Rollback.", + "type": "object", + "properties": { + "transactionId": { + "description": "Required. The transaction to roll back.", + "format": "byte", + "type": "string" + } + }, + "id": "RollbackRequest" + }, + "Transaction": { + "description": "A transaction.", + "type": "object", + "properties": { + "id": { + "description": "`id` may be used to identify the transaction in subsequent\nRead,\nExecuteSql,\nCommit, or\nRollback calls.\n\nSingle-use read-only transactions do not have IDs, because\nsingle-use transactions do not support multiple requests.", + "format": "byte", + "type": "string" + }, + "readTimestamp": { + "description": "For snapshot read-only transactions, the read timestamp chosen\nfor the transaction. Not returned by default: see\nTransactionOptions.ReadOnly.return_read_timestamp.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "Transaction" + }, + "UpdateDatabaseDdlMetadata": { + "id": "UpdateDatabaseDdlMetadata", + "description": "Metadata type for the operation returned by\nUpdateDatabaseDdl.", + "type": "object", + "properties": { + "statements": { + "description": "For an update this list contains all the statements. For an\nindividual statement, this list contains only that statement.", + "type": "array", + "items": { + "type": "string" + } + }, + "commitTimestamps": { + "description": "Reports the commit timestamps of all statements that have\nsucceeded so far, where `commit_timestamps[i]` is the commit\ntimestamp for the statement `statements[i]`.", + "type": "array", + "items": { + "format": "google-datetime", + "type": "string" + } + }, + "database": { + "description": "The database being modified.", + "type": "string" + } + } + }, + "CounterOptions": { + "description": "Options for counters", + "type": "object", + "properties": { + "metric": { + "description": "The metric to update.", + "type": "string" + }, + "field": { + "description": "The field value to attribute.", + "type": "string" + } + }, + "id": "CounterOptions" + }, + "StructType": { + "description": "`StructType` defines the fields of a STRUCT type.", + "type": "object", + "properties": { + "fields": { + "description": "The list of fields that make up this struct. Order is\nsignificant, because values of this struct type are represented as\nlists, where the order of field values matches the order of\nfields in the StructType. 
In turn, the order of fields\nmatches the order of columns in a read request, or the order of\nfields in the `SELECT` clause of a query.", + "type": "array", + "items": { + "$ref": "Field" + } + } + }, + "id": "StructType" + }, + "QueryPlan": { + "description": "Contains an ordered list of nodes appearing in the query plan.", + "type": "object", + "properties": { + "planNodes": { + "description": "The nodes in the query plan. Plan nodes are returned in pre-order starting\nwith the plan root. Each PlanNode's `id` corresponds to its index in\n`plan_nodes`.", + "type": "array", + "items": { + "$ref": "PlanNode" + } + } + }, + "id": "QueryPlan" + }, + "Field": { + "description": "Message representing a single field of a struct.", + "type": "object", + "properties": { + "name": { + "description": "The name of the field. For reads, this is the column name. For\nSQL queries, it is the column alias (e.g., `\"Word\"` in the\nquery `\"SELECT 'hello' AS Word\"`), or the column name (e.g.,\n`\"ColName\"` in the query `\"SELECT ColName FROM Table\"`). Some\ncolumns might have an empty name (e.g., !\"SELECT\nUPPER(ColName)\"`). Note that a query result can contain\nmultiple fields with the same name.", + "type": "string" + }, + "type": { + "$ref": "Type", + "description": "The type of the field." + } + }, + "id": "Field" + }, + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "REQUIRED: The set of permissions to check for 'resource'.\nPermissions with wildcards (such as '*', 'spanner.*', 'spanner.instances.*') are not allowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsRequest" + }, + "ResultSetStats": { + "description": "Additional statistics about a ResultSet or PartialResultSet.", + "type": "object", + "properties": { + "queryPlan": { + "description": "QueryPlan for the query associated with this result.", + "$ref": "QueryPlan" + }, + "queryStats": { + "additionalProperties": { + "type": "any", + "description": "Properties of the object." + }, + "description": "Aggregated statistics from the execution of the query. Only present when\nthe query is profiled. For example, a query could return the statistics as\nfollows:\n\n {\n \"rows_returned\": \"3\",\n \"elapsed_time\": \"1.22 secs\",\n \"cpu_time\": \"1.19 secs\"\n }", + "type": "object" + } + }, + "id": "ResultSetStats" + }, + "CommitResponse": { + "description": "The response for Commit.", + "type": "object", + "properties": { + "commitTimestamp": { + "description": "The Cloud Spanner timestamp at which the transaction committed.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "CommitResponse" + }, + "Type": { + "type": "object", + "properties": { + "structType": { + "$ref": "StructType", + "description": "If code == STRUCT, then `struct_type`\nprovides type information for the struct's fields." + }, + "arrayElementType": { + "$ref": "Type", + "description": "If code == ARRAY, then `array_element_type`\nis the type of the array elements." + }, + "code": { + "type": "string", + "enumDescriptions": [ + "Not specified.", + "Encoded as JSON `true` or `false`.", + "Encoded as `string`, in decimal format.", + "Encoded as `number`, or the strings `\"NaN\"`, `\"Infinity\"`, or\n`\"-Infinity\"`.", + "Encoded as `string` in RFC 3339 timestamp format. 
The time zone\nmust be present, and must be `\"Z\"`.", + "Encoded as `string` in RFC 3339 date format.", + "Encoded as `string`.", + "Encoded as a base64-encoded `string`, as described in RFC 4648,\nsection 4.", + "Encoded as `list`, where the list elements are represented\naccording to array_element_type.", + "Encoded as `list`, where list element `i` is represented according\nto [struct_type.fields[i]][google.spanner.v1.StructType.fields]." + ], + "enum": [ + "TYPE_CODE_UNSPECIFIED", + "BOOL", + "INT64", + "FLOAT64", + "TIMESTAMP", + "DATE", + "STRING", + "BYTES", + "ARRAY", + "STRUCT" + ], + "description": "Required. The TypeCode for this type." + } + }, + "id": "Type", + "description": "`Type` indicates the type of a Cloud Spanner value, as might be stored in a\ntable cell or returned from an SQL query." + }, + "PlanNode": { + "description": "Node information for nodes appearing in a QueryPlan.plan_nodes.", + "type": "object", + "properties": { + "index": { + "description": "The `PlanNode`'s index in node list.", + "format": "int32", + "type": "integer" + }, + "kind": { + "description": "Used to determine the type of node. May be needed for visualizing\ndifferent kinds of nodes differently. For example, If the node is a\nSCALAR node, it will have a condensed representation\nwhich can be used to directly embed a description of the node in its\nparent.", + "type": "string", + "enumDescriptions": [ + "Not specified.", + "Denotes a Relational operator node in the expression tree. Relational\noperators represent iterative processing of rows during query execution.\nFor example, a `TableScan` operation that reads rows from a table.", + "Denotes a Scalar node in the expression tree. Scalar nodes represent\nnon-iterable entities in the query plan. For example, constants or\narithmetic operators appearing inside predicate expressions or references\nto column names." + ], + "enum": [ + "KIND_UNSPECIFIED", + "RELATIONAL", + "SCALAR" + ] + }, + "displayName": { + "description": "The display name for the node.", + "type": "string" + }, + "childLinks": { + "description": "List of child node `index`es and their relationship to this parent.", + "type": "array", + "items": { + "$ref": "ChildLink" + } + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Attributes relevant to the node contained in a group of key-value pairs.\nFor example, a Parameter Reference node could have the following\ninformation in its metadata:\n\n {\n \"parameter_reference\": \"param1\",\n \"parameter_type\": \"array\"\n }", + "type": "object" + }, + "executionStats": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The execution statistics associated with the node, contained in a group of\nkey-value pairs. Only present if the plan was returned as a result of a\nprofile query. For example, number of executions, number of rows/time per\nexecution etc.", + "type": "object" + }, + "shortRepresentation": { + "$ref": "ShortRepresentation", + "description": "Condensed representation for SCALAR nodes." + } + }, + "id": "PlanNode" + }, + "CreateInstanceMetadata": { + "description": "Metadata type for the operation returned by\nCreateInstance.", + "type": "object", + "properties": { + "cancelTime": { + "description": "The time at which this operation was cancelled. 
If set, this operation is\nin the process of undoing itself (which is guaranteed to succeed) and\ncannot be cancelled again.", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "The time at which this operation failed or was completed successfully.", + "format": "google-datetime", + "type": "string" + }, + "instance": { + "$ref": "Instance", + "description": "The instance being created." + }, + "startTime": { + "description": "The time at which the\nCreateInstance request was\nreceived.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "CreateInstanceMetadata" + }, + "AuditConfig": { + "description": "Specifies the audit configuration for a service.\nIt consists of which permission types are logged, and what identities, if\nany, are exempted from logging.\nAn AuditConifg must have one or more AuditLogConfigs.", + "type": "object", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that are exempted from \"data access\" audit\nlogging for the `service` specified above.\nFollows the same format of Binding.members.\nThis field is deprecated in favor of per-permission-type exemptions.", + "type": "array", + "items": { + "type": "string" + } + }, + "service": { + "description": "Specifies a service that will be enabled for audit logging.\nFor example, `resourcemanager`, `storage`, `compute`.\n`allServices` is a special value that covers all services.", + "type": "string" + }, + "auditLogConfigs": { + "description": "The configuration for logging of each type of permission.\nNext ID: 4", + "type": "array", + "items": { + "$ref": "AuditLogConfig" + } + } + }, + "id": "AuditConfig" + }, + "ChildLink": { + "description": "Metadata associated with a parent-child relationship appearing in a\nPlanNode.", + "type": "object", + "properties": { + "type": { + "description": "The type of the link. For example, in Hash Joins this could be used to\ndistinguish between the build child and the probe child, or in the case\nof the child being an output variable, to represent the tag associated\nwith the output variable.", + "type": "string" + }, + "childIndex": { + "description": "The node to which the link points.", + "format": "int32", + "type": "integer" + }, + "variable": { + "type": "string", + "description": "Only present if the child node is SCALAR and corresponds\nto an output variable of the parent node. The field carries the name of\nthe output variable.\nFor example, a `TableScan` operator that reads rows from a table will\nhave child links to the `SCALAR` nodes representing the output variables\ncreated for each column that is read by the operator. The corresponding\n`variable` fields will be set to the variable names assigned to the\ncolumns." + } + }, + "id": "ChildLink" + }, + "CloudAuditOptions": { + "description": "Write a Cloud Audit log", + "type": "object", + "properties": {}, + "id": "CloudAuditOptions" + }, + "Delete": { + "description": "Arguments to delete operations.", + "type": "object", + "properties": { + "table": { + "description": "Required. The table whose rows will be deleted.", + "type": "string" + }, + "keySet": { + "$ref": "KeySet", + "description": "Required. The primary keys of the rows within table to delete." + } + }, + "id": "Delete" + }, + "CommitRequest": { + "description": "The request for Commit.", + "type": "object", + "properties": { + "singleUseTransaction": { + "$ref": "TransactionOptions", + "description": "Execute mutations in a temporary transaction. 
Note that unlike\ncommit of a previously-started transaction, commit with a\ntemporary transaction is non-idempotent. That is, if the\n`CommitRequest` is sent to Cloud Spanner more than once (for\ninstance, due to retries in the application, or in the\ntransport library), it is possible that the mutations are\nexecuted more than once. If this is undesirable, use\nBeginTransaction and\nCommit instead." + }, + "mutations": { + "description": "The mutations to be executed when this transaction commits. All\nmutations are applied atomically, in the order they appear in\nthis list.", + "type": "array", + "items": { + "$ref": "Mutation" + } + }, + "transactionId": { + "description": "Commit a previously-started transaction.", + "format": "byte", + "type": "string" + } + }, + "id": "CommitRequest" + }, + "BeginTransactionRequest": { + "description": "The request for BeginTransaction.", + "type": "object", + "properties": { + "options": { + "$ref": "TransactionOptions", + "description": "Required. Options for the new transaction." + } + }, + "id": "BeginTransactionRequest" + }, + "ListInstanceConfigsResponse": { + "description": "The response for ListInstanceConfigs.", + "type": "object", + "properties": { + "nextPageToken": { + "type": "string", + "description": "`next_page_token` can be sent in a subsequent\nListInstanceConfigs call to\nfetch more of the matching instance configurations." + }, + "instanceConfigs": { + "description": "The list of requested instance configurations.", + "type": "array", + "items": { + "$ref": "InstanceConfig" + } + } + }, + "id": "ListInstanceConfigsResponse" + }, + "GetIamPolicyRequest": { + "description": "Request message for `GetIamPolicy` method.", + "type": "object", + "properties": {}, + "id": "GetIamPolicyRequest" + }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse" + }, + "Rule": { + "description": "A rule to be applied in a Policy.", + "type": "object", + "properties": { + "notIn": { + "description": "If one or more 'not_in' clauses are specified, the rule matches\nif the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.\nThe format for in and not_in entries is the same as for members in a\nBinding (see google/iam/v1/policy.proto).", + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "description": "Human-readable description of the rule.", + "type": "string" + }, + "conditions": { + "type": "array", + "items": { + "$ref": "Condition" + }, + "description": "Additional restrictions that must be met" + }, + "logConfig": { + "type": "array", + "items": { + "$ref": "LogConfig" + }, + "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries\nthat match the LOG action." + }, + "in": { + "description": "If one or more 'in' clauses are specified, the rule matches if\nthe PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", + "type": "array", + "items": { + "type": "string" + } + }, + "permissions": { + "description": "A permission is a string of form '\u003cservice\u003e.\u003cresource type\u003e.\u003cverb\u003e'\n(e.g., 'storage.buckets.list'). 
A value of '*' matches all permissions,\nand a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", + "type": "array", + "items": { + "type": "string" + } + }, + "action": { + "enum": [ + "NO_ACTION", + "ALLOW", + "ALLOW_WITH_LOG", + "DENY", + "DENY_WITH_LOG", + "LOG" + ], + "description": "Required", + "type": "string", + "enumDescriptions": [ + "Default no action.", + "Matching 'Entries' grant access.", + "Matching 'Entries' grant access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' deny access.", + "Matching 'Entries' deny access and the caller promises to log\nthe request per the returned log_configs.", + "Matching 'Entries' tell IAM.Check callers to generate logs." + ] + } + }, + "id": "Rule" + }, + "CreateDatabaseMetadata": { + "id": "CreateDatabaseMetadata", + "description": "Metadata type for the operation returned by\nCreateDatabase.", + "type": "object", + "properties": { + "database": { + "description": "The database being created.", + "type": "string" + } + } + }, + "LogConfig": { + "description": "Specifies what kind of log the caller must write\nIncrement a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only,\nand end in \"_count\". Field names should not contain an initial slash.\nThe actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are\ntheir respective values.\n\nAt present the only supported field names are\n - \"iam_principal\", corresponding to IAMContext.principal;\n - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples:\n counter { metric: \"/debug_access_count\" field: \"iam_principal\" }\n ==\u003e increment counter /iam/policy/backend_debug_access_count\n {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support:\n* multiple field names (though this may be supported in the future)\n* decrementing the counter\n* incrementing it by anything other than 1", + "type": "object", + "properties": { + "counter": { + "description": "Counter options.", + "$ref": "CounterOptions" + }, + "dataAccess": { + "description": "Data access options.", + "$ref": "DataAccessOptions" + }, + "cloudAudit": { + "description": "Cloud audit options.", + "$ref": "CloudAuditOptions" + } + }, + "id": "LogConfig" + }, + "Session": { + "description": "A session in the Cloud Spanner API.", + "type": "object", + "properties": { + "name": { + "description": "Required. The name of the session.", + "type": "string" + } + }, + "id": "Session" + }, + "KeyRange": { + "description": "KeyRange represents a range of rows in a table or index.\n\nA range has a start key and an end key. 
These keys can be open or\nclosed, indicating if the range includes rows with that key.\n\nKeys are represented by lists, where the ith value in the list\ncorresponds to the ith component of the table or index primary key.\nIndividual values are encoded as described here.\n\nFor example, consider the following table definition:\n\n CREATE TABLE UserEvents (\n UserName STRING(MAX),\n EventDate STRING(10)\n ) PRIMARY KEY(UserName, EventDate);\n\nThe following keys name rows in this table:\n\n \"Bob\", \"2014-09-23\"\n\nSince the `UserEvents` table's `PRIMARY KEY` clause names two\ncolumns, each `UserEvents` key has two elements; the first is the\n`UserName`, and the second is the `EventDate`.\n\nKey ranges with multiple components are interpreted\nlexicographically by component using the table or index key's declared\nsort order. For example, the following range returns all events for\nuser `\"Bob\"` that occurred in the year 2015:\n\n \"start_closed\": [\"Bob\", \"2015-01-01\"]\n \"end_closed\": [\"Bob\", \"2015-12-31\"]\n\nStart and end keys can omit trailing key components. This affects the\ninclusion and exclusion of rows that exactly match the provided key\ncomponents: if the key is closed, then rows that exactly match the\nprovided components are included; if the key is open, then rows\nthat exactly match are not included.\n\nFor example, the following range includes all events for `\"Bob\"` that\noccurred during and after the year 2000:\n\n \"start_closed\": [\"Bob\", \"2000-01-01\"]\n \"end_closed\": [\"Bob\"]\n\nThe next example retrieves all events for `\"Bob\"`:\n\n \"start_closed\": [\"Bob\"]\n \"end_closed\": [\"Bob\"]\n\nTo retrieve events before the year 2000:\n\n \"start_closed\": [\"Bob\"]\n \"end_open\": [\"Bob\", \"2000-01-01\"]\n\nThe following range includes all rows in the table:\n\n \"start_closed\": []\n \"end_closed\": []\n\nThis range returns all users whose `UserName` begins with any\ncharacter from A to C:\n\n \"start_closed\": [\"A\"]\n \"end_open\": [\"D\"]\n\nThis range returns all users whose `UserName` begins with B:\n\n \"start_closed\": [\"B\"]\n \"end_open\": [\"C\"]\n\nKey ranges honor column sort order. 
For example, suppose a table is\ndefined as follows:\n\n CREATE TABLE DescendingSortedTable {\n Key INT64,\n ...\n ) PRIMARY KEY(Key DESC);\n\nThe following range retrieves all rows with key values between 1\nand 100 inclusive:\n\n \"start_closed\": [\"100\"]\n \"end_closed\": [\"1\"]\n\nNote that 100 is passed as the start, and 1 is passed as the end,\nbecause `Key` is a descending column in the schema.", + "type": "object", + "properties": { + "startClosed": { + "description": "If the start is closed, then the range includes all rows whose\nfirst `len(start_closed)` key columns exactly match `start_closed`.", + "type": "array", + "items": { + "type": "any" + } + }, + "startOpen": { + "description": "If the start is open, then the range excludes rows whose first\n`len(start_open)` key columns exactly match `start_open`.", + "type": "array", + "items": { + "type": "any" + } + }, + "endOpen": { + "description": "If the end is open, then the range excludes rows whose first\n`len(end_open)` key columns exactly match `end_open`.", + "type": "array", + "items": { + "type": "any" + } + }, + "endClosed": { + "description": "If the end is closed, then the range includes all rows whose\nfirst `len(end_closed)` key columns exactly match `end_closed`.", + "type": "array", + "items": { + "type": "any" + } + } + }, + "id": "KeyRange" + }, + "ListInstancesResponse": { + "properties": { + "nextPageToken": { + "description": "`next_page_token` can be sent in a subsequent\nListInstances call to fetch more\nof the matching instances.", + "type": "string" + }, + "instances": { + "description": "The list of requested instances.", + "type": "array", + "items": { + "$ref": "Instance" + } + } + }, + "id": "ListInstancesResponse", + "description": "The response for ListInstances.", + "type": "object" + }, + "ShortRepresentation": { + "description": "Condensed representation of a node and its subtree. Only present for\n`SCALAR` PlanNode(s).", + "type": "object", + "properties": { + "description": { + "description": "A string representation of the expression subtree rooted at this node.", + "type": "string" + }, + "subqueries": { + "additionalProperties": { + "format": "int32", + "type": "integer" + }, + "description": "A mapping of (subquery variable name) -\u003e (subquery node id) for cases\nwhere the `description` string of this node references a `SCALAR`\nsubquery contained in the expression subtree rooted at this node. The\nreferenced `SCALAR` subquery may not necessarily be a direct child of\nthis node.", + "type": "object" + } + }, + "id": "ShortRepresentation" + }, + "InstanceConfig": { + "properties": { + "name": { + "description": "A unique identifier for the instance configuration. Values\nare of the form\n`projects/\u003cproject\u003e/instanceConfigs/a-z*`", + "type": "string" + }, + "displayName": { + "description": "The name of this instance configuration as it appears in UIs.", + "type": "string" + } + }, + "id": "InstanceConfig", + "description": "A possible configuration for a Cloud Spanner instance. Configurations\ndefine the geographic placement of nodes and their replication.", + "type": "object" + }, + "UpdateInstanceRequest": { + "description": "The request for UpdateInstance.", + "type": "object", + "properties": { + "instance": { + "$ref": "Instance", + "description": "Required. The instance to update, which must always include the instance\nname. Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included." 
+ }, + "fieldMask": { + "description": "Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated.\nThe field mask must always be specified; this prevents any future fields in\n[][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know\nabout them.", + "format": "google-fieldmask", + "type": "string" + } + }, + "id": "UpdateInstanceRequest" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "TransactionOptions": { + "properties": { + "readWrite": { + "$ref": "ReadWrite", + "description": "Transaction may write.\n\nAuthorization to begin a read-write transaction requires\n`spanner.databases.beginOrRollbackReadWriteTransaction` permission\non the `session` resource." + }, + "readOnly": { + "$ref": "ReadOnly", + "description": "Transaction will not write.\n\nAuthorization to begin a read-only transaction requires\n`spanner.databases.beginReadOnlyTransaction` permission\non the `session` resource." + } + }, + "id": "TransactionOptions", + "description": "# Transactions\n\n\nEach session can have at most one active transaction at a time. After the\nactive transaction is completed, the session can immediately be\nre-used for the next transaction. It is not necessary to create a\nnew session for each transaction.\n\n# Transaction Modes\n\nCloud Spanner supports two transaction modes:\n\n 1. Locking read-write. This type of transaction is the only way\n to write data into Cloud Spanner. These transactions rely on\n pessimistic locking and, if necessary, two-phase commit.\n Locking read-write transactions may abort, requiring the\n application to retry.\n\n 2. Snapshot read-only. This transaction type provides guaranteed\n consistency across several reads, but does not allow\n writes. Snapshot read-only transactions can be configured to\n read at timestamps in the past. Snapshot read-only\n transactions do not need to be committed.\n\nFor transactions that only read, snapshot read-only transactions\nprovide simpler semantics and are almost always faster. In\nparticular, read-only transactions do not take locks, so they do\nnot conflict with read-write transactions. As a consequence of not\ntaking locks, they also do not abort, so retry loops are not needed.\n\nTransactions may only read/write data in a single database. They\nmay, however, read/write data in different tables within that\ndatabase.\n\n## Locking Read-Write Transactions\n\nLocking transactions may be used to atomically read-modify-write\ndata anywhere in a database. This type of transaction is externally\nconsistent.\n\nClients should attempt to minimize the amount of time a transaction\nis active. Faster transactions commit with higher probability\nand cause less contention. Cloud Spanner attempts to keep read locks\nactive as long as the transaction continues to do reads, and the\ntransaction has not been terminated by\nCommit or\nRollback. 
Long periods of\ninactivity at the client may cause Cloud Spanner to release a\ntransaction's locks and abort it.\n\nReads performed within a transaction acquire locks on the data\nbeing read. Writes can only be done at commit time, after all reads\nhave been completed.\nConceptually, a read-write transaction consists of zero or more\nreads or SQL queries followed by\nCommit. At any time before\nCommit, the client can send a\nRollback request to abort the\ntransaction.\n\n### Semantics\n\nCloud Spanner can commit the transaction if all read locks it acquired\nare still valid at commit time, and it is able to acquire write\nlocks for all writes. Cloud Spanner can abort the transaction for any\nreason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees\nthat the transaction has not modified any user data in Cloud Spanner.\n\nUnless the transaction commits, Cloud Spanner makes no guarantees about\nhow long the transaction's locks were held for. It is an error to\nuse Cloud Spanner locks for any sort of mutual exclusion other than\nbetween Cloud Spanner transactions themselves.\n\n### Retrying Aborted Transactions\n\nWhen a transaction aborts, the application can choose to retry the\nwhole transaction again. To maximize the chances of successfully\ncommitting the retry, the client should execute the retry in the\nsame session as the original attempt. The original session's lock\npriority increases with each consecutive abort, meaning that each\nattempt has a slightly better chance of success than the previous.\n\nUnder some circumstances (e.g., many transactions attempting to\nmodify the same row(s)), a transaction can abort many times in a\nshort period before successfully committing. Thus, it is not a good\nidea to cap the number of retries a transaction can attempt;\ninstead, it is better to limit the total amount of wall time spent\nretrying.\n\n### Idle Transactions\n\nA transaction is considered idle if it has no outstanding reads or\nSQL queries and has not started a read or SQL query within the last 10\nseconds. Idle transactions can be aborted by Cloud Spanner so that they\ndon't hold on to locks indefinitely. In that case, the commit will\nfail with error `ABORTED`.\n\nIf this behavior is undesirable, periodically executing a simple\nSQL query in the transaction (e.g., `SELECT 1`) prevents the\ntransaction from becoming idle.\n\n## Snapshot Read-Only Transactions\n\nSnapshot read-only transactions provides a simpler method than\nlocking read-write transactions for doing several consistent\nreads. However, this type of transaction does not support writes.\n\nSnapshot transactions do not take locks. Instead, they work by\nchoosing a Cloud Spanner timestamp, then executing all reads at that\ntimestamp. Since they do not acquire locks, they do not block\nconcurrent read-write transactions.\n\nUnlike locking read-write transactions, snapshot read-only\ntransactions never abort. 
They can fail if the chosen read\ntimestamp is garbage collected; however, the default garbage\ncollection policy is generous enough that most applications do not\nneed to worry about this in practice.\n\nSnapshot read-only transactions do not need to call\nCommit or\nRollback (and in fact are not\npermitted to do so).\n\nTo execute a snapshot transaction, the client specifies a timestamp\nbound, which tells Cloud Spanner how to choose a read timestamp.\n\nThe types of timestamp bound are:\n\n - Strong (the default).\n - Bounded staleness.\n - Exact staleness.\n\nIf the Cloud Spanner database to be read is geographically distributed,\nstale read-only transactions can execute more quickly than strong\nor read-write transaction, because they are able to execute far\nfrom the leader replica.\n\nEach type of timestamp bound is discussed in detail below.\n\n### Strong\n\nStrong reads are guaranteed to see the effects of all transactions\nthat have committed before the start of the read. Furthermore, all\nrows yielded by a single read are consistent with each other -- if\nany part of the read observes a transaction, all parts of the read\nsee the transaction.\n\nStrong reads are not repeatable: two consecutive strong read-only\ntransactions might return inconsistent results if there are\nconcurrent writes. If consistency across reads is required, the\nreads should be executed within a transaction or at an exact read\ntimestamp.\n\nSee TransactionOptions.ReadOnly.strong.\n\n### Exact Staleness\n\nThese timestamp bounds execute reads at a user-specified\ntimestamp. Reads at a timestamp are guaranteed to see a consistent\nprefix of the global transaction history: they observe\nmodifications done by all transactions with a commit timestamp \u003c=\nthe read timestamp, and observe none of the modifications done by\ntransactions with a larger commit timestamp. They will block until\nall conflicting transactions that may be assigned commit timestamps\n\u003c= the read timestamp have finished.\n\nThe timestamp can either be expressed as an absolute Cloud Spanner commit\ntimestamp or a staleness relative to the current time.\n\nThese modes do not require a \"negotiation phase\" to pick a\ntimestamp. As a result, they execute slightly faster than the\nequivalent boundedly stale concurrency modes. On the other hand,\nboundedly stale reads usually return fresher results.\n\nSee TransactionOptions.ReadOnly.read_timestamp and\nTransactionOptions.ReadOnly.exact_staleness.\n\n### Bounded Staleness\n\nBounded staleness modes allow Cloud Spanner to pick the read timestamp,\nsubject to a user-provided staleness bound. Cloud Spanner chooses the\nnewest timestamp within the staleness bound that allows execution\nof the reads at the closest available replica without blocking.\n\nAll rows yielded are consistent with each other -- if any part of\nthe read observes a transaction, all parts of the read see the\ntransaction. Boundedly stale reads are not repeatable: two stale\nreads, even if they use the same staleness bound, can execute at\ndifferent timestamps and thus return inconsistent results.\n\nBoundedly stale reads execute in two phases: the first phase\nnegotiates a timestamp among all replicas needed to serve the\nread. In the second phase, reads are executed at the negotiated\ntimestamp.\n\nAs a result of the two phase execution, bounded staleness reads are\nusually a little slower than comparable exact staleness\nreads. 
However, they are typically able to return fresher\nresults, and are more likely to execute at the closest replica.\n\nBecause the timestamp negotiation requires up-front knowledge of\nwhich rows will be read, it can only be used with single-use\nread-only transactions.\n\nSee TransactionOptions.ReadOnly.max_staleness and\nTransactionOptions.ReadOnly.min_read_timestamp.\n\n### Old Read Timestamps and Garbage Collection\n\nCloud Spanner continuously garbage collects deleted and overwritten data\nin the background to reclaim storage space. This process is known\nas \"version GC\". By default, version GC reclaims versions after they\nare one hour old. Because of this, Cloud Spanner cannot perform reads\nat read timestamps more than one hour in the past. This\nrestriction also applies to in-progress reads and/or SQL queries whose\ntimestamp become too old while executing. Reads and SQL queries with\ntoo-old read timestamps fail with the error `FAILED_PRECONDITION`.", + "type": "object" + }, + "CreateDatabaseRequest": { + "properties": { + "createStatement": { + "type": "string", + "description": "Required. A `CREATE DATABASE` statement, which specifies the ID of the\nnew database. The database ID must conform to the regular expression\n`a-z*[a-z0-9]` and be between 2 and 30 characters in length." + }, + "extraStatements": { + "description": "An optional list of DDL statements to run inside the newly created\ndatabase. Statements can create tables, indexes, etc. These\nstatements execute atomically with the creation of the database:\nif there is an error in any statement, the database is not created.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "CreateDatabaseRequest", + "description": "The request for CreateDatabase.", + "type": "object" + }, + "CreateInstanceRequest": { + "description": "The request for CreateInstance.", + "type": "object", + "properties": { + "instanceId": { + "description": "Required. The ID of the instance to create. Valid identifiers are of the\nform `a-z*[a-z0-9]` and must be between 6 and 30 characters in\nlength.", + "type": "string" + }, + "instance": { + "$ref": "Instance", + "description": "Required. The instance to create. The name may be omitted, but if\nspecified must be `\u003cparent\u003e/instances/\u003cinstance_id\u003e`." + } + }, + "id": "CreateInstanceRequest" + }, + "Condition": { + "description": "A condition to be met.", + "type": "object", + "properties": { + "iam": { + "enumDescriptions": [ + "Default non-attribute.", + "Either principal or (if present) authority selector.", + "The principal (even if an authority selector is present), which\nmust only be used for attribution, not authorization.", + "Any of the security realms in the IAMContext (go/security-realms).\nWhen used with IN, the condition indicates \"any of the request's realms\nmatch one of the given values; with NOT_IN, \"none of the realms match\nany of the given values\". It is not permitted to grant access based on\nthe *absence* of a realm, so realm conditions can only be used in\na \"positive\" context (e.g., ALLOW/IN or DENY/NOT_IN)." + ], + "enum": [ + "NO_ATTR", + "AUTHORITY", + "ATTRIBUTION", + "SECURITY_REALM" + ], + "description": "Trusted attributes supplied by the IAM system.", + "type": "string" + }, + "values": { + "description": "The objects of the condition. 
This is mutually exclusive with 'value'.", + "type": "array", + "items": { + "type": "string" + } + }, + "op": { + "type": "string", + "enumDescriptions": [ + "Default no-op.", + "DEPRECATED. Use IN instead.", + "DEPRECATED. Use NOT_IN instead.", + "Set-inclusion check.", + "Set-exclusion check.", + "Subject is discharged" + ], + "enum": [ + "NO_OP", + "EQUALS", + "NOT_EQUALS", + "IN", + "NOT_IN", + "DISCHARGED" + ], + "description": "An operator to apply the subject with." + }, + "svc": { + "description": "Trusted attributes discharged by the service.", + "type": "string" + }, + "value": { + "type": "string", + "description": "DEPRECATED. Use 'values' instead." + }, + "sys": { + "enumDescriptions": [ + "Default non-attribute type", + "Region of the resource", + "Service name", + "Resource name", + "IP address of the caller" + ], + "enum": [ + "NO_ATTR", + "REGION", + "SERVICE", + "NAME", + "IP" + ], + "description": "Trusted attributes supplied by any service that owns resources and uses\nthe IAM system for access control.", + "type": "string" + } + }, + "id": "Condition" + }, + "AuditLogConfig": { + "id": "AuditLogConfig", + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", + "type": "object", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "type": "array", + "items": { + "type": "string" + } + }, + "logType": { + "description": "The log type that this config enables.", + "type": "string", + "enumDescriptions": [ + "Default case. Should never be this.", + "Admin reads. Example: CloudIAM getIamPolicy", + "Data writes. Example: CloudSQL Users create", + "Data reads. Example: CloudSQL Users list" + ], + "enum": [ + "LOG_TYPE_UNSPECIFIED", + "ADMIN_READ", + "DATA_WRITE", + "DATA_READ" + ] + } + } + }, + "ReadOnly": { + "description": "Options for read-only transactions.", + "type": "object", + "properties": { + "exactStaleness": { + "type": "string", + "description": "Executes all reads at a timestamp that is `exact_staleness`\nold. The timestamp is chosen soon after the read is started.\n\nGuarantees that all writes that have committed more than the\nspecified number of seconds ago are visible. 
Because Cloud Spanner\nchooses the exact timestamp, this mode works even if the client's\nlocal clock is substantially skewed from Cloud Spanner commit\ntimestamps.\n\nUseful for reading at nearby replicas without the distributed\ntimestamp negotiation overhead of `max_staleness`.", + "format": "google-duration" + }, + "strong": { + "description": "Read at a timestamp where all previously committed transactions\nare visible.", + "type": "boolean" + }, + "minReadTimestamp": { + "description": "Executes all reads at a timestamp \u003e= `min_read_timestamp`.\n\nThis is useful for requesting fresher data than some previous\nread, or data that is fresh enough to observe the effects of some\npreviously committed transaction whose timestamp is known.\n\nNote that this option can only be used in single-use transactions.", + "format": "google-datetime", + "type": "string" + }, + "maxStaleness": { + "description": "Read data at a timestamp \u003e= `NOW - max_staleness`\nseconds. Guarantees that all writes that have committed more\nthan the specified number of seconds ago are visible. Because\nCloud Spanner chooses the exact timestamp, this mode works even if\nthe client's local clock is substantially skewed from Cloud Spanner\ncommit timestamps.\n\nUseful for reading the freshest data available at a nearby\nreplica, while bounding the possible staleness if the local\nreplica has fallen behind.\n\nNote that this option can only be used in single-use\ntransactions.", + "format": "google-duration", + "type": "string" + }, + "readTimestamp": { + "type": "string", + "description": "Executes all reads at the given timestamp. Unlike other modes,\nreads at a specific timestamp are repeatable; the same read at\nthe same timestamp always returns the same data. If the\ntimestamp is in the future, the read will block until the\nspecified timestamp, modulo the read's deadline.\n\nUseful for large scale consistent reads such as mapreduces, or\nfor coordinating many reads against a consistent snapshot of the\ndata.", + "format": "google-datetime" + }, + "returnReadTimestamp": { + "description": "If true, the Cloud Spanner-selected read timestamp is included in\nthe Transaction message that describes the transaction.", + "type": "boolean" + } + }, + "id": "ReadOnly" + }, + "ExecuteSqlRequest": { + "description": "The request for ExecuteSql and\nExecuteStreamingSql.", + "type": "object", + "properties": { + "paramTypes": { + "additionalProperties": { + "$ref": "Type" + }, + "description": "It is not always possible for Cloud Spanner to infer the right SQL type\nfrom a JSON value. For example, values of type `BYTES` and values\nof type `STRING` both appear in params as JSON strings.\n\nIn these cases, `param_types` can be used to specify the exact\nSQL type for some or all of the SQL query parameters. See the\ndefinition of Type for more information\nabout SQL types.", + "type": "object" + }, + "sql": { + "description": "Required. The SQL query string.", + "type": "string" + }, + "params": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The SQL query string can contain parameter placeholders. A parameter\nplaceholder consists of `'@'` followed by the parameter\nname. Parameter names consist of any combination of letters,\nnumbers, and underscores.\n\nParameters can appear anywhere that a literal value is expected. 
The same\nparameter name can be used more than once, for example:\n `\"WHERE id \u003e @msg_id AND id \u003c @msg_id + 100\"`\n\nIt is an error to execute an SQL query with unbound parameters.\n\nParameter values are specified using `params`, which is a JSON\nobject whose keys are parameter names, and whose values are the\ncorresponding parameter values.", + "type": "object" + }, + "queryMode": { + "enum": [ + "NORMAL", + "PLAN", + "PROFILE" + ], + "description": "Used to control the amount of debugging information returned in\nResultSetStats.", + "type": "string", + "enumDescriptions": [ + "The default mode where only the query result, without any information\nabout the query plan is returned.", + "This mode returns only the query plan, without any result rows or\nexecution statistics information.", + "This mode returns both the query plan and the execution statistics along\nwith the result rows." + ] + }, + "transaction": { + "description": "The transaction to use. If none is provided, the default is a\ntemporary read-only transaction with strong concurrency.", + "$ref": "TransactionSelector" + }, + "resumeToken": { + "description": "If this request is resuming a previously interrupted SQL query\nexecution, `resume_token` should be copied from the last\nPartialResultSet yielded before the interruption. Doing this\nenables the new SQL query execution to resume where the last one left\noff. The rest of the request parameters must exactly match the\nrequest that yielded this token.", + "format": "byte", + "type": "string" + } + }, + "id": "ExecuteSqlRequest" + }, + "Policy": { + "id": "Policy", + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "type": "object", + "properties": { + "rules": { + "description": "If more than one rule is specified, the rules are applied in the following\nmanner:\n- All matching LOG rules are always applied.\n- If any DENY/DENY_WITH_LOG rule matches, permission is denied.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is\n granted.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if no rule applies, permission is denied.", + "type": "array", + "items": { + "$ref": "Rule" + } + }, + "version": { + "description": "Version of the `Policy`. 
The default version is 0.", + "format": "int32", + "type": "integer" + }, + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", + "type": "array", + "items": { + "$ref": "AuditConfig" + } + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } + }, + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + }, + "iamOwned": { + "type": "boolean" + } + } + }, + "ReadRequest": { + "id": "ReadRequest", + "description": "The request for Read and\nStreamingRead.", + "type": "object", + "properties": { + "limit": { + "description": "If greater than zero, only the first `limit` rows are yielded. If `limit`\nis zero, the default is no limit.", + "format": "int64", + "type": "string" + }, + "index": { + "description": "If non-empty, the name of an index on table. This index is\nused instead of the table primary key when interpreting key_set\nand sorting result rows. See key_set for further information.", + "type": "string" + }, + "keySet": { + "$ref": "KeySet", + "description": "Required. `key_set` identifies the rows to be yielded. `key_set` names the\nprimary keys of the rows in table to be yielded, unless index\nis present. If index is present, then key_set instead names\nindex keys in index.\n\nRows are yielded in table primary key order (if index is empty)\nor index key order (if index is non-empty).\n\nIt is not an error for the `key_set` to name rows that do not\nexist in the database. Read yields nothing for nonexistent rows." + }, + "columns": { + "description": "The columns of table to be returned for each row matching\nthis request.", + "type": "array", + "items": { + "type": "string" + } + }, + "transaction": { + "$ref": "TransactionSelector", + "description": "The transaction to use. If none is provided, the default is a\ntemporary read-only transaction with strong concurrency." + }, + "resumeToken": { + "type": "string", + "description": "If this request is resuming a previously interrupted read,\n`resume_token` should be copied from the last\nPartialResultSet yielded before the interruption. Doing this\nenables the new read to resume where the last read left off. The\nrest of the request parameters must exactly match the request\nthat yielded this token.", + "format": "byte" + }, + "table": { + "description": "Required. 
The name of the table in the database to be read.", + "type": "string" + } + } + }, + "Write": { + "type": "object", + "properties": { + "columns": { + "description": "The names of the columns in table to be written.\n\nThe list of columns must contain enough columns to allow\nCloud Spanner to derive values for all primary key columns in the\nrow(s) to be modified.", + "type": "array", + "items": { + "type": "string" + } + }, + "values": { + "description": "The values to be written. `values` can contain more than one\nlist of values. If it does, then multiple rows are written, one\nfor each entry in `values`. Each list in `values` must have\nexactly as many entries as there are entries in columns\nabove. Sending multiple lists is equivalent to sending multiple\n`Mutation`s, each containing one `values` entry and repeating\ntable and columns. Individual values in each list are\nencoded as described here.", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "any" + } + } + }, + "table": { + "description": "Required. The table whose rows will be written.", + "type": "string" + } + }, + "id": "Write", + "description": "Arguments to insert, update, insert_or_update, and\nreplace operations." + }, + "DataAccessOptions": { + "description": "Write a Data Access (Gin) log", + "type": "object", + "properties": {}, + "id": "DataAccessOptions" + }, + "ReadWrite": { + "description": "Options for read-write transactions.", + "type": "object", + "properties": {}, + "id": "ReadWrite" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "type": "object", + "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" + }, + "response": { + "additionalProperties": { + "type": "any", + "description": "Properties of the object. Contains field @type with type URL." + }, + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "type": "string" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. 
Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object" + } + }, + "id": "Operation" + }, + "ResultSet": { + "properties": { + "stats": { + "$ref": "ResultSetStats", + "description": "Query plan and execution statistics for the query that produced this\nresult set. These can be requested by setting\nExecuteSqlRequest.query_mode." + }, + "rows": { + "description": "Each element in `rows` is a row whose format is defined by\nmetadata.row_type. The ith element\nin each row matches the ith field in\nmetadata.row_type. Elements are\nencoded based on type as described\nhere.", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "any" + } + } + }, + "metadata": { + "$ref": "ResultSetMetadata", + "description": "Metadata about the result set, such as row type information." + } + }, + "id": "ResultSet", + "description": "Results from Read or\nExecuteSql.", + "type": "object" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. 
If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + } + }, + "id": "Status" + }, + "UpdateDatabaseDdlRequest": { + "description": "Enqueues the given DDL statements to be applied, in order but not\nnecessarily all at once, to the database schema at some point (or\npoints) in the future. The server checks that the statements\nare executable (syntactically valid, name tables that exist, etc.)\nbefore enqueueing them, but they may still fail upon\nlater execution (e.g., if a statement from another batch of\nstatements is applied first and it conflicts in some way, or if\nthere is some data-related problem like a `NULL` value in a column to\nwhich `NOT NULL` would be added). If a statement fails, all\nsubsequent statements in the batch are automatically cancelled.\n\nEach batch of statements is assigned a name which can be used with\nthe Operations API to monitor\nprogress. See the\noperation_id field for more\ndetails.", + "type": "object", + "properties": { + "statements": { + "description": "DDL statements to be applied to the database.", + "type": "array", + "items": { + "type": "string" + } + }, + "operationId": { + "description": "If empty, the new update request is assigned an\nautomatically-generated operation ID. Otherwise, `operation_id`\nis used to construct the name of the resulting\nOperation.\n\nSpecifying an explicit operation ID simplifies determining\nwhether the statements were executed in the event that the\nUpdateDatabaseDdl call is replayed,\nor the return value is otherwise lost: the database and\n`operation_id` fields can be combined to form the\nname of the resulting\nlongrunning.Operation: `\u003cdatabase\u003e/operations/\u003coperation_id\u003e`.\n\n`operation_id` should be unique within the database, and must be\na valid identifier: `a-z*`. Note that\nautomatically-generated operation IDs always begin with an\nunderscore. If the named operation already exists,\nUpdateDatabaseDdl returns\n`ALREADY_EXISTS`.", + "type": "string" + } + }, + "id": "UpdateDatabaseDdlRequest" + }, + "Binding": { + "description": "Associates `members` with a `role`.", + "type": "object", + "properties": { + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. 
For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "type": "array", + "items": { + "type": "string" + } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + } + }, + "id": "Binding" + }, + "PartialResultSet": { + "properties": { + "chunkedValue": { + "description": "If true, then the final value in values is chunked, and must\nbe combined with more values from subsequent `PartialResultSet`s\nto obtain a complete field value.", + "type": "boolean" + }, + "metadata": { + "description": "Metadata about the result set, such as row type information.\nOnly present in the first response.", + "$ref": "ResultSetMetadata" + }, + "values": { + "description": "A streamed result set consists of a stream of values, which might\nbe split into many `PartialResultSet` messages to accommodate\nlarge rows and/or large values. Every N complete values defines a\nrow, where N is equal to the number of entries in\nmetadata.row_type.fields.\n\nMost values are encoded based on type as described\nhere.\n\nIt is possible that the last value in values is \"chunked\",\nmeaning that the rest of the value is sent in subsequent\n`PartialResultSet`(s). This is denoted by the chunked_value\nfield. Two or more chunked values can be merged to form a\ncomplete value as follows:\n\n * `bool/number/null`: cannot be chunked\n * `string`: concatenate the strings\n * `list`: concatenate the lists. If the last element in a list is a\n `string`, `list`, or `object`, merge it with the first element in\n the next list by applying these rules recursively.\n * `object`: concatenate the (field name, field value) pairs. If a\n field name is duplicated, then apply these rules recursively\n to merge the field values.\n\nSome examples of merging:\n\n # Strings are concatenated.\n \"foo\", \"bar\" =\u003e \"foobar\"\n\n # Lists of non-strings are concatenated.\n [2, 3], [4] =\u003e [2, 3, 4]\n\n # Lists are concatenated, but the last and first elements are merged\n # because they are strings.\n [\"a\", \"b\"], [\"c\", \"d\"] =\u003e [\"a\", \"bc\", \"d\"]\n\n # Lists are concatenated, but the last and first elements are merged\n # because they are lists. Recursively, the last and first elements\n # of the inner lists are merged because they are strings.\n [\"a\", [\"b\", \"c\"]], [[\"d\"], \"e\"] =\u003e [\"a\", [\"b\", \"cd\"], \"e\"]\n\n # Non-overlapping object fields are combined.\n {\"a\": \"1\"}, {\"b\": \"2\"} =\u003e {\"a\": \"1\", \"b\": 2\"}\n\n # Overlapping object fields are merged.\n {\"a\": \"1\"}, {\"a\": \"2\"} =\u003e {\"a\": \"12\"}\n\n # Examples of merging objects containing lists of strings.\n {\"a\": [\"1\"]}, {\"a\": [\"2\"]} =\u003e {\"a\": [\"12\"]}\n\nFor a more complete example, suppose a streaming SQL query is\nyielding a result set whose rows contain a single string\nfield. The following `PartialResultSet`s might be yielded:\n\n {\n \"metadata\": { ... 
}\n \"values\": [\"Hello\", \"W\"]\n \"chunked_value\": true\n \"resume_token\": \"Af65...\"\n }\n {\n \"values\": [\"orl\"]\n \"chunked_value\": true\n \"resume_token\": \"Bqp2...\"\n }\n {\n \"values\": [\"d\"]\n \"resume_token\": \"Zx1B...\"\n }\n\nThis sequence of `PartialResultSet`s encodes two rows, one\ncontaining the field value `\"Hello\"`, and a second containing the\nfield value `\"World\" = \"W\" + \"orl\" + \"d\"`.", + "type": "array", + "items": { + "type": "any" + } + }, + "resumeToken": { + "description": "Streaming calls might be interrupted for a variety of reasons, such\nas TCP connection loss. If this occurs, the stream of results can\nbe resumed by re-sending the original request and including\n`resume_token`. Note that executing any other transaction in the\nsame session invalidates the token.", + "format": "byte", + "type": "string" + }, + "stats": { + "description": "Query plan and execution statistics for the query that produced this\nstreaming result set. These can be requested by setting\nExecuteSqlRequest.query_mode and are sent\nonly once with the last response in the stream.", + "$ref": "ResultSetStats" + } + }, + "id": "PartialResultSet", + "description": "Partial results from a streaming read or SQL query. Streaming reads and\nSQL queries better tolerate large result sets, large rows, and large\nvalues, but are a little trickier to consume.", + "type": "object" + }, + "UpdateInstanceMetadata": { + "description": "Metadata type for the operation returned by\nUpdateInstance.", + "type": "object", + "properties": { + "instance": { + "$ref": "Instance", + "description": "The desired end state of the update." + }, + "startTime": { + "description": "The time at which UpdateInstance\nrequest was received.", + "format": "google-datetime", + "type": "string" + }, + "cancelTime": { + "description": "The time at which this operation was cancelled. If set, this operation is\nin the process of undoing itself (which is guaranteed to succeed) and\ncannot be cancelled again.", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "The time at which this operation failed or was completed successfully.", + "format": "google-datetime", + "type": "string" + } + }, + "id": "UpdateInstanceMetadata" + }, + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "type": "object", + "properties": { + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "type": "array", + "items": { + "$ref": "Operation" + } + }, + "nextPageToken": { + "type": "string", + "description": "The standard List next-page token." + } + }, + "id": "ListOperationsResponse" + }, + "ResultSetMetadata": { + "description": "Metadata about a ResultSet or PartialResultSet.", + "type": "object", + "properties": { + "rowType": { + "$ref": "StructType", + "description": "Indicates the field names and types for the rows in the result\nset. 
For example, a SQL query like `\"SELECT UserId, UserName FROM\nUsers\"` could return a `row_type` value like:\n\n \"fields\": [\n { \"name\": \"UserId\", \"type\": { \"code\": \"INT64\" } },\n { \"name\": \"UserName\", \"type\": { \"code\": \"STRING\" } },\n ]" + }, + "transaction": { + "description": "If the read or SQL query began a transaction as a side-effect, the\ninformation about the new transaction is yielded here.", + "$ref": "Transaction" + } + }, + "id": "ResultSetMetadata" + }, + "TransactionSelector": { + "properties": { + "singleUse": { + "$ref": "TransactionOptions", + "description": "Execute the read or SQL query in a temporary transaction.\nThis is the most efficient way to execute a transaction that\nconsists of a single SQL query." + }, + "begin": { + "$ref": "TransactionOptions", + "description": "Begin a new transaction and execute this read or SQL query in\nit. The transaction ID of the new transaction is returned in\nResultSetMetadata.transaction, which is a Transaction." + }, + "id": { + "description": "Execute the read or SQL query in a previously-started transaction.", + "format": "byte", + "type": "string" + } + }, + "id": "TransactionSelector", + "description": "This message is used to select the transaction in which a\nRead or\nExecuteSql call runs.\n\nSee TransactionOptions for more information about transactions.", + "type": "object" + }, + "Mutation": { + "properties": { + "delete": { + "$ref": "Delete", + "description": "Delete rows from a table. Succeeds whether or not the named\nrows were present." + }, + "insert": { + "$ref": "Write", + "description": "Insert new rows in a table. If any of the rows already exist,\nthe write or transaction fails with error `ALREADY_EXISTS`." + }, + "insertOrUpdate": { + "$ref": "Write", + "description": "Like insert, except that if the row already exists, then\nits column values are overwritten with the ones provided. Any\ncolumn values not explicitly written are preserved." + }, + "update": { + "$ref": "Write", + "description": "Update existing rows in a table. If any of the rows does not\nalready exist, the transaction fails with error `NOT_FOUND`." + }, + "replace": { + "$ref": "Write", + "description": "Like insert, except that if the row already exists, it is\ndeleted, and the column values provided are inserted\ninstead. Unlike insert_or_update, this means any values not\nexplicitly written become `NULL`." + } + }, + "id": "Mutation", + "description": "A modification to one or more Cloud Spanner rows. Mutations can be\napplied to a Cloud Spanner database by sending them in a\nCommit call.", + "type": "object" + }, + "KeySet": { + "description": "`KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All\nthe keys are expected to be in the same table or index. The keys need\nnot be sorted in any particular way.\n\nIf the same key is specified multiple times in the set (for example\nif two ranges, two keys, or a key and a range overlap), Cloud Spanner\nbehaves as if the key were only specified once.", + "type": "object", + "properties": { + "ranges": { + "description": "A list of key ranges. See KeyRange for more information about\nkey range specifications.", + "type": "array", + "items": { + "$ref": "KeyRange" + } + }, + "keys": { + "description": "A list of specific keys. Entries in `keys` should have exactly as\nmany elements as there are columns in the primary or index key\nwith which this `KeySet` is used. 
Individual key values are\nencoded as described here.", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "any" + } + } + }, + "all": { + "description": "For convenience `all` can be set to `true` to indicate that this\n`KeySet` matches all keys in the table or index. Note that any keys\nspecified in `keys` or `ranges` are only yielded once.", + "type": "boolean" + } + }, + "id": "KeySet" + }, + "GetDatabaseDdlResponse": { + "description": "The response for GetDatabaseDdl.", + "type": "object", + "properties": { + "statements": { + "description": "A list of formatted DDL statements defining the schema of the database\nspecified in the request.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "GetDatabaseDdlResponse" + } + }, + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "protocol": "rest", + "canonicalName": "Spanner", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://spanner.googleapis.com/", + "ownerDomain": "google.com", + "name": "spanner", + "batchPath": "batch", + "title": "Cloud Spanner API", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "instances": { + "methods": { + "testIamPermissions": { + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Returns permissions that the caller has on the specified instance resource.\n\nAttempting this RPC on a non-existent Cloud Spanner instance resource will\nresult in a NOT_FOUND error if the user has `spanner.instances.list`\npermission on the containing Google Cloud Project. Otherwise returns an\nempty set of permissions.", + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:testIamPermissions", + "id": "spanner.projects.instances.testIamPermissions", + "path": "v1/{+resource}:testIamPermissions" + }, + "delete": { + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", + "id": "spanner.projects.instances.delete", + "path": "v1/{+name}", + "description": "Deletes an instance.\n\nImmediately upon completion of the request:\n\n * Billing ceases for all of the instance's reserved resources.\n\nSoon afterward:\n\n * The instance and *all of its databases* immediately and\n irrevocably disappear from the API. All data in the databases\n is permanently deleted.", + "httpMethod": "DELETE", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "location": "path", + "description": "Required. The name of the instance to be deleted. 
Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists all instances in the given project.", + "response": { + "$ref": "ListInstancesResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "parameters": { + "pageToken": { + "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListInstancesResponse.", + "type": "string", + "location": "query" + }, + "pageSize": { + "description": "Number of instances to be returned in the response. If 0 or less, defaults\nto the server's maximum allowed page size.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "parent": { + "description": "Required. The name of the project for which a list of instances is\nrequested. Values are of the form `projects/\u003cproject\u003e`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "filter": { + "location": "query", + "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n * name\n * display_name\n * labels.key where key is the name of a label\n\nSome examples of using filters are:\n\n * name:* --\u003e The instance has a name.\n * name:Howl --\u003e The instance's name contains the string \"howl\".\n * name:HOWL --\u003e Equivalent to above.\n * NAME:howl --\u003e Equivalent to above.\n * labels.env:* --\u003e The instance has the label \"env\".\n * labels.env:dev --\u003e The instance has the label \"env\" and the value of\n the label contains the string \"dev\".\n * name:howl labels.env:dev --\u003e The instance's name contains \"howl\" and\n it has the label \"env\" with its value\n containing \"dev\".", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances", + "path": "v1/{+parent}/instances", + "id": "spanner.projects.instances.list" + }, + "setIamPolicy": { + "description": "Sets the access control policy on an instance resource. Replaces any\nexisting policy.\n\nAuthorization requires `spanner.instances.setIamPolicy` on\nresource.", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. 
The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:setIamPolicy", + "id": "spanner.projects.instances.setIamPolicy", + "path": "v1/{+resource}:setIamPolicy" + }, + "create": { + "flatPath": "v1/projects/{projectsId}/instances", + "id": "spanner.projects.instances.create", + "path": "v1/{+parent}/instances", + "request": { + "$ref": "CreateInstanceRequest" + }, + "description": "Creates an instance and begins preparing it to begin serving. The\nreturned long-running operation\ncan be used to track the progress of preparing the new\ninstance. The instance name is assigned by the caller. If the\nnamed instance already exists, `CreateInstance` returns\n`ALREADY_EXISTS`.\n\nImmediately upon completion of this request:\n\n * The instance is readable via the API, with all requested attributes\n but no allocated resources. Its state is `CREATING`.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation renders the instance immediately unreadable\n via the API.\n * The instance can be deleted.\n * All other attempts to modify the instance are rejected.\n\nUpon completion of the returned operation:\n\n * Billing for all successfully-allocated resources begins (some types\n may have lower than the requested levels).\n * Databases can be created in the instance.\n * The instance's allocated resource levels are readable via the API.\n * The instance's state becomes `READY`.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track creation of the instance. The\nmetadata field type is\nCreateInstanceMetadata.\nThe response field type is\nInstance, if successful.", + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "parent": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "Required. The name of the project in which to create the instance. Values\nare of the form `projects/\u003cproject\u003e`.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "getIamPolicy": { + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:getIamPolicy", + "id": "spanner.projects.instances.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "description": "Gets the access control policy for an instance resource. 
Returns an empty\npolicy if an instance exists but does not have a policy set.\n\nAuthorization requires `spanner.instances.getIamPolicy` on\nresource." + }, + "patch": { + "httpMethod": "PATCH", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "Required. A unique identifier for the instance, which cannot be changed\nafter the instance is created. Values are of the form\n`projects/\u003cproject\u003e/instances/a-z*[a-z0-9]`. The final\nsegment of the name must be between 6 and 30 characters in length.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", + "id": "spanner.projects.instances.patch", + "path": "v1/{+name}", + "description": "Updates an instance, and begins allocating or releasing resources\nas requested. The returned long-running\noperation can be used to track the\nprogress of updating the instance. If the named instance does not\nexist, returns `NOT_FOUND`.\n\nImmediately upon completion of this request:\n\n * For resource types for which a decrease in the instance's allocation\n has been requested, billing is based on the newly-requested level.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation sets its metadata's\n cancel_time, and begins\n restoring resources to their pre-request values. The operation\n is guaranteed to succeed at undoing all resource changes,\n after which point it terminates with a `CANCELLED` status.\n * All other attempts to modify the instance are rejected.\n * Reading the instance via the API continues to give the pre-request\n resource levels.\n\nUpon completion of the returned operation:\n\n * Billing begins for all successfully-allocated resources (some types\n may have lower than the requested levels).\n * All newly-reserved resources are available for serving the instance's\n tables.\n * The instance's new resource levels are readable via the API.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track the instance modification. The\nmetadata field type is\nUpdateInstanceMetadata.\nThe response field type is\nInstance, if successful.\n\nAuthorization requires `spanner.instances.update` permission on\nresource name.", + "request": { + "$ref": "UpdateInstanceRequest" + } + }, + "get": { + "description": "Gets information about a particular instance.", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Instance" + }, + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "Required. The name of the requested instance. 
Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", + "path": "v1/{+name}", + "id": "spanner.projects.instances.get" + } + }, + "resources": { + "databases": { + "methods": { + "getIamPolicy": { + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "parameters": { + "resource": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path", + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:getIamPolicy", + "id": "spanner.projects.instances.databases.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "description": "Gets the access control policy for a database resource. Returns an empty\npolicy if a database exists but does not have a policy set.\n\nAuthorization requires `spanner.databases.getIamPolicy` permission on\nresource." + }, + "get": { + "id": "spanner.projects.instances.databases.get", + "path": "v1/{+name}", + "description": "Gets the state of a Cloud Spanner database.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Database" + }, + "parameters": { + "name": { + "description": "Required. The name of the requested database. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}" + }, + "dropDatabase": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "database" + ], + "httpMethod": "DELETE", + "parameters": { + "database": { + "description": "Required. The database to be dropped.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}", + "path": "v1/{+database}", + "id": "spanner.projects.instances.databases.dropDatabase", + "description": "Drops (aka deletes) a Cloud Spanner database." + }, + "updateDdl": { + "id": "spanner.projects.instances.databases.updateDdl", + "path": "v1/{+database}/ddl", + "description": "Updates the schema of a Cloud Spanner database by\ncreating/altering/dropping tables, columns, indexes, etc. The returned\nlong-running operation will have a name of\nthe format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and can be used to\ntrack execution of the schema change(s). The\nmetadata field type is\nUpdateDatabaseDdlMetadata. 
The operation has no response.", + "request": { + "$ref": "UpdateDatabaseDdlRequest" + }, + "httpMethod": "PATCH", + "parameterOrder": [ + "database" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "database": { + "description": "Required. The database to update.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl" + }, + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "spanner.projects.instances.databases.testIamPermissions", + "description": "Returns permissions that the caller has on the specified database resource.\n\nAttempting this RPC on a non-existent Cloud Spanner database will result in\na NOT_FOUND error if the user has `spanner.databases.list` permission on\nthe containing Cloud Spanner instance. Otherwise returns an empty set of\npermissions.", + "request": { + "$ref": "TestIamPermissionsRequest" + } + }, + "getDdl": { + "id": "spanner.projects.instances.databases.getDdl", + "path": "v1/{+database}/ddl", + "description": "Returns the schema of a Cloud Spanner database as a list of formatted\nDDL statements. This method does not show pending schema updates, those may\nbe queried using the Operations API.", + "httpMethod": "GET", + "response": { + "$ref": "GetDatabaseDdlResponse" + }, + "parameterOrder": [ + "database" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "database": { + "description": "Required. The database whose schema we wish to get.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl" + }, + "list": { + "response": { + "$ref": "ListDatabasesResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "pageToken": { + "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListDatabasesResponse.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Number of databases to be returned in the response. If 0 or less,\ndefaults to the server's maximum allowed page size.", + "format": "int32", + "type": "integer" + }, + "parent": { + "location": "path", + "description": "Required. 
The instance whose databases should be listed.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases", + "path": "v1/{+parent}/databases", + "id": "spanner.projects.instances.databases.list", + "description": "Lists Cloud Spanner databases." + }, + "setIamPolicy": { + "id": "spanner.projects.instances.databases.setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "description": "Sets the access control policy on a database resource. Replaces any\nexisting policy.\n\nAuthorization requires `spanner.databases.setIamPolicy` permission on\nresource.", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:setIamPolicy" + }, + "create": { + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "Operation" + }, + "parameters": { + "parent": { + "location": "path", + "description": "Required. The name of the instance that will serve the new database.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases", + "id": "spanner.projects.instances.databases.create", + "path": "v1/{+parent}/databases", + "request": { + "$ref": "CreateDatabaseRequest" + }, + "description": "Creates a new Cloud Spanner database and starts to prepare it for serving.\nThe returned long-running operation will\nhave a name of the format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track preparation of the database. The\nmetadata field type is\nCreateDatabaseMetadata. The\nresponse field type is\nDatabase, if successful." + } + }, + "resources": { + "operations": { + "methods": { + "cancel": { + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations/[^/]+$", + "location": "path", + "description": "The name of the operation resource to be cancelled.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}:cancel", + "id": "spanner.projects.instances.databases.operations.cancel", + "path": "v1/{+name}:cancel", + "description": "Starts asynchronous cancellation on a long-running operation. 
The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`." + }, + "delete": { + "id": "spanner.projects.instances.databases.operations.delete", + "path": "v1/{+name}", + "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "httpMethod": "DELETE", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "location": "path", + "description": "The name of the operation resource to be deleted.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}" + }, + "list": { + "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", + "httpMethod": "GET", + "response": { + "$ref": "ListOperationsResponse" + }, + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "location": "path", + "description": "The name of the operation collection.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations$" + }, + "pageToken": { + "location": "query", + "description": "The standard list page token.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + }, + "filter": { + "description": "The standard list filter.", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations", + "id": "spanner.projects.instances.databases.operations.list", + "path": "v1/{+name}" + }, + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations/[^/]+$", + "location": "path", + "description": "The name of the operation resource." + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}", + "id": "spanner.projects.instances.databases.operations.get", + "path": "v1/{+name}", + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice." 
+ } + } + }, + "sessions": { + "methods": { + "commit": { + "response": { + "$ref": "CommitResponse" + }, + "parameterOrder": [ + "session" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "session": { + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + "location": "path", + "description": "Required. The session in which the transaction to be committed is running.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:commit", + "path": "v1/{+session}:commit", + "id": "spanner.projects.instances.databases.sessions.commit", + "description": "Commits a transaction. The request includes the mutations to be\napplied to rows in the database.\n\n`Commit` might return an `ABORTED` error. This can occur at any time;\ncommonly, the cause is conflicts with concurrent\ntransactions. However, it can also happen for a variety of other\nreasons. If `Commit` returns `ABORTED`, the caller should re-attempt\nthe transaction from the beginning, re-using the same session.", + "request": { + "$ref": "CommitRequest" + } + }, + "beginTransaction": { + "response": { + "$ref": "Transaction" + }, + "parameterOrder": [ + "session" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "session": { + "location": "path", + "description": "Required. The session in which the transaction runs.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:beginTransaction", + "path": "v1/{+session}:beginTransaction", + "id": "spanner.projects.instances.databases.sessions.beginTransaction", + "description": "Begins a new transaction. This step can often be skipped:\nRead, ExecuteSql and\nCommit can begin a new transaction as a\nside-effect.", + "request": { + "$ref": "BeginTransactionRequest" + } + }, + "delete": { + "description": "Ends a session, releasing server resources associated with it.", + "httpMethod": "DELETE", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "Required. The name of the session to delete.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", + "id": "spanner.projects.instances.databases.sessions.delete", + "path": "v1/{+name}" + }, + "executeStreamingSql": { + "description": "Like ExecuteSql, except returns the result\nset as a stream. Unlike ExecuteSql, there\nis no limit on the size of the returned result set. However, no\nindividual row in the result set can exceed 100 MiB, and no\ncolumn value can exceed 10 MiB.", + "request": { + "$ref": "ExecuteSqlRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "session" + ], + "response": { + "$ref": "PartialResultSet" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "session": { + "location": "path", + "description": "Required. 
The session in which the SQL query should be performed.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeStreamingSql", + "id": "spanner.projects.instances.databases.sessions.executeStreamingSql", + "path": "v1/{+session}:executeStreamingSql" + }, + "executeSql": { + "request": { + "$ref": "ExecuteSqlRequest" + }, + "description": "Executes an SQL query, returning all rows in a single reply. This\nmethod cannot be used to return a result set larger than 10 MiB;\nif the query yields more data than that, the query fails with\na `FAILED_PRECONDITION` error.\n\nQueries inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be fetched in streaming fashion by calling\nExecuteStreamingSql instead.", + "httpMethod": "POST", + "parameterOrder": [ + "session" + ], + "response": { + "$ref": "ResultSet" + }, + "parameters": { + "session": { + "location": "path", + "description": "Required. The session in which the SQL query should be performed.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeSql", + "id": "spanner.projects.instances.databases.sessions.executeSql", + "path": "v1/{+session}:executeSql" + }, + "streamingRead": { + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:streamingRead", + "path": "v1/{+session}:streamingRead", + "id": "spanner.projects.instances.databases.sessions.streamingRead", + "request": { + "$ref": "ReadRequest" + }, + "description": "Like Read, except returns the result set as a\nstream. Unlike Read, there is no limit on the\nsize of the returned result set. However, no individual row in\nthe result set can exceed 100 MiB, and no column value can exceed\n10 MiB.", + "response": { + "$ref": "PartialResultSet" + }, + "parameterOrder": [ + "session" + ], + "httpMethod": "POST", + "parameters": { + "session": { + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + "location": "path", + "description": "Required. The session in which the read should be performed.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "rollback": { + "description": "Rolls back a transaction, releasing any locks it holds. It is a good\nidea to call this for any transaction that includes one or more\nRead or ExecuteSql requests and\nultimately decides not to commit.\n\n`Rollback` returns `OK` if it successfully aborts the transaction, the\ntransaction was already aborted, or the transaction is not\nfound. `Rollback` never returns `ABORTED`.", + "request": { + "$ref": "RollbackRequest" + }, + "httpMethod": "POST", + "parameterOrder": [ + "session" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "session": { + "description": "Required. 
The session in which the transaction to roll back is running.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:rollback", + "id": "spanner.projects.instances.databases.sessions.rollback", + "path": "v1/{+session}:rollback" + }, + "create": { + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions", + "path": "v1/{+database}/sessions", + "id": "spanner.projects.instances.databases.sessions.create", + "description": "Creates a new session. A session can be used to perform\ntransactions that read and/or modify data in a Cloud Spanner database.\nSessions are meant to be reused for many consecutive\ntransactions.\n\nSessions can only execute one transaction at a time. To execute\nmultiple concurrent read-write/write-only transactions, create\nmultiple sessions. Note that standalone reads and queries use a\ntransaction internally, and count toward the one transaction\nlimit.\n\nCloud Spanner limits the number of sessions that can exist at any given\ntime; thus, it is a good idea to delete idle and/or unneeded sessions.\nAside from explicit deletes, Cloud Spanner can delete sessions for\nwhich no operations are sent for more than an hour, or due to\ninternal errors. If a session is deleted, requests to it\nreturn `NOT_FOUND`.\n\nIdle sessions can be kept alive by sending a trivial SQL query\nperiodically, e.g., `\"SELECT 1\"`.", + "response": { + "$ref": "Session" + }, + "parameterOrder": [ + "database" + ], + "httpMethod": "POST", + "parameters": { + "database": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "location": "path", + "description": "Required. The database in which the new session is created." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "read": { + "description": "Reads rows from the database using key lookups and scans, as a\nsimple key/value style alternative to\nExecuteSql. This method cannot be used to\nreturn a result set larger than 10 MiB; if the read matches more\ndata than that, the read fails with a `FAILED_PRECONDITION`\nerror.\n\nReads inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be yielded in streaming fashion by calling\nStreamingRead instead.", + "request": { + "$ref": "ReadRequest" + }, + "response": { + "$ref": "ResultSet" + }, + "parameterOrder": [ + "session" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "session": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + "location": "path", + "description": "Required. The session in which the read should be performed." + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:read", + "path": "v1/{+session}:read", + "id": "spanner.projects.instances.databases.sessions.read" + }, + "get": { + "httpMethod": "GET", + "response": { + "$ref": "Session" + }, + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The name of the session to retrieve.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", + "id": "spanner.projects.instances.databases.sessions.get", + "path": "v1/{+name}", + "description": "Gets a session. Returns `NOT_FOUND` if the session does not exist.\nThis is mainly useful for determining whether a session is still\nalive." + } + } + } + } + }, + "operations": { + "methods": { + "delete": { + "path": "v1/{+name}", + "id": "spanner.projects.instances.operations.delete", + "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/instances/[^/]+/operations/[^/]+$", + "location": "path", + "description": "The name of the operation resource to be deleted.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}" + }, + "list": { + "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", + "response": { + "$ref": "ListOperationsResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The name of the operation collection.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/operations$", + "location": "path" + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "The standard list page token." + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" + }, + "filter": { + "location": "query", + "description": "The standard list filter.", + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations", + "path": "v1/{+name}", + "id": "spanner.projects.instances.operations.list" + }, + "get": { + "id": "spanner.projects.instances.operations.get", + "path": "v1/{+name}", + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "httpMethod": "GET", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instances/[^/]+/operations/[^/]+$", + "location": "path", + "description": "The name of the operation resource." 
+ } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}" + }, + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "httpMethod": "POST", + "parameters": { + "name": { + "pattern": "^projects/[^/]+/instances/[^/]+/operations/[^/]+$", + "location": "path", + "description": "The name of the operation resource to be cancelled.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}:cancel", + "path": "v1/{+name}:cancel", + "id": "spanner.projects.instances.operations.cancel" + } + } + } + } + }, + "instanceConfigs": { + "methods": { + "list": { + "response": { + "$ref": "ListInstanceConfigsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "pageToken": { + "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListInstanceConfigsResponse.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Number of instance configurations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + "format": "int32", + "type": "integer" + }, + "parent": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "Required. The name of the project for which a list of supported instance\nconfigurations is requested. Values are of the form\n`projects/\u003cproject\u003e`.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/instanceConfigs", + "path": "v1/{+parent}/instanceConfigs", + "id": "spanner.projects.instanceConfigs.list", + "description": "Lists the supported instance configurations for a given project." + }, + "get": { + "response": { + "$ref": "InstanceConfig" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/instanceConfigs/[^/]+$", + "location": "path", + "description": "Required. The name of the requested instance configuration. Values are of\nthe form `projects/\u003cproject\u003e/instanceConfigs/\u003cconfig\u003e`." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/instanceConfigs/{instanceConfigsId}", + "path": "v1/{+name}", + "id": "spanner.projects.instanceConfigs.get", + "description": "Gets information about a particular instance configuration." + } + } + } + } + } + }, + "parameters": { + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "alt": { + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string" + }, + "quotaUser": { + "type": "string", + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + } + }, + "version": "v1", + "baseUrl": "https://spanner.googleapis.com/", + "servicePath": "", + "description": "Cloud Spanner is a managed, mission-critical, globally consistent and scalable relational database service.", + "kind": "discovery#restDescription", + "basePath": "", + "revision": "20170214", + "id": "spanner:v1", + "documentationLink": "https://cloud.google.com/spanner/", + "discoveryVersion": "v1" +} diff --git a/vendor/google.golang.org/api/spanner/v1/spanner-gen.go b/vendor/google.golang.org/api/spanner/v1/spanner-gen.go new file mode 100644 index 000000000..a93cbaec2 --- /dev/null +++ b/vendor/google.golang.org/api/spanner/v1/spanner-gen.go @@ -0,0 +1,9223 @@ +// Package spanner provides access to the Cloud Spanner API. +// +// See https://cloud.google.com/spanner/ +// +// Usage example: +// +// import "google.golang.org/api/spanner/v1" +// ... +// spannerService, err := spanner.New(oauthHttpClient) +package spanner // import "google.golang.org/api/spanner/v1" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. 
+var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "spanner:v1" +const apiName = "spanner" +const apiVersion = "v1" +const basePath = "https://spanner.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.InstanceConfigs = NewProjectsInstanceConfigsService(s) + rs.Instances = NewProjectsInstancesService(s) + return rs +} + +type ProjectsService struct { + s *Service + + InstanceConfigs *ProjectsInstanceConfigsService + + Instances *ProjectsInstancesService +} + +func NewProjectsInstanceConfigsService(s *Service) *ProjectsInstanceConfigsService { + rs := &ProjectsInstanceConfigsService{s: s} + return rs +} + +type ProjectsInstanceConfigsService struct { + s *Service +} + +func NewProjectsInstancesService(s *Service) *ProjectsInstancesService { + rs := &ProjectsInstancesService{s: s} + rs.Databases = NewProjectsInstancesDatabasesService(s) + rs.Operations = NewProjectsInstancesOperationsService(s) + return rs +} + +type ProjectsInstancesService struct { + s *Service + + Databases *ProjectsInstancesDatabasesService + + Operations *ProjectsInstancesOperationsService +} + +func NewProjectsInstancesDatabasesService(s *Service) *ProjectsInstancesDatabasesService { + rs := &ProjectsInstancesDatabasesService{s: s} + rs.Operations = NewProjectsInstancesDatabasesOperationsService(s) + rs.Sessions = NewProjectsInstancesDatabasesSessionsService(s) + return rs +} + +type ProjectsInstancesDatabasesService struct { + s *Service + + Operations *ProjectsInstancesDatabasesOperationsService + + Sessions *ProjectsInstancesDatabasesSessionsService +} + +func NewProjectsInstancesDatabasesOperationsService(s *Service) *ProjectsInstancesDatabasesOperationsService { + rs := &ProjectsInstancesDatabasesOperationsService{s: s} + return rs +} + +type ProjectsInstancesDatabasesOperationsService struct { + s *Service +} + +func NewProjectsInstancesDatabasesSessionsService(s *Service) *ProjectsInstancesDatabasesSessionsService { + rs := &ProjectsInstancesDatabasesSessionsService{s: s} + return rs +} + +type ProjectsInstancesDatabasesSessionsService struct { + s *Service +} + +func NewProjectsInstancesOperationsService(s *Service) *ProjectsInstancesOperationsService { + rs := &ProjectsInstancesOperationsService{s: s} + return rs +} + +type ProjectsInstancesOperationsService 
struct { + s *Service +} + +// AuditConfig: Specifies the audit configuration for a service. +// It consists of which permission types are logged, and what +// identities, if +// any, are exempted from logging. +// An AuditConifg must have one or more AuditLogConfigs. +type AuditConfig struct { + // AuditLogConfigs: The configuration for logging of each type of + // permission. + // Next ID: 4 + AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` + + // ExemptedMembers: Specifies the identities that are exempted from + // "data access" audit + // logging for the `service` specified above. + // Follows the same format of Binding.members. + // This field is deprecated in favor of per-permission-type exemptions. + ExemptedMembers []string `json:"exemptedMembers,omitempty"` + + // Service: Specifies a service that will be enabled for audit + // logging. + // For example, `resourcemanager`, `storage`, `compute`. + // `allServices` is a special value that covers all services. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditLogConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuditConfig) MarshalJSON() ([]byte, error) { + type noMethod AuditConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuditLogConfig: Provides the configuration for logging a type of +// permissions. +// Example: +// +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:foo@gmail.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE", +// } +// ] +// } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while +// exempting +// foo@gmail.com from DATA_READ logging. +type AuditLogConfig struct { + // ExemptedMembers: Specifies the identities that do not cause logging + // for this type of + // permission. + // Follows the same format of Binding.members. + ExemptedMembers []string `json:"exemptedMembers,omitempty"` + + // LogType: The log type that this config enables. + // + // Possible values: + // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. + // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy + // "DATA_WRITE" - Data writes. Example: CloudSQL Users create + // "DATA_READ" - Data reads. Example: CloudSQL Users list + LogType string `json:"logType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExemptedMembers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { + type noMethod AuditLogConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BeginTransactionRequest: The request for BeginTransaction. +type BeginTransactionRequest struct { + // Options: Required. Options for the new transaction. + Options *TransactionOptions `json:"options,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Options") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Options") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BeginTransactionRequest) MarshalJSON() ([]byte, error) { + type noMethod BeginTransactionRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Binding: Associates `members` with a `role`. +type Binding struct { + // Members: Specifies the identities requesting access for a Cloud + // Platform resource. + // `members` can have the following values: + // + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents + // anyone + // who is authenticated with a Google account or a service + // account. + // + // * `user:{emailid}`: An email address that represents a specific + // Google + // account. For example, `alice@gmail.com` or `joe@example.com`. + // + // + // * `serviceAccount:{emailid}`: An email address that represents a + // service + // account. For example, + // `my-other-app@appspot.gserviceaccount.com`. + // + // * `group:{emailid}`: An email address that represents a Google + // group. + // For example, `admins@example.com`. + // + // * `domain:{domain}`: A Google Apps domain name that represents all + // the + // users of that domain. For example, `google.com` or + // `example.com`. + // + // + Members []string `json:"members,omitempty"` + + // Role: Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or + // `roles/owner`. 
+ // Required + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Members") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Members") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Binding) MarshalJSON() ([]byte, error) { + type noMethod Binding + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChildLink: Metadata associated with a parent-child relationship +// appearing in a +// PlanNode. +type ChildLink struct { + // ChildIndex: The node to which the link points. + ChildIndex int64 `json:"childIndex,omitempty"` + + // Type: The type of the link. For example, in Hash Joins this could be + // used to + // distinguish between the build child and the probe child, or in the + // case + // of the child being an output variable, to represent the tag + // associated + // with the output variable. + Type string `json:"type,omitempty"` + + // Variable: Only present if the child node is SCALAR and corresponds + // to an output variable of the parent node. The field carries the name + // of + // the output variable. + // For example, a `TableScan` operator that reads rows from a table + // will + // have child links to the `SCALAR` nodes representing the output + // variables + // created for each column that is read by the operator. The + // corresponding + // `variable` fields will be set to the variable names assigned to + // the + // columns. + Variable string `json:"variable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ChildIndex") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChildIndex") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChildLink) MarshalJSON() ([]byte, error) { + type noMethod ChildLink + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CloudAuditOptions: Write a Cloud Audit log +type CloudAuditOptions struct { +} + +// CommitRequest: The request for Commit. 
+type CommitRequest struct { + // Mutations: The mutations to be executed when this transaction + // commits. All + // mutations are applied atomically, in the order they appear in + // this list. + Mutations []*Mutation `json:"mutations,omitempty"` + + // SingleUseTransaction: Execute mutations in a temporary transaction. + // Note that unlike + // commit of a previously-started transaction, commit with a + // temporary transaction is non-idempotent. That is, if + // the + // `CommitRequest` is sent to Cloud Spanner more than once + // (for + // instance, due to retries in the application, or in the + // transport library), it is possible that the mutations are + // executed more than once. If this is undesirable, use + // BeginTransaction and + // Commit instead. + SingleUseTransaction *TransactionOptions `json:"singleUseTransaction,omitempty"` + + // TransactionId: Commit a previously-started transaction. + TransactionId string `json:"transactionId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mutations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mutations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitRequest) MarshalJSON() ([]byte, error) { + type noMethod CommitRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CommitResponse: The response for Commit. +type CommitResponse struct { + // CommitTimestamp: The Cloud Spanner timestamp at which the transaction + // committed. + CommitTimestamp string `json:"commitTimestamp,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CommitTimestamp") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommitTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *CommitResponse) MarshalJSON() ([]byte, error) { + type noMethod CommitResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Condition: A condition to be met. +type Condition struct { + // Iam: Trusted attributes supplied by the IAM system. + // + // Possible values: + // "NO_ATTR" - Default non-attribute. + // "AUTHORITY" - Either principal or (if present) authority selector. + // "ATTRIBUTION" - The principal (even if an authority selector is + // present), which + // must only be used for attribution, not authorization. + // "SECURITY_REALM" - Any of the security realms in the IAMContext + // (go/security-realms). + // When used with IN, the condition indicates "any of the request's + // realms + // match one of the given values; with NOT_IN, "none of the realms + // match + // any of the given values". It is not permitted to grant access based + // on + // the *absence* of a realm, so realm conditions can only be used in + // a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN). + Iam string `json:"iam,omitempty"` + + // Op: An operator to apply the subject with. + // + // Possible values: + // "NO_OP" - Default no-op. + // "EQUALS" - DEPRECATED. Use IN instead. + // "NOT_EQUALS" - DEPRECATED. Use NOT_IN instead. + // "IN" - Set-inclusion check. + // "NOT_IN" - Set-exclusion check. + // "DISCHARGED" - Subject is discharged + Op string `json:"op,omitempty"` + + // Svc: Trusted attributes discharged by the service. + Svc string `json:"svc,omitempty"` + + // Sys: Trusted attributes supplied by any service that owns resources + // and uses + // the IAM system for access control. + // + // Possible values: + // "NO_ATTR" - Default non-attribute type + // "REGION" - Region of the resource + // "SERVICE" - Service name + // "NAME" - Resource name + // "IP" - IP address of the caller + Sys string `json:"sys,omitempty"` + + // Value: DEPRECATED. Use 'values' instead. + Value string `json:"value,omitempty"` + + // Values: The objects of the condition. This is mutually exclusive with + // 'value'. + Values []string `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Iam") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Iam") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Condition) MarshalJSON() ([]byte, error) { + type noMethod Condition + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CounterOptions: Options for counters +type CounterOptions struct { + // Field: The field value to attribute. + Field string `json:"field,omitempty"` + + // Metric: The metric to update. + Metric string `json:"metric,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CounterOptions) MarshalJSON() ([]byte, error) { + type noMethod CounterOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateDatabaseMetadata: Metadata type for the operation returned +// by +// CreateDatabase. +type CreateDatabaseMetadata struct { + // Database: The database being created. + Database string `json:"database,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Database") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Database") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateDatabaseMetadata) MarshalJSON() ([]byte, error) { + type noMethod CreateDatabaseMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateDatabaseRequest: The request for CreateDatabase. +type CreateDatabaseRequest struct { + // CreateStatement: Required. A `CREATE DATABASE` statement, which + // specifies the ID of the + // new database. The database ID must conform to the regular + // expression + // `a-z*[a-z0-9]` and be between 2 and 30 characters in length. + CreateStatement string `json:"createStatement,omitempty"` + + // ExtraStatements: An optional list of DDL statements to run inside the + // newly created + // database. Statements can create tables, indexes, etc. + // These + // statements execute atomically with the creation of the database: + // if there is an error in any statement, the database is not created. + ExtraStatements []string `json:"extraStatements,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateStatement") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateStatement") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CreateDatabaseRequest) MarshalJSON() ([]byte, error) { + type noMethod CreateDatabaseRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateInstanceMetadata: Metadata type for the operation returned +// by +// CreateInstance. +type CreateInstanceMetadata struct { + // CancelTime: The time at which this operation was cancelled. If set, + // this operation is + // in the process of undoing itself (which is guaranteed to succeed) + // and + // cannot be cancelled again. + CancelTime string `json:"cancelTime,omitempty"` + + // EndTime: The time at which this operation failed or was completed + // successfully. + EndTime string `json:"endTime,omitempty"` + + // Instance: The instance being created. + Instance *Instance `json:"instance,omitempty"` + + // StartTime: The time at which the + // CreateInstance request was + // received. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CancelTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CancelTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateInstanceMetadata) MarshalJSON() ([]byte, error) { + type noMethod CreateInstanceMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateInstanceRequest: The request for CreateInstance. +type CreateInstanceRequest struct { + // Instance: Required. The instance to create. The name may be omitted, + // but if + // specified must be `/instances/`. + Instance *Instance `json:"instance,omitempty"` + + // InstanceId: Required. The ID of the instance to create. Valid + // identifiers are of the + // form `a-z*[a-z0-9]` and must be between 6 and 30 characters + // in + // length. + InstanceId string `json:"instanceId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instance") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod CreateInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DataAccessOptions: Write a Data Access (Gin) log +type DataAccessOptions struct { +} + +// Database: A Cloud Spanner database. +type Database struct { + // Name: Required. The name of the database. Values are of the + // form + // `projects//instances//databases/`, + // w + // here `` is as specified in the `CREATE DATABASE` + // statement. This name can be passed to other API methods to + // identify the database. + Name string `json:"name,omitempty"` + + // State: Output only. The current database state. + // + // Possible values: + // "STATE_UNSPECIFIED" - Not specified. + // "CREATING" - The database is still being created. Operations on the + // database may fail + // with `FAILED_PRECONDITION` in this state. + // "READY" - The database is fully created and ready for use. + State string `json:"state,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Database) MarshalJSON() ([]byte, error) { + type noMethod Database + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Delete: Arguments to delete operations. +type Delete struct { + // KeySet: Required. The primary keys of the rows within table to + // delete. + KeySet *KeySet `json:"keySet,omitempty"` + + // Table: Required. The table whose rows will be deleted. + Table string `json:"table,omitempty"` + + // ForceSendFields is a list of field names (e.g. "KeySet") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KeySet") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Delete) MarshalJSON() ([]byte, error) { + type noMethod Delete + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated +// empty messages in your APIs. A typical example is to use it as the +// request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + +// ExecuteSqlRequest: The request for ExecuteSql +// and +// ExecuteStreamingSql. +type ExecuteSqlRequest struct { + // ParamTypes: It is not always possible for Cloud Spanner to infer the + // right SQL type + // from a JSON value. For example, values of type `BYTES` and values + // of type `STRING` both appear in params as JSON strings. + // + // In these cases, `param_types` can be used to specify the exact + // SQL type for some or all of the SQL query parameters. See + // the + // definition of Type for more information + // about SQL types. + ParamTypes map[string]Type `json:"paramTypes,omitempty"` + + // Params: The SQL query string can contain parameter placeholders. A + // parameter + // placeholder consists of `'@'` followed by the parameter + // name. Parameter names consist of any combination of letters, + // numbers, and underscores. + // + // Parameters can appear anywhere that a literal value is expected. The + // same + // parameter name can be used more than once, for example: + // "WHERE id > @msg_id AND id < @msg_id + 100" + // + // It is an error to execute an SQL query with unbound + // parameters. + // + // Parameter values are specified using `params`, which is a JSON + // object whose keys are parameter names, and whose values are + // the + // corresponding parameter values. + Params googleapi.RawMessage `json:"params,omitempty"` + + // QueryMode: Used to control the amount of debugging information + // returned in + // ResultSetStats. + // + // Possible values: + // "NORMAL" - The default mode where only the query result, without + // any information + // about the query plan is returned. + // "PLAN" - This mode returns only the query plan, without any result + // rows or + // execution statistics information. + // "PROFILE" - This mode returns both the query plan and the execution + // statistics along + // with the result rows. + QueryMode string `json:"queryMode,omitempty"` + + // ResumeToken: If this request is resuming a previously interrupted SQL + // query + // execution, `resume_token` should be copied from the + // last + // PartialResultSet yielded before the interruption. Doing this + // enables the new SQL query execution to resume where the last one + // left + // off. 
The rest of the request parameters must exactly match + // the + // request that yielded this token. + ResumeToken string `json:"resumeToken,omitempty"` + + // Sql: Required. The SQL query string. + Sql string `json:"sql,omitempty"` + + // Transaction: The transaction to use. If none is provided, the default + // is a + // temporary read-only transaction with strong concurrency. + Transaction *TransactionSelector `json:"transaction,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ParamTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ParamTypes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExecuteSqlRequest) MarshalJSON() ([]byte, error) { + type noMethod ExecuteSqlRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Field: Message representing a single field of a struct. +type Field struct { + // Name: The name of the field. For reads, this is the column name. + // For + // SQL queries, it is the column alias (e.g., "Word" in the + // query "SELECT 'hello' AS Word"), or the column name + // (e.g., + // "ColName" in the query "SELECT ColName FROM Table"). Some + // columns might have an empty name (e.g., !"SELECT + // UPPER(ColName)"). Note that a query result can contain + // multiple fields with the same name. + Name string `json:"name,omitempty"` + + // Type: The type of the field. + Type *Type `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Field) MarshalJSON() ([]byte, error) { + type noMethod Field + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GetDatabaseDdlResponse: The response for GetDatabaseDdl. +type GetDatabaseDdlResponse struct { + // Statements: A list of formatted DDL statements defining the schema of + // the database + // specified in the request. 
+ Statements []string `json:"statements,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Statements") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Statements") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GetDatabaseDdlResponse) MarshalJSON() ([]byte, error) { + type noMethod GetDatabaseDdlResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GetIamPolicyRequest: Request message for `GetIamPolicy` method. +type GetIamPolicyRequest struct { +} + +// Instance: An isolated set of Cloud Spanner resources on which +// databases can be hosted. +type Instance struct { + // Config: Required. The name of the instance's configuration. Values + // are of the form + // `projects//instanceConfigs/`. See + // also InstanceConfig and + // ListInstanceConfigs. + Config string `json:"config,omitempty"` + + // DisplayName: Required. The descriptive name for this instance as it + // appears in UIs. + // Must be unique per project and between 4 and 30 characters in length. + DisplayName string `json:"displayName,omitempty"` + + // Labels: Cloud Labels are a flexible and lightweight mechanism for + // organizing cloud + // resources into groups that reflect a customer's organizational needs + // and + // deployment strategies. Cloud Labels can be used to filter collections + // of + // resources. They can be used to control how resource metrics are + // aggregated. + // And they can be used as arguments to policy management rules (e.g. + // route, + // firewall, load balancing, etc.). + // + // * Label keys must be between 1 and 63 characters long and must + // conform to + // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. + // * Label values must be between 0 and 63 characters long and must + // conform + // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. + // * No more than 64 labels can be associated with a given + // resource. + // + // See https://goo.gl/xmQnxf for more information on and examples of + // labels. + // + // If you plan to use labels in your own code, please note that + // additional + // characters may be allowed in the future. And so you are advised to + // use an + // internal label representation, such as JSON, which doesn't rely + // upon + // specific characters being disallowed. For example, representing + // labels + // as the string: name + "_" + value would prove problematic if we + // were to + // allow "_" in a future release. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Required. A unique identifier for the instance, which cannot be + // changed + // after the instance is created. 
Values are of the + // form + // `projects//instances/a-z*[a-z0-9]`. The final + // segment of the name must be between 6 and 30 characters in length. + Name string `json:"name,omitempty"` + + // NodeCount: Required. The number of nodes allocated to this instance. + NodeCount int64 `json:"nodeCount,omitempty"` + + // State: Output only. The current instance state. For + // CreateInstance, the state must be + // either omitted or set to `CREATING`. For + // UpdateInstance, the state must be + // either omitted or set to `READY`. + // + // Possible values: + // "STATE_UNSPECIFIED" - Not specified. + // "CREATING" - The instance is still being created. Resources may not + // be + // available yet, and operations such as database creation may not + // work. + // "READY" - The instance is fully created and ready to do work such + // as + // creating databases. + State string `json:"state,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Config") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Config") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Instance) MarshalJSON() ([]byte, error) { + type noMethod Instance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceConfig: A possible configuration for a Cloud Spanner +// instance. Configurations +// define the geographic placement of nodes and their replication. +type InstanceConfig struct { + // DisplayName: The name of this instance configuration as it appears in + // UIs. + DisplayName string `json:"displayName,omitempty"` + + // Name: A unique identifier for the instance configuration. Values + // are of the form + // `projects//instanceConfigs/a-z*` + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceConfig) MarshalJSON() ([]byte, error) { + type noMethod InstanceConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// KeyRange: KeyRange represents a range of rows in a table or index. +// +// A range has a start key and an end key. These keys can be open +// or +// closed, indicating if the range includes rows with that key. +// +// Keys are represented by lists, where the ith value in the +// list +// corresponds to the ith component of the table or index primary +// key. +// Individual values are encoded as described here. +// +// For example, consider the following table definition: +// +// CREATE TABLE UserEvents ( +// UserName STRING(MAX), +// EventDate STRING(10) +// ) PRIMARY KEY(UserName, EventDate); +// +// The following keys name rows in this table: +// +// "Bob", "2014-09-23" +// +// Since the `UserEvents` table's `PRIMARY KEY` clause names +// two +// columns, each `UserEvents` key has two elements; the first is +// the +// `UserName`, and the second is the `EventDate`. +// +// Key ranges with multiple components are interpreted +// lexicographically by component using the table or index key's +// declared +// sort order. For example, the following range returns all events +// for +// user "Bob" that occurred in the year 2015: +// +// "start_closed": ["Bob", "2015-01-01"] +// "end_closed": ["Bob", "2015-12-31"] +// +// Start and end keys can omit trailing key components. This affects +// the +// inclusion and exclusion of rows that exactly match the provided +// key +// components: if the key is closed, then rows that exactly match +// the +// provided components are included; if the key is open, then rows +// that exactly match are not included. +// +// For example, the following range includes all events for "Bob" +// that +// occurred during and after the year 2000: +// +// "start_closed": ["Bob", "2000-01-01"] +// "end_closed": ["Bob"] +// +// The next example retrieves all events for "Bob": +// +// "start_closed": ["Bob"] +// "end_closed": ["Bob"] +// +// To retrieve events before the year 2000: +// +// "start_closed": ["Bob"] +// "end_open": ["Bob", "2000-01-01"] +// +// The following range includes all rows in the table: +// +// "start_closed": [] +// "end_closed": [] +// +// This range returns all users whose `UserName` begins with +// any +// character from A to C: +// +// "start_closed": ["A"] +// "end_open": ["D"] +// +// This range returns all users whose `UserName` begins with B: +// +// "start_closed": ["B"] +// "end_open": ["C"] +// +// Key ranges honor column sort order. For example, suppose a table +// is +// defined as follows: +// +// CREATE TABLE DescendingSortedTable { +// Key INT64, +// ... +// ) PRIMARY KEY(Key DESC); +// +// The following range retrieves all rows with key values between 1 +// and 100 inclusive: +// +// "start_closed": ["100"] +// "end_closed": ["1"] +// +// Note that 100 is passed as the start, and 1 is passed as the +// end, +// because `Key` is a descending column in the schema. +type KeyRange struct { + // EndClosed: If the end is closed, then the range includes all rows + // whose + // first `len(end_closed)` key columns exactly match `end_closed`. 
+ EndClosed []interface{} `json:"endClosed,omitempty"` + + // EndOpen: If the end is open, then the range excludes rows whose + // first + // `len(end_open)` key columns exactly match `end_open`. + EndOpen []interface{} `json:"endOpen,omitempty"` + + // StartClosed: If the start is closed, then the range includes all rows + // whose + // first `len(start_closed)` key columns exactly match `start_closed`. + StartClosed []interface{} `json:"startClosed,omitempty"` + + // StartOpen: If the start is open, then the range excludes rows whose + // first + // `len(start_open)` key columns exactly match `start_open`. + StartOpen []interface{} `json:"startOpen,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndClosed") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndClosed") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *KeyRange) MarshalJSON() ([]byte, error) { + type noMethod KeyRange + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// KeySet: `KeySet` defines a collection of Cloud Spanner keys and/or +// key ranges. All +// the keys are expected to be in the same table or index. The keys +// need +// not be sorted in any particular way. +// +// If the same key is specified multiple times in the set (for +// example +// if two ranges, two keys, or a key and a range overlap), Cloud +// Spanner +// behaves as if the key were only specified once. +type KeySet struct { + // All: For convenience `all` can be set to `true` to indicate that + // this + // `KeySet` matches all keys in the table or index. Note that any + // keys + // specified in `keys` or `ranges` are only yielded once. + All bool `json:"all,omitempty"` + + // Keys: A list of specific keys. Entries in `keys` should have exactly + // as + // many elements as there are columns in the primary or index key + // with which this `KeySet` is used. Individual key values are + // encoded as described here. + Keys [][]interface{} `json:"keys,omitempty"` + + // Ranges: A list of key ranges. See KeyRange for more information + // about + // key range specifications. + Ranges []*KeyRange `json:"ranges,omitempty"` + + // ForceSendFields is a list of field names (e.g. "All") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "All") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *KeySet) MarshalJSON() ([]byte, error) { + type noMethod KeySet + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListDatabasesResponse: The response for ListDatabases. +type ListDatabasesResponse struct { + // Databases: Databases that matched the request. + Databases []*Database `json:"databases,omitempty"` + + // NextPageToken: `next_page_token` can be sent in a + // subsequent + // ListDatabases call to fetch more + // of the matching databases. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Databases") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Databases") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListDatabasesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListDatabasesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListInstanceConfigsResponse: The response for ListInstanceConfigs. +type ListInstanceConfigsResponse struct { + // InstanceConfigs: The list of requested instance configurations. + InstanceConfigs []*InstanceConfig `json:"instanceConfigs,omitempty"` + + // NextPageToken: `next_page_token` can be sent in a + // subsequent + // ListInstanceConfigs call to + // fetch more of the matching instance configurations. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "InstanceConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ListInstanceConfigsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListInstanceConfigsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListInstancesResponse: The response for ListInstances. +type ListInstancesResponse struct { + // Instances: The list of requested instances. + Instances []*Instance `json:"instances,omitempty"` + + // NextPageToken: `next_page_token` can be sent in a + // subsequent + // ListInstances call to fetch more + // of the matching instances. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListInstancesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListOperationsResponse: The response message for +// Operations.ListOperations. +type ListOperationsResponse struct { + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Operations: A list of operations that matches the specified filter in + // the request. + Operations []*Operation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListOperationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LogConfig: Specifies what kind of log the caller must write +// Increment a streamz counter with the specified metric and field +// names. +// +// Metric names should start with a '/', generally be +// lowercase-only, +// and end in "_count". Field names should not contain an initial +// slash. +// The actual exported metric names will have "/iam/policy" +// prepended. +// +// Field names correspond to IAM request parameters and field values +// are +// their respective values. +// +// At present the only supported field names are +// - "iam_principal", corresponding to IAMContext.principal; +// - "" (empty string), resulting in one aggretated counter with no +// field. +// +// Examples: +// counter { metric: "/debug_access_count" field: "iam_principal" } +// ==> increment counter /iam/policy/backend_debug_access_count +// {iam_principal=[value of +// IAMContext.principal]} +// +// At this time we do not support: +// * multiple field names (though this may be supported in the future) +// * decrementing the counter +// * incrementing it by anything other than 1 +type LogConfig struct { + // CloudAudit: Cloud audit options. + CloudAudit *CloudAuditOptions `json:"cloudAudit,omitempty"` + + // Counter: Counter options. + Counter *CounterOptions `json:"counter,omitempty"` + + // DataAccess: Data access options. + DataAccess *DataAccessOptions `json:"dataAccess,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CloudAudit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CloudAudit") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LogConfig) MarshalJSON() ([]byte, error) { + type noMethod LogConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Mutation: A modification to one or more Cloud Spanner rows. +// Mutations can be +// applied to a Cloud Spanner database by sending them in a +// Commit call. +type Mutation struct { + // Delete: Delete rows from a table. Succeeds whether or not the + // named + // rows were present. + Delete *Delete `json:"delete,omitempty"` + + // Insert: Insert new rows in a table. If any of the rows already + // exist, + // the write or transaction fails with error `ALREADY_EXISTS`. + Insert *Write `json:"insert,omitempty"` + + // InsertOrUpdate: Like insert, except that if the row already exists, + // then + // its column values are overwritten with the ones provided. Any + // column values not explicitly written are preserved. 
+ InsertOrUpdate *Write `json:"insertOrUpdate,omitempty"` + + // Replace: Like insert, except that if the row already exists, it + // is + // deleted, and the column values provided are inserted + // instead. Unlike insert_or_update, this means any values + // not + // explicitly written become `NULL`. + Replace *Write `json:"replace,omitempty"` + + // Update: Update existing rows in a table. If any of the rows does + // not + // already exist, the transaction fails with error `NOT_FOUND`. + Update *Write `json:"update,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Delete") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Delete") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Mutation) MarshalJSON() ([]byte, error) { + type noMethod Mutation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Operation: This resource represents a long-running operation that is +// the result of a +// network API call. +type Operation struct { + // Done: If the value is `false`, it means the operation is still in + // progress. + // If true, the operation is completed, and either `error` or `response` + // is + // available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *Status `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. + // It typically + // contains progress information and common metadata such as create + // time. + // Some services might not provide such metadata. Any method that + // returns a + // long-running operation should document the metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that + // originally returns it. If you use the default HTTP mapping, + // the + // `name` should have the format of `operations/some/unique/name`. + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. + // If the original + // method returns no data on success, such as `Delete`, the response + // is + // `google.protobuf.Empty`. If the original method is + // standard + // `Get`/`Create`/`Update`, the response should be the resource. For + // other + // methods, the response should have the type `XxxResponse`, where + // `Xxx` + // is the original method name. For example, if the original method + // name + // is `TakeSnapshot()`, the inferred response type + // is + // `TakeSnapshotResponse`. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PartialResultSet: Partial results from a streaming read or SQL query. +// Streaming reads and +// SQL queries better tolerate large result sets, large rows, and +// large +// values, but are a little trickier to consume. +type PartialResultSet struct { + // ChunkedValue: If true, then the final value in values is chunked, and + // must + // be combined with more values from subsequent `PartialResultSet`s + // to obtain a complete field value. + ChunkedValue bool `json:"chunkedValue,omitempty"` + + // Metadata: Metadata about the result set, such as row type + // information. + // Only present in the first response. + Metadata *ResultSetMetadata `json:"metadata,omitempty"` + + // ResumeToken: Streaming calls might be interrupted for a variety of + // reasons, such + // as TCP connection loss. If this occurs, the stream of results can + // be resumed by re-sending the original request and + // including + // `resume_token`. Note that executing any other transaction in the + // same session invalidates the token. + ResumeToken string `json:"resumeToken,omitempty"` + + // Stats: Query plan and execution statistics for the query that + // produced this + // streaming result set. These can be requested by + // setting + // ExecuteSqlRequest.query_mode and are sent + // only once with the last response in the stream. + Stats *ResultSetStats `json:"stats,omitempty"` + + // Values: A streamed result set consists of a stream of values, which + // might + // be split into many `PartialResultSet` messages to accommodate + // large rows and/or large values. Every N complete values defines + // a + // row, where N is equal to the number of entries + // in + // metadata.row_type.fields. + // + // Most values are encoded based on type as described + // here. + // + // It is possible that the last value in values is "chunked", + // meaning that the rest of the value is sent in + // subsequent + // `PartialResultSet`(s). This is denoted by the chunked_value + // field. Two or more chunked values can be merged to form a + // complete value as follows: + // + // * `bool/number/null`: cannot be chunked + // * `string`: concatenate the strings + // * `list`: concatenate the lists. If the last element in a list is + // a + // `string`, `list`, or `object`, merge it with the first element + // in + // the next list by applying these rules recursively. 
+ // * `object`: concatenate the (field name, field value) pairs. If a + // field name is duplicated, then apply these rules recursively + // to merge the field values. + // + // Some examples of merging: + // + // # Strings are concatenated. + // "foo", "bar" => "foobar" + // + // # Lists of non-strings are concatenated. + // [2, 3], [4] => [2, 3, 4] + // + // # Lists are concatenated, but the last and first elements are + // merged + // # because they are strings. + // ["a", "b"], ["c", "d"] => ["a", "bc", "d"] + // + // # Lists are concatenated, but the last and first elements are + // merged + // # because they are lists. Recursively, the last and first + // elements + // # of the inner lists are merged because they are strings. + // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"] + // + // # Non-overlapping object fields are combined. + // {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"} + // + // # Overlapping object fields are merged. + // {"a": "1"}, {"a": "2"} => {"a": "12"} + // + // # Examples of merging objects containing lists of strings. + // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]} + // + // For a more complete example, suppose a streaming SQL query + // is + // yielding a result set whose rows contain a single string + // field. The following `PartialResultSet`s might be yielded: + // + // { + // "metadata": { ... } + // "values": ["Hello", "W"] + // "chunked_value": true + // "resume_token": "Af65..." + // } + // { + // "values": ["orl"] + // "chunked_value": true + // "resume_token": "Bqp2..." + // } + // { + // "values": ["d"] + // "resume_token": "Zx1B..." + // } + // + // This sequence of `PartialResultSet`s encodes two rows, one + // containing the field value "Hello", and a second containing + // the + // field value "World" = "W" + "orl" + "d". + Values []interface{} `json:"values,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ChunkedValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChunkedValue") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PartialResultSet) MarshalJSON() ([]byte, error) { + type noMethod PartialResultSet + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PlanNode: Node information for nodes appearing in a +// QueryPlan.plan_nodes. +type PlanNode struct { + // ChildLinks: List of child node `index`es and their relationship to + // this parent. + ChildLinks []*ChildLink `json:"childLinks,omitempty"` + + // DisplayName: The display name for the node. 
+ DisplayName string `json:"displayName,omitempty"` + + // ExecutionStats: The execution statistics associated with the node, + // contained in a group of + // key-value pairs. Only present if the plan was returned as a result of + // a + // profile query. For example, number of executions, number of rows/time + // per + // execution etc. + ExecutionStats googleapi.RawMessage `json:"executionStats,omitempty"` + + // Index: The `PlanNode`'s index in node list. + Index int64 `json:"index,omitempty"` + + // Kind: Used to determine the type of node. May be needed for + // visualizing + // different kinds of nodes differently. For example, If the node is + // a + // SCALAR node, it will have a condensed representation + // which can be used to directly embed a description of the node in + // its + // parent. + // + // Possible values: + // "KIND_UNSPECIFIED" - Not specified. + // "RELATIONAL" - Denotes a Relational operator node in the expression + // tree. Relational + // operators represent iterative processing of rows during query + // execution. + // For example, a `TableScan` operation that reads rows from a table. + // "SCALAR" - Denotes a Scalar node in the expression tree. Scalar + // nodes represent + // non-iterable entities in the query plan. For example, constants + // or + // arithmetic operators appearing inside predicate expressions or + // references + // to column names. + Kind string `json:"kind,omitempty"` + + // Metadata: Attributes relevant to the node contained in a group of + // key-value pairs. + // For example, a Parameter Reference node could have the + // following + // information in its metadata: + // + // { + // "parameter_reference": "param1", + // "parameter_type": "array" + // } + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // ShortRepresentation: Condensed representation for SCALAR nodes. + ShortRepresentation *ShortRepresentation `json:"shortRepresentation,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ChildLinks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChildLinks") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PlanNode) MarshalJSON() ([]byte, error) { + type noMethod PlanNode + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Policy: Defines an Identity and Access Management (IAM) policy. It is +// used to +// specify access control policies for Cloud Platform resources. +// +// +// A `Policy` consists of a list of `bindings`. A `Binding` binds a list +// of +// `members` to a `role`, where the members can be user accounts, Google +// groups, +// Google domains, and service accounts. A `role` is a named list of +// permissions +// defined by IAM. 
+// +// **Example** +// +// { +// "bindings": [ +// { +// "role": "roles/owner", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", +// ] +// }, +// { +// "role": "roles/viewer", +// "members": ["user:sean@example.com"] +// } +// ] +// } +// +// For a description of IAM and its features, see the +// [IAM developer's guide](https://cloud.google.com/iam). +type Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` + + // Bindings: Associates a list of `members` to a `role`. + // Multiple `bindings` must not be specified for the same + // `role`. + // `bindings` with no members will result in an error. + Bindings []*Binding `json:"bindings,omitempty"` + + // Etag: `etag` is used for optimistic concurrency control as a way to + // help + // prevent simultaneous updates of a policy from overwriting each + // other. + // It is strongly suggested that systems make use of the `etag` in + // the + // read-modify-write cycle to perform policy updates in order to avoid + // race + // conditions: An `etag` is returned in the response to `getIamPolicy`, + // and + // systems are expected to put that etag in the request to + // `setIamPolicy` to + // ensure that their change will be applied to the same version of the + // policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the + // existing + // policy is overwritten blindly. + Etag string `json:"etag,omitempty"` + + IamOwned bool `json:"iamOwned,omitempty"` + + // Rules: If more than one rule is specified, the rules are applied in + // the following + // manner: + // - All matching LOG rules are always applied. + // - If any DENY/DENY_WITH_LOG rule matches, permission is denied. + // Logging will be applied if one or more matching rule requires + // logging. + // - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is + // granted. + // Logging will be applied if one or more matching rule requires + // logging. + // - Otherwise, if no rule applies, permission is denied. + Rules []*Rule `json:"rules,omitempty"` + + // Version: Version of the `Policy`. The default version is 0. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *Policy) MarshalJSON() ([]byte, error) { + type noMethod Policy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// QueryPlan: Contains an ordered list of nodes appearing in the query +// plan. +type QueryPlan struct { + // PlanNodes: The nodes in the query plan. Plan nodes are returned in + // pre-order starting + // with the plan root. Each PlanNode's `id` corresponds to its index + // in + // `plan_nodes`. + PlanNodes []*PlanNode `json:"planNodes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PlanNodes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PlanNodes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QueryPlan) MarshalJSON() ([]byte, error) { + type noMethod QueryPlan + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReadOnly: Options for read-only transactions. +type ReadOnly struct { + // ExactStaleness: Executes all reads at a timestamp that is + // `exact_staleness` + // old. The timestamp is chosen soon after the read is + // started. + // + // Guarantees that all writes that have committed more than + // the + // specified number of seconds ago are visible. Because Cloud + // Spanner + // chooses the exact timestamp, this mode works even if the + // client's + // local clock is substantially skewed from Cloud Spanner + // commit + // timestamps. + // + // Useful for reading at nearby replicas without the + // distributed + // timestamp negotiation overhead of `max_staleness`. + ExactStaleness string `json:"exactStaleness,omitempty"` + + // MaxStaleness: Read data at a timestamp >= `NOW - + // max_staleness` + // seconds. Guarantees that all writes that have committed more + // than the specified number of seconds ago are visible. Because + // Cloud Spanner chooses the exact timestamp, this mode works even + // if + // the client's local clock is substantially skewed from Cloud + // Spanner + // commit timestamps. + // + // Useful for reading the freshest data available at a nearby + // replica, while bounding the possible staleness if the local + // replica has fallen behind. + // + // Note that this option can only be used in single-use + // transactions. + MaxStaleness string `json:"maxStaleness,omitempty"` + + // MinReadTimestamp: Executes all reads at a timestamp >= + // `min_read_timestamp`. + // + // This is useful for requesting fresher data than some previous + // read, or data that is fresh enough to observe the effects of + // some + // previously committed transaction whose timestamp is known. + // + // Note that this option can only be used in single-use transactions. 
+ MinReadTimestamp string `json:"minReadTimestamp,omitempty"` + + // ReadTimestamp: Executes all reads at the given timestamp. Unlike + // other modes, + // reads at a specific timestamp are repeatable; the same read at + // the same timestamp always returns the same data. If the + // timestamp is in the future, the read will block until the + // specified timestamp, modulo the read's deadline. + // + // Useful for large scale consistent reads such as mapreduces, or + // for coordinating many reads against a consistent snapshot of + // the + // data. + ReadTimestamp string `json:"readTimestamp,omitempty"` + + // ReturnReadTimestamp: If true, the Cloud Spanner-selected read + // timestamp is included in + // the Transaction message that describes the transaction. + ReturnReadTimestamp bool `json:"returnReadTimestamp,omitempty"` + + // Strong: Read at a timestamp where all previously committed + // transactions + // are visible. + Strong bool `json:"strong,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExactStaleness") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExactStaleness") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ReadOnly) MarshalJSON() ([]byte, error) { + type noMethod ReadOnly + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReadRequest: The request for Read and +// StreamingRead. +type ReadRequest struct { + // Columns: The columns of table to be returned for each row + // matching + // this request. + Columns []string `json:"columns,omitempty"` + + // Index: If non-empty, the name of an index on table. This index + // is + // used instead of the table primary key when interpreting key_set + // and sorting result rows. See key_set for further information. + Index string `json:"index,omitempty"` + + // KeySet: Required. `key_set` identifies the rows to be yielded. + // `key_set` names the + // primary keys of the rows in table to be yielded, unless index + // is present. If index is present, then key_set instead names + // index keys in index. + // + // Rows are yielded in table primary key order (if index is empty) + // or index key order (if index is non-empty). + // + // It is not an error for the `key_set` to name rows that do not + // exist in the database. Read yields nothing for nonexistent rows. + KeySet *KeySet `json:"keySet,omitempty"` + + // Limit: If greater than zero, only the first `limit` rows are yielded. + // If `limit` + // is zero, the default is no limit. + Limit int64 `json:"limit,omitempty,string"` + + // ResumeToken: If this request is resuming a previously interrupted + // read, + // `resume_token` should be copied from the last + // PartialResultSet yielded before the interruption. 
Doing this + // enables the new read to resume where the last read left off. The + // rest of the request parameters must exactly match the request + // that yielded this token. + ResumeToken string `json:"resumeToken,omitempty"` + + // Table: Required. The name of the table in the database to be read. + Table string `json:"table,omitempty"` + + // Transaction: The transaction to use. If none is provided, the default + // is a + // temporary read-only transaction with strong concurrency. + Transaction *TransactionSelector `json:"transaction,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Columns") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Columns") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReadRequest) MarshalJSON() ([]byte, error) { + type noMethod ReadRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReadWrite: Options for read-write transactions. +type ReadWrite struct { +} + +// ResultSet: Results from Read or +// ExecuteSql. +type ResultSet struct { + // Metadata: Metadata about the result set, such as row type + // information. + Metadata *ResultSetMetadata `json:"metadata,omitempty"` + + // Rows: Each element in `rows` is a row whose format is defined + // by + // metadata.row_type. The ith element + // in each row matches the ith field in + // metadata.row_type. Elements are + // encoded based on type as described + // here. + Rows [][]interface{} `json:"rows,omitempty"` + + // Stats: Query plan and execution statistics for the query that + // produced this + // result set. These can be requested by + // setting + // ExecuteSqlRequest.query_mode. + Stats *ResultSetStats `json:"stats,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Metadata") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Metadata") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ResultSet) MarshalJSON() ([]byte, error) { + type noMethod ResultSet + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResultSetMetadata: Metadata about a ResultSet or PartialResultSet. +type ResultSetMetadata struct { + // RowType: Indicates the field names and types for the rows in the + // result + // set. For example, a SQL query like "SELECT UserId, UserName + // FROM + // Users" could return a `row_type` value like: + // + // "fields": [ + // { "name": "UserId", "type": { "code": "INT64" } }, + // { "name": "UserName", "type": { "code": "STRING" } }, + // ] + RowType *StructType `json:"rowType,omitempty"` + + // Transaction: If the read or SQL query began a transaction as a + // side-effect, the + // information about the new transaction is yielded here. + Transaction *Transaction `json:"transaction,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RowType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RowType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResultSetMetadata) MarshalJSON() ([]byte, error) { + type noMethod ResultSetMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResultSetStats: Additional statistics about a ResultSet or +// PartialResultSet. +type ResultSetStats struct { + // QueryPlan: QueryPlan for the query associated with this result. + QueryPlan *QueryPlan `json:"queryPlan,omitempty"` + + // QueryStats: Aggregated statistics from the execution of the query. + // Only present when + // the query is profiled. For example, a query could return the + // statistics as + // follows: + // + // { + // "rows_returned": "3", + // "elapsed_time": "1.22 secs", + // "cpu_time": "1.19 secs" + // } + QueryStats googleapi.RawMessage `json:"queryStats,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QueryPlan") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QueryPlan") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"`
+}
+
+func (s *ResultSetStats) MarshalJSON() ([]byte, error) {
+ type noMethod ResultSetStats
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// RollbackRequest: The request for Rollback.
+type RollbackRequest struct {
+ // TransactionId: Required. The transaction to roll back.
+ TransactionId string `json:"transactionId,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "TransactionId") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "TransactionId") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *RollbackRequest) MarshalJSON() ([]byte, error) {
+ type noMethod RollbackRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// Rule: A rule to be applied in a Policy.
+type Rule struct {
+ // Action: Required
+ //
+ // Possible values:
+ // "NO_ACTION" - Default no action.
+ // "ALLOW" - Matching 'Entries' grant access.
+ // "ALLOW_WITH_LOG" - Matching 'Entries' grant access and the caller
+ // promises to log
+ // the request per the returned log_configs.
+ // "DENY" - Matching 'Entries' deny access.
+ // "DENY_WITH_LOG" - Matching 'Entries' deny access and the caller
+ // promises to log
+ // the request per the returned log_configs.
+ // "LOG" - Matching 'Entries' tell IAM.Check callers to generate logs.
+ Action string `json:"action,omitempty"`
+
+ // Conditions: Additional restrictions that must be met
+ Conditions []*Condition `json:"conditions,omitempty"`
+
+ // Description: Human-readable description of the rule.
+ Description string `json:"description,omitempty"`
+
+ // In: If one or more 'in' clauses are specified, the rule matches
+ // if
+ // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
+ In []string `json:"in,omitempty"`
+
+ // LogConfig: The config returned to callers of tech.iam.IAM.CheckPolicy
+ // for any entries
+ // that match the LOG action.
+ LogConfig []*LogConfig `json:"logConfig,omitempty"`
+
+ // NotIn: If one or more 'not_in' clauses are specified, the rule
+ // matches
+ // if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.
+ // The format for in and not_in entries is the same as for members in
+ // a
+ // Binding (see google/iam/v1/policy.proto).
+ NotIn []string `json:"notIn,omitempty"`
+
+ // Permissions: A permission is a string of form
+ // '<service>.<resource type>.<verb>'
+ // (e.g., 'storage.buckets.list'). A value of '*' matches all
+ // permissions,
+ // and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.
+ Permissions []string `json:"permissions,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Action") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests.
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Rule) MarshalJSON() ([]byte, error) { + type noMethod Rule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Session: A session in the Cloud Spanner API. +type Session struct { + // Name: Required. The name of the session. + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Session) MarshalJSON() ([]byte, error) { + type noMethod Session + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SetIamPolicyRequest: Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + // Policy: REQUIRED: The complete policy to be applied to the + // `resource`. The size of + // the policy is limited to a few 10s of KB. An empty policy is a + // valid policy but certain Cloud Platform services (such as + // Projects) + // might reject them. + Policy *Policy `json:"policy,omitempty"` + + // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the + // policy to modify. Only + // the fields in the mask will be modified. If no mask is provided, a + // default + // mask is used: + // paths: "bindings, etag" + // This field is only used by Cloud IAM. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Policy") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type noMethod SetIamPolicyRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ShortRepresentation: Condensed representation of a node and its +// subtree. Only present for +// `SCALAR` PlanNode(s). +type ShortRepresentation struct { + // Description: A string representation of the expression subtree rooted + // at this node. + Description string `json:"description,omitempty"` + + // Subqueries: A mapping of (subquery variable name) -> (subquery node + // id) for cases + // where the `description` string of this node references a + // `SCALAR` + // subquery contained in the expression subtree rooted at this node. + // The + // referenced `SCALAR` subquery may not necessarily be a direct child + // of + // this node. + Subqueries map[string]int64 `json:"subqueries,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ShortRepresentation) MarshalJSON() ([]byte, error) { + type noMethod ShortRepresentation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is +// suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of +// google.rpc.Code, but it may accept additional error codes if needed. +// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error +// details or +// localize it in the client. The optional error details may contain +// arbitrary +// information about the error. There is a predefined set of error +// detail types +// in the package `google.rpc` which can be used for common error +// conditions. 
+// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it +// is not necessarily the actual wire format. When the `Status` message +// is +// exposed in different client libraries and different wire protocols, +// it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety +// of +// environments, either with or without APIs, to provide a +// consistent developer experience across different +// environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the +// client, +// it may embed the `Status` in the normal response to indicate the +// partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may +// have a `Status` message for error reporting purpose. +// +// - Batch operations. If a client uses batch request and batch +// response, the +// `Status` message should be used directly inside batch response, +// one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation +// results in its response, the status of those operations should +// be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could +// be used directly after any stripping needed for security/privacy +// reasons. +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There will + // be a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any + // user-facing error message should be localized and sent in + // the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type noMethod Status + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StructType: `StructType` defines the fields of a STRUCT type. +type StructType struct { + // Fields: The list of fields that make up this struct. 
Order + // is + // significant, because values of this struct type are represented + // as + // lists, where the order of field values matches the order of + // fields in the StructType. In turn, the order of fields + // matches the order of columns in a read request, or the order + // of + // fields in the `SELECT` clause of a query. + Fields []*Field `json:"fields,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fields") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fields") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StructType) MarshalJSON() ([]byte, error) { + type noMethod StructType + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsRequest: Request message for `TestIamPermissions` +// method. +type TestIamPermissionsRequest struct { + // Permissions: REQUIRED: The set of permissions to check for + // 'resource'. + // Permissions with wildcards (such as '*', 'spanner.*', + // 'spanner.instances.*') are not allowed. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { + type noMethod TestIamPermissionsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsResponse: Response message for `TestIamPermissions` +// method. +type TestIamPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type noMethod TestIamPermissionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Transaction: A transaction. +type Transaction struct { + // Id: `id` may be used to identify the transaction in + // subsequent + // Read, + // ExecuteSql, + // Commit, or + // Rollback calls. + // + // Single-use read-only transactions do not have IDs, because + // single-use transactions do not support multiple requests. + Id string `json:"id,omitempty"` + + // ReadTimestamp: For snapshot read-only transactions, the read + // timestamp chosen + // for the transaction. Not returned by default: + // see + // TransactionOptions.ReadOnly.return_read_timestamp. + ReadTimestamp string `json:"readTimestamp,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Transaction) MarshalJSON() ([]byte, error) { + type noMethod Transaction + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TransactionOptions: # Transactions +// +// +// Each session can have at most one active transaction at a time. After +// the +// active transaction is completed, the session can immediately +// be +// re-used for the next transaction. It is not necessary to create a +// new session for each transaction. +// +// # Transaction Modes +// +// Cloud Spanner supports two transaction modes: +// +// 1. Locking read-write. This type of transaction is the only way +// to write data into Cloud Spanner. These transactions rely on +// pessimistic locking and, if necessary, two-phase commit. +// Locking read-write transactions may abort, requiring the +// application to retry. +// +// 2. 
Snapshot read-only. This transaction type provides guaranteed +// consistency across several reads, but does not allow +// writes. Snapshot read-only transactions can be configured to +// read at timestamps in the past. Snapshot read-only +// transactions do not need to be committed. +// +// For transactions that only read, snapshot read-only +// transactions +// provide simpler semantics and are almost always faster. +// In +// particular, read-only transactions do not take locks, so they do +// not conflict with read-write transactions. As a consequence of +// not +// taking locks, they also do not abort, so retry loops are not +// needed. +// +// Transactions may only read/write data in a single database. They +// may, however, read/write data in different tables within +// that +// database. +// +// ## Locking Read-Write Transactions +// +// Locking transactions may be used to atomically read-modify-write +// data anywhere in a database. This type of transaction is +// externally +// consistent. +// +// Clients should attempt to minimize the amount of time a +// transaction +// is active. Faster transactions commit with higher probability +// and cause less contention. Cloud Spanner attempts to keep read +// locks +// active as long as the transaction continues to do reads, and +// the +// transaction has not been terminated by +// Commit or +// Rollback. Long periods of +// inactivity at the client may cause Cloud Spanner to release +// a +// transaction's locks and abort it. +// +// Reads performed within a transaction acquire locks on the data +// being read. Writes can only be done at commit time, after all +// reads +// have been completed. +// Conceptually, a read-write transaction consists of zero or more +// reads or SQL queries followed by +// Commit. At any time before +// Commit, the client can send a +// Rollback request to abort the +// transaction. +// +// ### Semantics +// +// Cloud Spanner can commit the transaction if all read locks it +// acquired +// are still valid at commit time, and it is able to acquire write +// locks for all writes. Cloud Spanner can abort the transaction for +// any +// reason. If a commit attempt returns `ABORTED`, Cloud Spanner +// guarantees +// that the transaction has not modified any user data in Cloud +// Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees +// about +// how long the transaction's locks were held for. It is an error to +// use Cloud Spanner locks for any sort of mutual exclusion other +// than +// between Cloud Spanner transactions themselves. +// +// ### Retrying Aborted Transactions +// +// When a transaction aborts, the application can choose to retry +// the +// whole transaction again. To maximize the chances of +// successfully +// committing the retry, the client should execute the retry in the +// same session as the original attempt. The original session's +// lock +// priority increases with each consecutive abort, meaning that +// each +// attempt has a slightly better chance of success than the +// previous. +// +// Under some circumstances (e.g., many transactions attempting +// to +// modify the same row(s)), a transaction can abort many times in +// a +// short period before successfully committing. Thus, it is not a +// good +// idea to cap the number of retries a transaction can attempt; +// instead, it is better to limit the total amount of wall time +// spent +// retrying. 
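+//
+// For illustration, a locking read-write transaction is requested by setting
+// the ReadWrite option of TransactionOptions (both names are defined in this
+// package; the empty ReadWrite literal is used here purely as a sketch):
+//
+//   opts := &TransactionOptions{ReadWrite: &ReadWrite{}}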
+// +// ### Idle Transactions +// +// A transaction is considered idle if it has no outstanding reads +// or +// SQL queries and has not started a read or SQL query within the last +// 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that +// they +// don't hold on to locks indefinitely. In that case, the commit +// will +// fail with error `ABORTED`. +// +// If this behavior is undesirable, periodically executing a simple +// SQL query in the transaction (e.g., `SELECT 1`) prevents +// the +// transaction from becoming idle. +// +// ## Snapshot Read-Only Transactions +// +// Snapshot read-only transactions provides a simpler method +// than +// locking read-write transactions for doing several consistent +// reads. However, this type of transaction does not support +// writes. +// +// Snapshot transactions do not take locks. Instead, they work +// by +// choosing a Cloud Spanner timestamp, then executing all reads at +// that +// timestamp. Since they do not acquire locks, they do not +// block +// concurrent read-write transactions. +// +// Unlike locking read-write transactions, snapshot +// read-only +// transactions never abort. They can fail if the chosen read +// timestamp is garbage collected; however, the default +// garbage +// collection policy is generous enough that most applications do +// not +// need to worry about this in practice. +// +// Snapshot read-only transactions do not need to call +// Commit or +// Rollback (and in fact are not +// permitted to do so). +// +// To execute a snapshot transaction, the client specifies a +// timestamp +// bound, which tells Cloud Spanner how to choose a read timestamp. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically +// distributed, +// stale read-only transactions can execute more quickly than strong +// or read-write transaction, because they are able to execute far +// from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. +// +// ### Strong +// +// Strong reads are guaranteed to see the effects of all +// transactions +// that have committed before the start of the read. Furthermore, +// all +// rows yielded by a single read are consistent with each other -- +// if +// any part of the read observes a transaction, all parts of the +// read +// see the transaction. +// +// Strong reads are not repeatable: two consecutive strong +// read-only +// transactions might return inconsistent results if there +// are +// concurrent writes. If consistency across reads is required, the +// reads should be executed within a transaction or at an exact +// read +// timestamp. +// +// See TransactionOptions.ReadOnly.strong. +// +// ### Exact Staleness +// +// These timestamp bounds execute reads at a user-specified +// timestamp. Reads at a timestamp are guaranteed to see a +// consistent +// prefix of the global transaction history: they observe +// modifications done by all transactions with a commit timestamp <= +// the read timestamp, and observe none of the modifications done +// by +// transactions with a larger commit timestamp. They will block +// until +// all conflicting transactions that may be assigned commit +// timestamps +// <= the read timestamp have finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner +// commit +// timestamp or a staleness relative to the current time. 
+// +// These modes do not require a "negotiation phase" to pick a +// timestamp. As a result, they execute slightly faster than +// the +// equivalent boundedly stale concurrency modes. On the other +// hand, +// boundedly stale reads usually return fresher results. +// +// See TransactionOptions.ReadOnly.read_timestamp +// and +// TransactionOptions.ReadOnly.exact_staleness. +// +// ### Bounded Staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read +// timestamp, +// subject to a user-provided staleness bound. Cloud Spanner chooses +// the +// newest timestamp within the staleness bound that allows execution +// of the reads at the closest available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of +// the read observes a transaction, all parts of the read see +// the +// transaction. Boundedly stale reads are not repeatable: two +// stale +// reads, even if they use the same staleness bound, can execute +// at +// different timestamps and thus return inconsistent results. +// +// Boundedly stale reads execute in two phases: the first +// phase +// negotiates a timestamp among all replicas needed to serve the +// read. In the second phase, reads are executed at the +// negotiated +// timestamp. +// +// As a result of the two phase execution, bounded staleness reads +// are +// usually a little slower than comparable exact staleness +// reads. However, they are typically able to return fresher +// results, and are more likely to execute at the closest +// replica. +// +// Because the timestamp negotiation requires up-front knowledge +// of +// which rows will be read, it can only be used with +// single-use +// read-only transactions. +// +// See TransactionOptions.ReadOnly.max_staleness +// and +// TransactionOptions.ReadOnly.min_read_timestamp. +// +// ### Old Read Timestamps and Garbage Collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten +// data +// in the background to reclaim storage space. This process is known +// as "version GC". By default, version GC reclaims versions after +// they +// are one hour old. Because of this, Cloud Spanner cannot perform +// reads +// at read timestamps more than one hour in the past. This +// restriction also applies to in-progress reads and/or SQL queries +// whose +// timestamp become too old while executing. Reads and SQL queries +// with +// too-old read timestamps fail with the error `FAILED_PRECONDITION`. +type TransactionOptions struct { + // ReadOnly: Transaction will not write. + // + // Authorization to begin a read-only transaction + // requires + // `spanner.databases.beginReadOnlyTransaction` permission + // on the `session` resource. + ReadOnly *ReadOnly `json:"readOnly,omitempty"` + + // ReadWrite: Transaction may write. + // + // Authorization to begin a read-write transaction + // requires + // `spanner.databases.beginOrRollbackReadWriteTransaction` permission + // on the `session` resource. + ReadWrite *ReadWrite `json:"readWrite,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ReadOnly") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ReadOnly") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TransactionOptions) MarshalJSON() ([]byte, error) { + type noMethod TransactionOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TransactionSelector: This message is used to select the transaction +// in which a +// Read or +// ExecuteSql call runs. +// +// See TransactionOptions for more information about transactions. +type TransactionSelector struct { + // Begin: Begin a new transaction and execute this read or SQL query + // in + // it. The transaction ID of the new transaction is returned + // in + // ResultSetMetadata.transaction, which is a Transaction. + Begin *TransactionOptions `json:"begin,omitempty"` + + // Id: Execute the read or SQL query in a previously-started + // transaction. + Id string `json:"id,omitempty"` + + // SingleUse: Execute the read or SQL query in a temporary + // transaction. + // This is the most efficient way to execute a transaction that + // consists of a single SQL query. + SingleUse *TransactionOptions `json:"singleUse,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Begin") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Begin") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TransactionSelector) MarshalJSON() ([]byte, error) { + type noMethod TransactionSelector + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Type: `Type` indicates the type of a Cloud Spanner value, as might be +// stored in a +// table cell or returned from an SQL query. +type Type struct { + // ArrayElementType: If code == ARRAY, then `array_element_type` + // is the type of the array elements. + ArrayElementType *Type `json:"arrayElementType,omitempty"` + + // Code: Required. The TypeCode for this type. + // + // Possible values: + // "TYPE_CODE_UNSPECIFIED" - Not specified. + // "BOOL" - Encoded as JSON `true` or `false`. + // "INT64" - Encoded as `string`, in decimal format. + // "FLOAT64" - Encoded as `number`, or the strings "NaN", + // "Infinity", or + // "-Infinity". + // "TIMESTAMP" - Encoded as `string` in RFC 3339 timestamp format. The + // time zone + // must be present, and must be "Z". + // "DATE" - Encoded as `string` in RFC 3339 date format. + // "STRING" - Encoded as `string`. 
+ // "BYTES" - Encoded as a base64-encoded `string`, as described in RFC + // 4648, + // section 4. + // "ARRAY" - Encoded as `list`, where the list elements are + // represented + // according to array_element_type. + // "STRUCT" - Encoded as `list`, where list element `i` is represented + // according + // to [struct_type.fields[i]][google.spanner.v1.StructType.fields]. + Code string `json:"code,omitempty"` + + // StructType: If code == STRUCT, then `struct_type` + // provides type information for the struct's fields. + StructType *StructType `json:"structType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ArrayElementType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ArrayElementType") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Type) MarshalJSON() ([]byte, error) { + type noMethod Type + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UpdateDatabaseDdlMetadata: Metadata type for the operation returned +// by +// UpdateDatabaseDdl. +type UpdateDatabaseDdlMetadata struct { + // CommitTimestamps: Reports the commit timestamps of all statements + // that have + // succeeded so far, where `commit_timestamps[i]` is the + // commit + // timestamp for the statement `statements[i]`. + CommitTimestamps []string `json:"commitTimestamps,omitempty"` + + // Database: The database being modified. + Database string `json:"database,omitempty"` + + // Statements: For an update this list contains all the statements. For + // an + // individual statement, this list contains only that statement. + Statements []string `json:"statements,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CommitTimestamps") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommitTimestamps") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *UpdateDatabaseDdlMetadata) MarshalJSON() ([]byte, error) { + type noMethod UpdateDatabaseDdlMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UpdateDatabaseDdlRequest: Enqueues the given DDL statements to be +// applied, in order but not +// necessarily all at once, to the database schema at some point +// (or +// points) in the future. The server checks that the statements +// are executable (syntactically valid, name tables that exist, +// etc.) +// before enqueueing them, but they may still fail upon +// later execution (e.g., if a statement from another batch +// of +// statements is applied first and it conflicts in some way, or if +// there is some data-related problem like a `NULL` value in a column +// to +// which `NOT NULL` would be added). If a statement fails, +// all +// subsequent statements in the batch are automatically cancelled. +// +// Each batch of statements is assigned a name which can be used +// with +// the Operations API to monitor +// progress. See the +// operation_id field for more +// details. +type UpdateDatabaseDdlRequest struct { + // OperationId: If empty, the new update request is assigned + // an + // automatically-generated operation ID. Otherwise, `operation_id` + // is used to construct the name of the resulting + // Operation. + // + // Specifying an explicit operation ID simplifies determining + // whether the statements were executed in the event that + // the + // UpdateDatabaseDdl call is replayed, + // or the return value is otherwise lost: the database + // and + // `operation_id` fields can be combined to form the + // name of the resulting + // longrunning.Operation: + // `/operations/`. + // + // `operation_id` should be unique within the database, and must be + // a valid identifier: `a-z*`. Note that + // automatically-generated operation IDs always begin with + // an + // underscore. If the named operation already exists, + // UpdateDatabaseDdl returns + // `ALREADY_EXISTS`. + OperationId string `json:"operationId,omitempty"` + + // Statements: DDL statements to be applied to the database. + Statements []string `json:"statements,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OperationId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OperationId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UpdateDatabaseDdlRequest) MarshalJSON() ([]byte, error) { + type noMethod UpdateDatabaseDdlRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UpdateInstanceMetadata: Metadata type for the operation returned +// by +// UpdateInstance. 
+type UpdateInstanceMetadata struct { + // CancelTime: The time at which this operation was cancelled. If set, + // this operation is + // in the process of undoing itself (which is guaranteed to succeed) + // and + // cannot be cancelled again. + CancelTime string `json:"cancelTime,omitempty"` + + // EndTime: The time at which this operation failed or was completed + // successfully. + EndTime string `json:"endTime,omitempty"` + + // Instance: The desired end state of the update. + Instance *Instance `json:"instance,omitempty"` + + // StartTime: The time at which UpdateInstance + // request was received. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CancelTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CancelTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UpdateInstanceMetadata) MarshalJSON() ([]byte, error) { + type noMethod UpdateInstanceMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UpdateInstanceRequest: The request for UpdateInstance. +type UpdateInstanceRequest struct { + // FieldMask: Required. A mask specifying which fields in + // [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] + // should be updated. + // The field mask must always be specified; this prevents any future + // fields in + // [][google.spanner.admin.instance.v1.Instance] from being erased + // accidentally by clients that do not know + // about them. + FieldMask string `json:"fieldMask,omitempty"` + + // Instance: Required. The instance to update, which must always include + // the instance + // name. Otherwise, only fields mentioned in + // [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] + // need be included. + Instance *Instance `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FieldMask") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FieldMask") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UpdateInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod UpdateInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Write: Arguments to insert, update, insert_or_update, and +// replace operations. +type Write struct { + // Columns: The names of the columns in table to be written. + // + // The list of columns must contain enough columns to allow + // Cloud Spanner to derive values for all primary key columns in + // the + // row(s) to be modified. + Columns []string `json:"columns,omitempty"` + + // Table: Required. The table whose rows will be written. + Table string `json:"table,omitempty"` + + // Values: The values to be written. `values` can contain more than + // one + // list of values. If it does, then multiple rows are written, one + // for each entry in `values`. Each list in `values` must have + // exactly as many entries as there are entries in columns + // above. Sending multiple lists is equivalent to sending + // multiple + // `Mutation`s, each containing one `values` entry and repeating + // table and columns. Individual values in each list are + // encoded as described here. + Values [][]interface{} `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Columns") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Columns") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Write) MarshalJSON() ([]byte, error) { + type noMethod Write + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "spanner.projects.instanceConfigs.get": + +type ProjectsInstanceConfigsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets information about a particular instance configuration. +func (r *ProjectsInstanceConfigsService) Get(name string) *ProjectsInstanceConfigsGetCall { + c := &ProjectsInstanceConfigsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstanceConfigsGetCall) Fields(s ...googleapi.Field) *ProjectsInstanceConfigsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
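+//
+// A minimal usage sketch (svc is an assumed, previously constructed *Service;
+// name is the instance configuration's resource name; entityTag is a
+// hypothetical value taken from an earlier response's ETag header):
+//
+//   cfg, err := svc.Projects.InstanceConfigs.Get(name).IfNoneMatch(entityTag).Do()
+//   if googleapi.IsNotModified(err) {
+//       // The cached copy is still current; cfg is nil in this case.
+//   }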
+func (c *ProjectsInstanceConfigsGetCall) IfNoneMatch(entityTag string) *ProjectsInstanceConfigsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstanceConfigsGetCall) Context(ctx context.Context) *ProjectsInstanceConfigsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstanceConfigsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstanceConfigsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instanceConfigs.get" call. +// Exactly one of *InstanceConfig or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceConfig.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstanceConfigsGetCall) Do(opts ...googleapi.CallOption) (*InstanceConfig, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about a particular instance configuration.", + // "flatPath": "v1/projects/{projectsId}/instanceConfigs/{instanceConfigsId}", + // "httpMethod": "GET", + // "id": "spanner.projects.instanceConfigs.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the requested instance configuration. 
Values are of\nthe form `projects/\u003cproject\u003e/instanceConfigs/\u003cconfig\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instanceConfigs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "InstanceConfig" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instanceConfigs.list": + +type ProjectsInstanceConfigsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the supported instance configurations for a given +// project. +func (r *ProjectsInstanceConfigsService) List(parent string) *ProjectsInstanceConfigsListCall { + c := &ProjectsInstanceConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Number of instance +// configurations to be returned in the response. If 0 or +// less, defaults to the server's maximum allowed page size. +func (c *ProjectsInstanceConfigsListCall) PageSize(pageSize int64) *ProjectsInstanceConfigsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If non-empty, +// `page_token` should contain a +// next_page_token +// from a previous ListInstanceConfigsResponse. +func (c *ProjectsInstanceConfigsListCall) PageToken(pageToken string) *ProjectsInstanceConfigsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstanceConfigsListCall) Fields(s ...googleapi.Field) *ProjectsInstanceConfigsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstanceConfigsListCall) IfNoneMatch(entityTag string) *ProjectsInstanceConfigsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstanceConfigsListCall) Context(ctx context.Context) *ProjectsInstanceConfigsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstanceConfigsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstanceConfigsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/instanceConfigs") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instanceConfigs.list" call. +// Exactly one of *ListInstanceConfigsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ListInstanceConfigsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstanceConfigsListCall) Do(opts ...googleapi.CallOption) (*ListInstanceConfigsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListInstanceConfigsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the supported instance configurations for a given project.", + // "flatPath": "v1/projects/{projectsId}/instanceConfigs", + // "httpMethod": "GET", + // "id": "spanner.projects.instanceConfigs.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Number of instance configurations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListInstanceConfigsResponse.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The name of the project for which a list of supported instance\nconfigurations is requested. Values are of the form\n`projects/\u003cproject\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/instanceConfigs", + // "response": { + // "$ref": "ListInstanceConfigsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
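+//
+// A minimal sketch of walking every page (svc is an assumed, previously
+// constructed *Service; ctx is an assumed context.Context; InstanceConfigs is
+// the response field holding the configurations of the current page):
+//
+//   err := svc.Projects.InstanceConfigs.List("projects/my-project").
+//       Pages(ctx, func(page *ListInstanceConfigsResponse) error {
+//           for _, cfg := range page.InstanceConfigs {
+//               fmt.Println(cfg.Name)
+//           }
+//           return nil
+//       })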
+func (c *ProjectsInstanceConfigsListCall) Pages(ctx context.Context, f func(*ListInstanceConfigsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "spanner.projects.instances.create": + +type ProjectsInstancesCreateCall struct { + s *Service + parent string + createinstancerequest *CreateInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an instance and begins preparing it to begin serving. +// The +// returned long-running operation +// can be used to track the progress of preparing the new +// instance. The instance name is assigned by the caller. If the +// named instance already exists, `CreateInstance` +// returns +// `ALREADY_EXISTS`. +// +// Immediately upon completion of this request: +// +// * The instance is readable via the API, with all requested +// attributes +// but no allocated resources. Its state is `CREATING`. +// +// Until completion of the returned operation: +// +// * Cancelling the operation renders the instance immediately +// unreadable +// via the API. +// * The instance can be deleted. +// * All other attempts to modify the instance are rejected. +// +// Upon completion of the returned operation: +// +// * Billing for all successfully-allocated resources begins (some +// types +// may have lower than the requested levels). +// * Databases can be created in the instance. +// * The instance's allocated resource levels are readable via the +// API. +// * The instance's state becomes `READY`. +// +// The returned long-running operation will +// have a name of the format `/operations/` +// and +// can be used to track creation of the instance. The +// metadata field type is +// CreateInstanceMetadata. +// The response field type is +// Instance, if successful. +func (r *ProjectsInstancesService) Create(parent string, createinstancerequest *CreateInstanceRequest) *ProjectsInstancesCreateCall { + c := &ProjectsInstancesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.createinstancerequest = createinstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesCreateCall) Fields(s ...googleapi.Field) *ProjectsInstancesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesCreateCall) Context(ctx context.Context) *ProjectsInstancesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createinstancerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance and begins preparing it to begin serving. The\nreturned long-running operation\ncan be used to track the progress of preparing the new\ninstance. The instance name is assigned by the caller. If the\nnamed instance already exists, `CreateInstance` returns\n`ALREADY_EXISTS`.\n\nImmediately upon completion of this request:\n\n * The instance is readable via the API, with all requested attributes\n but no allocated resources. Its state is `CREATING`.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation renders the instance immediately unreadable\n via the API.\n * The instance can be deleted.\n * All other attempts to modify the instance are rejected.\n\nUpon completion of the returned operation:\n\n * Billing for all successfully-allocated resources begins (some types\n may have lower than the requested levels).\n * Databases can be created in the instance.\n * The instance's allocated resource levels are readable via the API.\n * The instance's state becomes `READY`.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track creation of the instance. 
The\nmetadata field type is\nCreateInstanceMetadata.\nThe response field type is\nInstance, if successful.", + // "flatPath": "v1/projects/{projectsId}/instances", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the project in which to create the instance. Values\nare of the form `projects/\u003cproject\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/instances", + // "request": { + // "$ref": "CreateInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.delete": + +type ProjectsInstancesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an instance. +// +// Immediately upon completion of the request: +// +// * Billing ceases for all of the instance's reserved +// resources. +// +// Soon afterward: +// +// * The instance and *all of its databases* immediately and +// irrevocably disappear from the API. All data in the databases +// is permanently deleted. +func (r *ProjectsInstancesService) Delete(name string) *ProjectsInstancesDeleteCall { + c := &ProjectsInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDeleteCall) Fields(s ...googleapi.Field) *ProjectsInstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDeleteCall) Context(ctx context.Context) *ProjectsInstancesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *ProjectsInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an instance.\n\nImmediately upon completion of the request:\n\n * Billing ceases for all of the instance's reserved resources.\n\nSoon afterward:\n\n * The instance and *all of its databases* immediately and\n irrevocably disappear from the API. All data in the databases\n is permanently deleted.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", + // "httpMethod": "DELETE", + // "id": "spanner.projects.instances.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the instance to be deleted. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.get": + +type ProjectsInstancesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets information about a particular instance. +func (r *ProjectsInstancesService) Get(name string) *ProjectsInstancesGetCall { + c := &ProjectsInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesGetCall) Context(ctx context.Context) *ProjectsInstancesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Instance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about a particular instance.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the requested instance. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Instance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.getIamPolicy": + +type ProjectsInstancesGetIamPolicyCall struct { + s *Service + resource string + getiampolicyrequest *GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for an instance +// resource. Returns an empty +// policy if an instance exists but does not have a policy +// set. +// +// Authorization requires `spanner.instances.getIamPolicy` on +// resource. 
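// [Editor's note — not part of the generated file] Illustrative sketch for the
// instances.get call above, reusing the svc and ctx set up in the earlier
// delete sketch. Fields requests a partial response; the field names passed to
// it, and the Name field read from the result, are assumptions, since the
// Instance schema is not shown in this hunk.
//
//	inst, err := svc.Projects.Instances.Get("projects/my-project/instances/my-instance").
//		Fields("name", "displayName", "state"). // assumed JSON field names
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("instance: %s", inst.Name) // Instance.Name assumed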
+func (r *ProjectsInstancesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesGetIamPolicyCall { + c := &ProjectsInstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.getiampolicyrequest = getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesGetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for an instance resource. 
Returns an empty\npolicy if an instance exists but does not have a policy set.\n\nAuthorization requires `spanner.instances.getIamPolicy` on\nresource.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:getIamPolicy", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:getIamPolicy", + // "request": { + // "$ref": "GetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.list": + +type ProjectsInstancesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all instances in the given project. +func (r *ProjectsInstancesService) List(parent string) *ProjectsInstancesListCall { + c := &ProjectsInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": An expression for +// filtering the results of the request. Filter rules are +// case insensitive. The fields eligible for filtering are: +// +// * name +// * display_name +// * labels.key where key is the name of a label +// +// Some examples of using filters are: +// +// * name:* --> The instance has a name. +// * name:Howl --> The instance's name contains the string "howl". +// * name:HOWL --> Equivalent to above. +// * NAME:howl --> Equivalent to above. +// * labels.env:* --> The instance has the label "env". +// * labels.env:dev --> The instance has the label "env" and the value +// of +// the label contains the string "dev". +// * name:howl labels.env:dev --> The instance's name contains "howl" +// and +// it has the label "env" with its +// value +// containing "dev". +func (c *ProjectsInstancesListCall) Filter(filter string) *ProjectsInstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": Number of instances +// to be returned in the response. If 0 or less, defaults +// to the server's maximum allowed page size. +func (c *ProjectsInstancesListCall) PageSize(pageSize int64) *ProjectsInstancesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If non-empty, +// `page_token` should contain a +// next_page_token from a +// previous ListInstancesResponse. +func (c *ProjectsInstancesListCall) PageToken(pageToken string) *ProjectsInstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
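// [Editor's note — not part of the generated file] Sketch for the
// instances.getIamPolicy call above, with the same svc/ctx assumptions as the
// earlier sketches. The empty GetIamPolicyRequest literal and the Bindings,
// Role and Members fields read from the result are assumptions; only the
// request/response type names appear in this hunk.
//
//	policy, err := svc.Projects.Instances.GetIamPolicy(
//		"projects/my-project/instances/my-instance",
//		&spanner.GetIamPolicyRequest{},
//	).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, b := range policy.Bindings { // assumed field
//		log.Printf("%s -> %v", b.Role, b.Members) // assumed fields
//	}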
+func (c *ProjectsInstancesListCall) Fields(s ...googleapi.Field) *ProjectsInstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesListCall) IfNoneMatch(entityTag string) *ProjectsInstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesListCall) Context(ctx context.Context) *ProjectsInstancesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.list" call. +// Exactly one of *ListInstancesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListInstancesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesListCall) Do(opts ...googleapi.CallOption) (*ListInstancesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListInstancesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all instances in the given project.", + // "flatPath": "v1/projects/{projectsId}/instances", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "An expression for filtering the results of the request. 
Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n * name\n * display_name\n * labels.key where key is the name of a label\n\nSome examples of using filters are:\n\n * name:* --\u003e The instance has a name.\n * name:Howl --\u003e The instance's name contains the string \"howl\".\n * name:HOWL --\u003e Equivalent to above.\n * NAME:howl --\u003e Equivalent to above.\n * labels.env:* --\u003e The instance has the label \"env\".\n * labels.env:dev --\u003e The instance has the label \"env\" and the value of\n the label contains the string \"dev\".\n * name:howl labels.env:dev --\u003e The instance's name contains \"howl\" and\n it has the label \"env\" with its value\n containing \"dev\".", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Number of instances to be returned in the response. If 0 or less, defaults\nto the server's maximum allowed page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListInstancesResponse.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The name of the project for which a list of instances is\nrequested. Values are of the form `projects/\u003cproject\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/instances", + // "response": { + // "$ref": "ListInstancesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsInstancesListCall) Pages(ctx context.Context, f func(*ListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "spanner.projects.instances.patch": + +type ProjectsInstancesPatchCall struct { + s *Service + nameid string + updateinstancerequest *UpdateInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an instance, and begins allocating or releasing +// resources +// as requested. The returned long-running +// operation can be used to track the +// progress of updating the instance. If the named instance does +// not +// exist, returns `NOT_FOUND`. +// +// Immediately upon completion of this request: +// +// * For resource types for which a decrease in the instance's +// allocation +// has been requested, billing is based on the newly-requested +// level. +// +// Until completion of the returned operation: +// +// * Cancelling the operation sets its metadata's +// cancel_time, and begins +// restoring resources to their pre-request values. The operation +// is guaranteed to succeed at undoing all resource changes, +// after which point it terminates with a `CANCELLED` status. +// * All other attempts to modify the instance are rejected. +// * Reading the instance via the API continues to give the +// pre-request +// resource levels. 
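// [Editor's note — not part of the generated file] Sketch for the
// instances.list call and its Pages helper defined above (same svc/ctx
// assumptions as earlier). The filter string follows the syntax documented on
// the Filter method; the Instances and Name fields iterated over are
// assumptions, since the ListInstancesResponse and Instance schemas are not
// shown in this hunk.
//
//	err := svc.Projects.Instances.List("projects/my-project").
//		Filter("labels.env:dev").
//		PageSize(100).
//		Pages(ctx, func(page *spanner.ListInstancesResponse) error {
//			for _, inst := range page.Instances { // assumed field
//				log.Println(inst.Name) // assumed field
//			}
//			return nil
//		})
//	if err != nil {
//		log.Fatal(err)
//	}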
+// +// Upon completion of the returned operation: +// +// * Billing begins for all successfully-allocated resources (some +// types +// may have lower than the requested levels). +// * All newly-reserved resources are available for serving the +// instance's +// tables. +// * The instance's new resource levels are readable via the API. +// +// The returned long-running operation will +// have a name of the format `/operations/` +// and +// can be used to track the instance modification. The +// metadata field type is +// UpdateInstanceMetadata. +// The response field type is +// Instance, if successful. +// +// Authorization requires `spanner.instances.update` permission +// on +// resource name. +func (r *ProjectsInstancesService) Patch(nameid string, updateinstancerequest *UpdateInstanceRequest) *ProjectsInstancesPatchCall { + c := &ProjectsInstancesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.updateinstancerequest = updateinstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesPatchCall) Fields(s ...googleapi.Field) *ProjectsInstancesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesPatchCall) Context(ctx context.Context) *ProjectsInstancesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateinstancerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an instance, and begins allocating or releasing resources\nas requested. The returned long-running\noperation can be used to track the\nprogress of updating the instance. If the named instance does not\nexist, returns `NOT_FOUND`.\n\nImmediately upon completion of this request:\n\n * For resource types for which a decrease in the instance's allocation\n has been requested, billing is based on the newly-requested level.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation sets its metadata's\n cancel_time, and begins\n restoring resources to their pre-request values. The operation\n is guaranteed to succeed at undoing all resource changes,\n after which point it terminates with a `CANCELLED` status.\n * All other attempts to modify the instance are rejected.\n * Reading the instance via the API continues to give the pre-request\n resource levels.\n\nUpon completion of the returned operation:\n\n * Billing begins for all successfully-allocated resources (some types\n may have lower than the requested levels).\n * All newly-reserved resources are available for serving the instance's\n tables.\n * The instance's new resource levels are readable via the API.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track the instance modification. The\nmetadata field type is\nUpdateInstanceMetadata.\nThe response field type is\nInstance, if successful.\n\nAuthorization requires `spanner.instances.update` permission on\nresource name.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", + // "httpMethod": "PATCH", + // "id": "spanner.projects.instances.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. A unique identifier for the instance, which cannot be changed\nafter the instance is created. Values are of the form\n`projects/\u003cproject\u003e/instances/a-z*[a-z0-9]`. The final\nsegment of the name must be between 6 and 30 characters in length.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "UpdateInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.setIamPolicy": + +type ProjectsInstancesSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on an instance resource. +// Replaces any +// existing policy. 
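// [Editor's note — not part of the generated file] Sketch for the
// instances.patch call above (same svc/ctx assumptions). The
// UpdateInstanceRequest fields used here (Instance, FieldMask), the
// Instance.NodeCount field and Operation.Name are assumptions based on the
// UpdateInstance RPC; they are not shown in this hunk. The call returns a
// long-running Operation that can be polled until the resize completes.
//
//	op, err := svc.Projects.Instances.Patch(
//		"projects/my-project/instances/my-instance",
//		&spanner.UpdateInstanceRequest{
//			Instance:  &spanner.Instance{NodeCount: 3}, // assumed fields
//			FieldMask: "node_count",                    // assumed field
//		},
//	).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("update operation: %s", op.Name) // Operation.Name assumed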
+// +// Authorization requires `spanner.instances.setIamPolicy` on +// resource. +func (r *ProjectsInstancesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesSetIamPolicyCall { + c := &ProjectsInstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesSetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on an instance resource. 
Replaces any\nexisting policy.\n\nAuthorization requires `spanner.instances.setIamPolicy` on\nresource.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:setIamPolicy", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:setIamPolicy", + // "request": { + // "$ref": "SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.testIamPermissions": + +type ProjectsInstancesTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that the caller has on the +// specified instance resource. +// +// Attempting this RPC on a non-existent Cloud Spanner instance resource +// will +// result in a NOT_FOUND error if the user has +// `spanner.instances.list` +// permission on the containing Google Cloud Project. Otherwise returns +// an +// empty set of permissions. +func (r *ProjectsInstancesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsInstancesTestIamPermissionsCall { + c := &ProjectsInstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsInstancesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesTestIamPermissionsCall) Context(ctx context.Context) *ProjectsInstancesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
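// [Editor's note — not part of the generated file] Read-modify-write sketch
// combining the getIamPolicy and setIamPolicy calls above (same svc/ctx
// assumptions). The Policy, Binding, Bindings, Role and Members identifiers
// are assumptions; only the request/response type names appear in this hunk.
//
//	res := "projects/my-project/instances/my-instance"
//	policy, err := svc.Projects.Instances.GetIamPolicy(res, &spanner.GetIamPolicyRequest{}).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	policy.Bindings = append(policy.Bindings, &spanner.Binding{ // assumed type and fields
//		Role:    "roles/spanner.databaseAdmin",
//		Members: []string{"user:alice@example.com"},
//	})
//	if _, err := svc.Projects.Instances.SetIamPolicy(res, &spanner.SetIamPolicyRequest{
//		Policy: policy, // assumed field
//	}).Context(ctx).Do(); err != nil {
//		log.Fatal(err)
//	}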
+func (c *ProjectsInstancesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that the caller has on the specified instance resource.\n\nAttempting this RPC on a non-existent Cloud Spanner instance resource will\nresult in a NOT_FOUND error if the user has `spanner.instances.list`\npermission on the containing Google Cloud Project. Otherwise returns an\nempty set of permissions.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:testIamPermissions", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. 
The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.create": + +type ProjectsInstancesDatabasesCreateCall struct { + s *Service + parent string + createdatabaserequest *CreateDatabaseRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new Cloud Spanner database and starts to prepare it +// for serving. +// The returned long-running operation will +// have a name of the format `/operations/` +// and +// can be used to track preparation of the database. The +// metadata field type is +// CreateDatabaseMetadata. The +// response field type is +// Database, if successful. +func (r *ProjectsInstancesDatabasesService) Create(parent string, createdatabaserequest *CreateDatabaseRequest) *ProjectsInstancesDatabasesCreateCall { + c := &ProjectsInstancesDatabasesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.createdatabaserequest = createdatabaserequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesCreateCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesCreateCall) Context(ctx context.Context) *ProjectsInstancesDatabasesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createdatabaserequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/databases") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
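// [Editor's note — not part of the generated file] Sketch for the
// instances.testIamPermissions call above (same svc/ctx assumptions). The
// Permissions fields on the request and response are assumptions; only the
// type names appear in this hunk.
//
//	resp, err := svc.Projects.Instances.TestIamPermissions(
//		"projects/my-project/instances/my-instance",
//		&spanner.TestIamPermissionsRequest{
//			Permissions: []string{"spanner.databases.list", "spanner.databases.create"}, // assumed field
//		},
//	).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("granted: %v", resp.Permissions) // assumed field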
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new Cloud Spanner database and starts to prepare it for serving.\nThe returned long-running operation will\nhave a name of the format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track preparation of the database. The\nmetadata field type is\nCreateDatabaseMetadata. The\nresponse field type is\nDatabase, if successful.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the instance that will serve the new database.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/databases", + // "request": { + // "$ref": "CreateDatabaseRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.dropDatabase": + +type ProjectsInstancesDatabasesDropDatabaseCall struct { + s *Service + database string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DropDatabase: Drops (aka deletes) a Cloud Spanner database. +func (r *ProjectsInstancesDatabasesService) DropDatabase(database string) *ProjectsInstancesDatabasesDropDatabaseCall { + c := &ProjectsInstancesDatabasesDropDatabaseCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.database = database + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesDropDatabaseCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesDropDatabaseCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
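// [Editor's note — not part of the generated file] Sketch for the
// databases.create call above (same svc/ctx assumptions). The CreateStatement
// field is an assumption based on the CreateDatabase RPC; it is not shown in
// this hunk. The call returns a long-running Operation that tracks preparation
// of the new database.
//
//	op, err := svc.Projects.Instances.Databases.Create(
//		"projects/my-project/instances/my-instance",
//		&spanner.CreateDatabaseRequest{
//			CreateStatement: "CREATE DATABASE `example-db`", // assumed field
//		},
//	).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("create operation: %s", op.Name) // Operation.Name assumed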
+func (c *ProjectsInstancesDatabasesDropDatabaseCall) Context(ctx context.Context) *ProjectsInstancesDatabasesDropDatabaseCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesDropDatabaseCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesDropDatabaseCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+database}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "database": c.database, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.dropDatabase" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesDropDatabaseCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Drops (aka deletes) a Cloud Spanner database.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}", + // "httpMethod": "DELETE", + // "id": "spanner.projects.instances.databases.dropDatabase", + // "parameterOrder": [ + // "database" + // ], + // "parameters": { + // "database": { + // "description": "Required. The database to be dropped.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+database}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.get": + +type ProjectsInstancesDatabasesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the state of a Cloud Spanner database. 
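// [Editor's note — not part of the generated file] Sketch for the
// databases.dropDatabase call above (same svc/ctx assumptions); every call in
// it appears in this hunk.
//
//	if _, err := svc.Projects.Instances.Databases.DropDatabase(
//		"projects/my-project/instances/my-instance/databases/example-db",
//	).Context(ctx).Do(); err != nil {
//		log.Fatal(err)
//	}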
+func (r *ProjectsInstancesDatabasesService) Get(name string) *ProjectsInstancesDatabasesGetCall { + c := &ProjectsInstancesDatabasesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesDatabasesGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesGetCall) Context(ctx context.Context) *ProjectsInstancesDatabasesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.get" call. +// Exactly one of *Database or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Database.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesGetCall) Do(opts ...googleapi.CallOption) (*Database, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Database{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the state of a Cloud Spanner database.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.databases.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the requested database. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Database" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.getDdl": + +type ProjectsInstancesDatabasesGetDdlCall struct { + s *Service + database string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetDdl: Returns the schema of a Cloud Spanner database as a list of +// formatted +// DDL statements. This method does not show pending schema updates, +// those may +// be queried using the Operations API. +func (r *ProjectsInstancesDatabasesService) GetDdl(database string) *ProjectsInstancesDatabasesGetDdlCall { + c := &ProjectsInstancesDatabasesGetDdlCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.database = database + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesGetDdlCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesGetDdlCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesDatabasesGetDdlCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesGetDdlCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesGetDdlCall) Context(ctx context.Context) *ProjectsInstancesDatabasesGetDdlCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesDatabasesGetDdlCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesGetDdlCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+database}/ddl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "database": c.database, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.getDdl" call. +// Exactly one of *GetDatabaseDdlResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GetDatabaseDdlResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesGetDdlCall) Do(opts ...googleapi.CallOption) (*GetDatabaseDdlResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GetDatabaseDdlResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the schema of a Cloud Spanner database as a list of formatted\nDDL statements. This method does not show pending schema updates, those may\nbe queried using the Operations API.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.databases.getDdl", + // "parameterOrder": [ + // "database" + // ], + // "parameters": { + // "database": { + // "description": "Required. The database whose schema we wish to get.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+database}/ddl", + // "response": { + // "$ref": "GetDatabaseDdlResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.getIamPolicy": + +type ProjectsInstancesDatabasesGetIamPolicyCall struct { + s *Service + resource string + getiampolicyrequest *GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a database resource. +// Returns an empty +// policy if a database exists but does not have a policy +// set. 
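// [Editor's note — not part of the generated file] Sketch for the
// databases.getDdl call above (same svc/ctx assumptions). The Statements field
// on GetDatabaseDdlResponse is an assumption; only the type name appears in
// this hunk.
//
//	ddl, err := svc.Projects.Instances.Databases.GetDdl(
//		"projects/my-project/instances/my-instance/databases/example-db",
//	).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, stmt := range ddl.Statements { // assumed field
//		log.Println(stmt)
//	}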
+// +// Authorization requires `spanner.databases.getIamPolicy` permission +// on +// resource. +func (r *ProjectsInstancesDatabasesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesDatabasesGetIamPolicyCall { + c := &ProjectsInstancesDatabasesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.getiampolicyrequest = getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesGetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesDatabasesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a database resource. 
Returns an empty\npolicy if a database exists but does not have a policy set.\n\nAuthorization requires `spanner.databases.getIamPolicy` permission on\nresource.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:getIamPolicy", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:getIamPolicy", + // "request": { + // "$ref": "GetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.list": + +type ProjectsInstancesDatabasesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists Cloud Spanner databases. +func (r *ProjectsInstancesDatabasesService) List(parent string) *ProjectsInstancesDatabasesListCall { + c := &ProjectsInstancesDatabasesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Number of databases +// to be returned in the response. If 0 or less, +// defaults to the server's maximum allowed page size. +func (c *ProjectsInstancesDatabasesListCall) PageSize(pageSize int64) *ProjectsInstancesDatabasesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If non-empty, +// `page_token` should contain a +// next_page_token from a +// previous ListDatabasesResponse. +func (c *ProjectsInstancesDatabasesListCall) PageToken(pageToken string) *ProjectsInstancesDatabasesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesListCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesDatabasesListCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInstancesDatabasesListCall) Context(ctx context.Context) *ProjectsInstancesDatabasesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/databases") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.list" call. +// Exactly one of *ListDatabasesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListDatabasesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesListCall) Do(opts ...googleapi.CallOption) (*ListDatabasesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListDatabasesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists Cloud Spanner databases.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.databases.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Number of databases to be returned in the response. If 0 or less,\ndefaults to the server's maximum allowed page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListDatabasesResponse.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. 
The instance whose databases should be listed.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/databases", + // "response": { + // "$ref": "ListDatabasesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsInstancesDatabasesListCall) Pages(ctx context.Context, f func(*ListDatabasesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "spanner.projects.instances.databases.setIamPolicy": + +type ProjectsInstancesDatabasesSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on a database resource. +// Replaces any +// existing policy. +// +// Authorization requires `spanner.databases.setIamPolicy` permission +// on +// resource. +func (r *ProjectsInstancesDatabasesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesDatabasesSetIamPolicyCall { + c := &ProjectsInstancesDatabasesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on a database resource. Replaces any\nexisting policy.\n\nAuthorization requires `spanner.databases.setIamPolicy` permission on\nresource.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:setIamPolicy", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:setIamPolicy", + // "request": { + // "$ref": "SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.testIamPermissions": + +type ProjectsInstancesDatabasesTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that the caller has on the +// specified database resource. +// +// Attempting this RPC on a non-existent Cloud Spanner database will +// result in +// a NOT_FOUND error if the user has `spanner.databases.list` permission +// on +// the containing Cloud Spanner instance. Otherwise returns an empty set +// of +// permissions. 
+func (r *ProjectsInstancesDatabasesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsInstancesDatabasesTestIamPermissionsCall { + c := &ProjectsInstancesDatabasesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) Context(ctx context.Context) *ProjectsInstancesDatabasesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that the caller has on the specified database resource.\n\nAttempting this RPC on a non-existent Cloud Spanner database will result in\na NOT_FOUND error if the user has `spanner.databases.list` permission on\nthe containing Cloud Spanner instance. Otherwise returns an empty set of\npermissions.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:testIamPermissions", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.updateDdl": + +type ProjectsInstancesDatabasesUpdateDdlCall struct { + s *Service + database string + updatedatabaseddlrequest *UpdateDatabaseDdlRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdateDdl: Updates the schema of a Cloud Spanner database +// by +// creating/altering/dropping tables, columns, indexes, etc. The +// returned +// long-running operation will have a name of +// the format `/operations/` and can be +// used to +// track execution of the schema change(s). The +// metadata field type is +// UpdateDatabaseDdlMetadata. The operation has no response. +func (r *ProjectsInstancesDatabasesService) UpdateDdl(database string, updatedatabaseddlrequest *UpdateDatabaseDdlRequest) *ProjectsInstancesDatabasesUpdateDdlCall { + c := &ProjectsInstancesDatabasesUpdateDdlCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.database = database + c.updatedatabaseddlrequest = updatedatabaseddlrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesUpdateDdlCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesUpdateDdlCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesUpdateDdlCall) Context(ctx context.Context) *ProjectsInstancesDatabasesUpdateDdlCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesUpdateDdlCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesUpdateDdlCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatedatabaseddlrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+database}/ddl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "database": c.database, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.updateDdl" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesUpdateDdlCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the schema of a Cloud Spanner database by\ncreating/altering/dropping tables, columns, indexes, etc. The returned\nlong-running operation will have a name of\nthe format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and can be used to\ntrack execution of the schema change(s). The\nmetadata field type is\nUpdateDatabaseDdlMetadata. The operation has no response.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl", + // "httpMethod": "PATCH", + // "id": "spanner.projects.instances.databases.updateDdl", + // "parameterOrder": [ + // "database" + // ], + // "parameters": { + // "database": { + // "description": "Required. 
The database to update.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+database}/ddl", + // "request": { + // "$ref": "UpdateDatabaseDdlRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.operations.cancel": + +type ProjectsInstancesDatabasesOperationsCancelCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running operation. +// The server +// makes a best effort to cancel the operation, but success is +// not +// guaranteed. If the server doesn't support this method, it +// returns +// `google.rpc.Code.UNIMPLEMENTED`. Clients can +// use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether +// the +// operation completed despite cancellation. On successful +// cancellation, +// the operation is not deleted; instead, it becomes an operation +// with +// an Operation.error value with a google.rpc.Status.code of +// 1, +// corresponding to `Code.CANCELLED`. +func (r *ProjectsInstancesDatabasesOperationsService) Cancel(name string) *ProjectsInstancesDatabasesOperationsCancelCall { + c := &ProjectsInstancesDatabasesOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesOperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesOperationsCancelCall) Context(ctx context.Context) *ProjectsInstancesDatabasesOperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesOperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesOperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.operations.cancel" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}:cancel", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.operations.cancel", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource to be cancelled.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:cancel", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.operations.delete": + +type ProjectsInstancesDatabasesOperationsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a long-running operation. This method indicates that +// the client is +// no longer interested in the operation result. It does not cancel +// the +// operation. If the server doesn't support this method, it +// returns +// `google.rpc.Code.UNIMPLEMENTED`. +func (r *ProjectsInstancesDatabasesOperationsService) Delete(name string) *ProjectsInstancesDatabasesOperationsDeleteCall { + c := &ProjectsInstancesDatabasesOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInstancesDatabasesOperationsDeleteCall) Context(ctx context.Context) *ProjectsInstancesDatabasesOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.operations.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. 
If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}", + // "httpMethod": "DELETE", + // "id": "spanner.projects.instances.databases.operations.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource to be deleted.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.operations.get": + +type ProjectsInstancesDatabasesOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this +// method to poll the operation result at intervals as recommended by +// the API +// service. +func (r *ProjectsInstancesDatabasesOperationsService) Get(name string) *ProjectsInstancesDatabasesOperationsGetCall { + c := &ProjectsInstancesDatabasesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesDatabasesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesOperationsGetCall) Context(ctx context.Context) *ProjectsInstancesDatabasesOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.databases.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.operations.list": + +type ProjectsInstancesDatabasesOperationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the +// request. If the +// server doesn't support this method, it returns +// `UNIMPLEMENTED`. +// +// NOTE: the `name` binding below allows API services to override the +// binding +// to use different resource name schemes, such as `users/*/operations`. +func (r *ProjectsInstancesDatabasesOperationsService) List(name string) *ProjectsInstancesDatabasesOperationsListCall { + c := &ProjectsInstancesDatabasesOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. +func (c *ProjectsInstancesDatabasesOperationsListCall) Filter(filter string) *ProjectsInstancesDatabasesOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. 
+func (c *ProjectsInstancesDatabasesOperationsListCall) PageSize(pageSize int64) *ProjectsInstancesDatabasesOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *ProjectsInstancesDatabasesOperationsListCall) PageToken(pageToken string) *ProjectsInstancesDatabasesOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesDatabasesOperationsListCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesOperationsListCall) Context(ctx context.Context) *ProjectsInstancesDatabasesOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.databases.operations.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name of the operation collection.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/operations$", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsInstancesDatabasesOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "spanner.projects.instances.databases.sessions.beginTransaction": + +type ProjectsInstancesDatabasesSessionsBeginTransactionCall struct { + s *Service + session string + begintransactionrequest *BeginTransactionRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BeginTransaction: Begins a new transaction. This step can often be +// skipped: +// Read, ExecuteSql and +// Commit can begin a new transaction as a +// side-effect. 
+func (r *ProjectsInstancesDatabasesSessionsService) BeginTransaction(session string, begintransactionrequest *BeginTransactionRequest) *ProjectsInstancesDatabasesSessionsBeginTransactionCall { + c := &ProjectsInstancesDatabasesSessionsBeginTransactionCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.session = session + c.begintransactionrequest = begintransactionrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsBeginTransactionCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsBeginTransactionCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.begintransactionrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+session}:beginTransaction") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.session, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.beginTransaction" call. +// Exactly one of *Transaction or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Transaction.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) Do(opts ...googleapi.CallOption) (*Transaction, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Transaction{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Begins a new transaction. This step can often be skipped:\nRead, ExecuteSql and\nCommit can begin a new transaction as a\nside-effect.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:beginTransaction", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.beginTransaction", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. The session in which the transaction runs.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+session}:beginTransaction", + // "request": { + // "$ref": "BeginTransactionRequest" + // }, + // "response": { + // "$ref": "Transaction" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.commit": + +type ProjectsInstancesDatabasesSessionsCommitCall struct { + s *Service + session string + commitrequest *CommitRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Commit: Commits a transaction. The request includes the mutations to +// be +// applied to rows in the database. +// +// `Commit` might return an `ABORTED` error. This can occur at any +// time; +// commonly, the cause is conflicts with concurrent +// transactions. However, it can also happen for a variety of +// other +// reasons. If `Commit` returns `ABORTED`, the caller should +// re-attempt +// the transaction from the beginning, re-using the same session. +func (r *ProjectsInstancesDatabasesSessionsService) Commit(session string, commitrequest *CommitRequest) *ProjectsInstancesDatabasesSessionsCommitCall { + c := &ProjectsInstancesDatabasesSessionsCommitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.session = session + c.commitrequest = commitrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsCommitCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsCommitCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsCommitCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsCommitCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesDatabasesSessionsCommitCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsCommitCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+session}:commit") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.session, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.commit" call. +// Exactly one of *CommitResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CommitResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesSessionsCommitCall) Do(opts ...googleapi.CallOption) (*CommitResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &CommitResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Commits a transaction. The request includes the mutations to be\napplied to rows in the database.\n\n`Commit` might return an `ABORTED` error. This can occur at any time;\ncommonly, the cause is conflicts with concurrent\ntransactions. However, it can also happen for a variety of other\nreasons. If `Commit` returns `ABORTED`, the caller should re-attempt\nthe transaction from the beginning, re-using the same session.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:commit", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.commit", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. 
The session in which the transaction to be committed is running.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+session}:commit", + // "request": { + // "$ref": "CommitRequest" + // }, + // "response": { + // "$ref": "CommitResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.create": + +type ProjectsInstancesDatabasesSessionsCreateCall struct { + s *Service + database string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new session. A session can be used to +// perform +// transactions that read and/or modify data in a Cloud Spanner +// database. +// Sessions are meant to be reused for many +// consecutive +// transactions. +// +// Sessions can only execute one transaction at a time. To +// execute +// multiple concurrent read-write/write-only transactions, +// create +// multiple sessions. Note that standalone reads and queries use +// a +// transaction internally, and count toward the one +// transaction +// limit. +// +// Cloud Spanner limits the number of sessions that can exist at any +// given +// time; thus, it is a good idea to delete idle and/or unneeded +// sessions. +// Aside from explicit deletes, Cloud Spanner can delete sessions +// for +// which no operations are sent for more than an hour, or due +// to +// internal errors. If a session is deleted, requests to it +// return `NOT_FOUND`. +// +// Idle sessions can be kept alive by sending a trivial SQL +// query +// periodically, e.g., "SELECT 1". +func (r *ProjectsInstancesDatabasesSessionsService) Create(database string) *ProjectsInstancesDatabasesSessionsCreateCall { + c := &ProjectsInstancesDatabasesSessionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.database = database + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsCreateCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsCreateCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+database}/sessions") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "database": c.database, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.create" call. +// Exactly one of *Session or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Session.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesSessionsCreateCall) Do(opts ...googleapi.CallOption) (*Session, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Session{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new session. A session can be used to perform\ntransactions that read and/or modify data in a Cloud Spanner database.\nSessions are meant to be reused for many consecutive\ntransactions.\n\nSessions can only execute one transaction at a time. To execute\nmultiple concurrent read-write/write-only transactions, create\nmultiple sessions. Note that standalone reads and queries use a\ntransaction internally, and count toward the one transaction\nlimit.\n\nCloud Spanner limits the number of sessions that can exist at any given\ntime; thus, it is a good idea to delete idle and/or unneeded sessions.\nAside from explicit deletes, Cloud Spanner can delete sessions for\nwhich no operations are sent for more than an hour, or due to\ninternal errors. If a session is deleted, requests to it\nreturn `NOT_FOUND`.\n\nIdle sessions can be kept alive by sending a trivial SQL query\nperiodically, e.g., `\"SELECT 1\"`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.create", + // "parameterOrder": [ + // "database" + // ], + // "parameters": { + // "database": { + // "description": "Required. The database in which the new session is created.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+database}/sessions", + // "response": { + // "$ref": "Session" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.delete": + +type ProjectsInstancesDatabasesSessionsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Ends a session, releasing server resources associated with +// it. 
+func (r *ProjectsInstancesDatabasesSessionsService) Delete(name string) *ProjectsInstancesDatabasesSessionsDeleteCall { + c := &ProjectsInstancesDatabasesSessionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsDeleteCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesSessionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Ends a session, releasing server resources associated with it.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", + // "httpMethod": "DELETE", + // "id": "spanner.projects.instances.databases.sessions.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. 
The name of the session to delete.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.executeSql": + +type ProjectsInstancesDatabasesSessionsExecuteSqlCall struct { + s *Service + session string + executesqlrequest *ExecuteSqlRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ExecuteSql: Executes an SQL query, returning all rows in a single +// reply. This +// method cannot be used to return a result set larger than 10 MiB; +// if the query yields more data than that, the query fails with +// a `FAILED_PRECONDITION` error. +// +// Queries inside read-write transactions might return `ABORTED`. +// If +// this occurs, the application should restart the transaction from +// the beginning. See Transaction for more details. +// +// Larger result sets can be fetched in streaming fashion by +// calling +// ExecuteStreamingSql instead. +func (r *ProjectsInstancesDatabasesSessionsService) ExecuteSql(session string, executesqlrequest *ExecuteSqlRequest) *ProjectsInstancesDatabasesSessionsExecuteSqlCall { + c := &ProjectsInstancesDatabasesSessionsExecuteSqlCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.session = session + c.executesqlrequest = executesqlrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsExecuteSqlCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsExecuteSqlCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.executesqlrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+session}:executeSql") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.session, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.executeSql" call. 
+// Exactly one of *ResultSet or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ResultSet.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) Do(opts ...googleapi.CallOption) (*ResultSet, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResultSet{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Executes an SQL query, returning all rows in a single reply. This\nmethod cannot be used to return a result set larger than 10 MiB;\nif the query yields more data than that, the query fails with\na `FAILED_PRECONDITION` error.\n\nQueries inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be fetched in streaming fashion by calling\nExecuteStreamingSql instead.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeSql", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.executeSql", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. The session in which the SQL query should be performed.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+session}:executeSql", + // "request": { + // "$ref": "ExecuteSqlRequest" + // }, + // "response": { + // "$ref": "ResultSet" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.executeStreamingSql": + +type ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall struct { + s *Service + session string + executesqlrequest *ExecuteSqlRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ExecuteStreamingSql: Like ExecuteSql, except returns the result +// set as a stream. Unlike ExecuteSql, there +// is no limit on the size of the returned result set. However, +// no +// individual row in the result set can exceed 100 MiB, and no +// column value can exceed 10 MiB. +func (r *ProjectsInstancesDatabasesSessionsService) ExecuteStreamingSql(session string, executesqlrequest *ExecuteSqlRequest) *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall { + c := &ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.session = session + c.executesqlrequest = executesqlrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.executesqlrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+session}:executeStreamingSql") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.session, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.executeStreamingSql" call. +// Exactly one of *PartialResultSet or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PartialResultSet.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) Do(opts ...googleapi.CallOption) (*PartialResultSet, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PartialResultSet{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Like ExecuteSql, except returns the result\nset as a stream. Unlike ExecuteSql, there\nis no limit on the size of the returned result set. 
However, no\nindividual row in the result set can exceed 100 MiB, and no\ncolumn value can exceed 10 MiB.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeStreamingSql", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.executeStreamingSql", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. The session in which the SQL query should be performed.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+session}:executeStreamingSql", + // "request": { + // "$ref": "ExecuteSqlRequest" + // }, + // "response": { + // "$ref": "PartialResultSet" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.get": + +type ProjectsInstancesDatabasesSessionsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a session. Returns `NOT_FOUND` if the session does not +// exist. +// This is mainly useful for determining whether a session is +// still +// alive. +func (r *ProjectsInstancesDatabasesSessionsService) Get(name string) *ProjectsInstancesDatabasesSessionsGetCall { + c := &ProjectsInstancesDatabasesSessionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesDatabasesSessionsGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesDatabasesSessionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsGetCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesDatabasesSessionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.get" call. +// Exactly one of *Session or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Session.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesSessionsGetCall) Do(opts ...googleapi.CallOption) (*Session, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Session{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a session. Returns `NOT_FOUND` if the session does not exist.\nThis is mainly useful for determining whether a session is still\nalive.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.databases.sessions.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the session to retrieve.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Session" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.read": + +type ProjectsInstancesDatabasesSessionsReadCall struct { + s *Service + session string + readrequest *ReadRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Read: Reads rows from the database using key lookups and scans, as +// a +// simple key/value style alternative to +// ExecuteSql. 
This method cannot be used to +// return a result set larger than 10 MiB; if the read matches more +// data than that, the read fails with a +// `FAILED_PRECONDITION` +// error. +// +// Reads inside read-write transactions might return `ABORTED`. If +// this occurs, the application should restart the transaction from +// the beginning. See Transaction for more details. +// +// Larger result sets can be yielded in streaming fashion by +// calling +// StreamingRead instead. +func (r *ProjectsInstancesDatabasesSessionsService) Read(session string, readrequest *ReadRequest) *ProjectsInstancesDatabasesSessionsReadCall { + c := &ProjectsInstancesDatabasesSessionsReadCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.session = session + c.readrequest = readrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsReadCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsReadCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsReadCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsReadCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsReadCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsReadCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.readrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+session}:read") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.session, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.read" call. +// Exactly one of *ResultSet or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ResultSet.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesSessionsReadCall) Do(opts ...googleapi.CallOption) (*ResultSet, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResultSet{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Reads rows from the database using key lookups and scans, as a\nsimple key/value style alternative to\nExecuteSql. This method cannot be used to\nreturn a result set larger than 10 MiB; if the read matches more\ndata than that, the read fails with a `FAILED_PRECONDITION`\nerror.\n\nReads inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be yielded in streaming fashion by calling\nStreamingRead instead.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:read", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.read", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. The session in which the read should be performed.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+session}:read", + // "request": { + // "$ref": "ReadRequest" + // }, + // "response": { + // "$ref": "ResultSet" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.rollback": + +type ProjectsInstancesDatabasesSessionsRollbackCall struct { + s *Service + session string + rollbackrequest *RollbackRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rollback: Rolls back a transaction, releasing any locks it holds. It +// is a good +// idea to call this for any transaction that includes one or more +// Read or ExecuteSql requests and +// ultimately decides not to commit. +// +// `Rollback` returns `OK` if it successfully aborts the transaction, +// the +// transaction was already aborted, or the transaction is not +// found. `Rollback` never returns `ABORTED`. +func (r *ProjectsInstancesDatabasesSessionsService) Rollback(session string, rollbackrequest *RollbackRequest) *ProjectsInstancesDatabasesSessionsRollbackCall { + c := &ProjectsInstancesDatabasesSessionsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.session = session + c.rollbackrequest = rollbackrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsRollbackCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsRollbackCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsRollbackCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsRollbackCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsRollbackCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsRollbackCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbackrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+session}:rollback") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.session, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.rollback" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesDatabasesSessionsRollbackCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rolls back a transaction, releasing any locks it holds. It is a good\nidea to call this for any transaction that includes one or more\nRead or ExecuteSql requests and\nultimately decides not to commit.\n\n`Rollback` returns `OK` if it successfully aborts the transaction, the\ntransaction was already aborted, or the transaction is not\nfound. `Rollback` never returns `ABORTED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:rollback", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.rollback", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. 
The session in which the transaction to roll back is running.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+session}:rollback", + // "request": { + // "$ref": "RollbackRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.databases.sessions.streamingRead": + +type ProjectsInstancesDatabasesSessionsStreamingReadCall struct { + s *Service + session string + readrequest *ReadRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// StreamingRead: Like Read, except returns the result set as a +// stream. Unlike Read, there is no limit on the +// size of the returned result set. However, no individual row in +// the result set can exceed 100 MiB, and no column value can exceed +// 10 MiB. +func (r *ProjectsInstancesDatabasesSessionsService) StreamingRead(session string, readrequest *ReadRequest) *ProjectsInstancesDatabasesSessionsStreamingReadCall { + c := &ProjectsInstancesDatabasesSessionsStreamingReadCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.session = session + c.readrequest = readrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesSessionsStreamingReadCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) Context(ctx context.Context) *ProjectsInstancesDatabasesSessionsStreamingReadCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.readrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+session}:streamingRead") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.session, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.sessions.streamingRead" call. +// Exactly one of *PartialResultSet or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PartialResultSet.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) Do(opts ...googleapi.CallOption) (*PartialResultSet, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PartialResultSet{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Like Read, except returns the result set as a\nstream. Unlike Read, there is no limit on the\nsize of the returned result set. However, no individual row in\nthe result set can exceed 100 MiB, and no column value can exceed\n10 MiB.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:streamingRead", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.databases.sessions.streamingRead", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. The session in which the read should be performed.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+session}:streamingRead", + // "request": { + // "$ref": "ReadRequest" + // }, + // "response": { + // "$ref": "PartialResultSet" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.operations.cancel": + +type ProjectsInstancesOperationsCancelCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running operation. +// The server +// makes a best effort to cancel the operation, but success is +// not +// guaranteed. If the server doesn't support this method, it +// returns +// `google.rpc.Code.UNIMPLEMENTED`. Clients can +// use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether +// the +// operation completed despite cancellation. On successful +// cancellation, +// the operation is not deleted; instead, it becomes an operation +// with +// an Operation.error value with a google.rpc.Status.code of +// 1, +// corresponding to `Code.CANCELLED`. +func (r *ProjectsInstancesOperationsService) Cancel(name string) *ProjectsInstancesOperationsCancelCall { + c := &ProjectsInstancesOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsInstancesOperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesOperationsCancelCall) Context(ctx context.Context) *ProjectsInstancesOperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesOperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesOperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.operations.cancel" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. 
On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}:cancel", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.operations.cancel", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource to be cancelled.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:cancel", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.operations.delete": + +type ProjectsInstancesOperationsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a long-running operation. This method indicates that +// the client is +// no longer interested in the operation result. It does not cancel +// the +// operation. If the server doesn't support this method, it +// returns +// `google.rpc.Code.UNIMPLEMENTED`. +func (r *ProjectsInstancesOperationsService) Delete(name string) *ProjectsInstancesOperationsDeleteCall { + c := &ProjectsInstancesOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsInstancesOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesOperationsDeleteCall) Context(ctx context.Context) *ProjectsInstancesOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.operations.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}", + // "httpMethod": "DELETE", + // "id": "spanner.projects.instances.operations.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource to be deleted.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.operations.get": + +type ProjectsInstancesOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this +// method to poll the operation result at intervals as recommended by +// the API +// service. +func (r *ProjectsInstancesOperationsService) Get(name string) *ProjectsInstancesOperationsGetCall { + c := &ProjectsInstancesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInstancesOperationsGetCall) Context(ctx context.Context) *ProjectsInstancesOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "spanner.projects.instances.operations.list": + +type ProjectsInstancesOperationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the +// request. 
If the +// server doesn't support this method, it returns +// `UNIMPLEMENTED`. +// +// NOTE: the `name` binding below allows API services to override the +// binding +// to use different resource name schemes, such as `users/*/operations`. +func (r *ProjectsInstancesOperationsService) List(name string) *ProjectsInstancesOperationsListCall { + c := &ProjectsInstancesOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. +func (c *ProjectsInstancesOperationsListCall) Filter(filter string) *ProjectsInstancesOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *ProjectsInstancesOperationsListCall) PageSize(pageSize int64) *ProjectsInstancesOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *ProjectsInstancesOperationsListCall) PageToken(pageToken string) *ProjectsInstancesOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsInstancesOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesOperationsListCall) IfNoneMatch(entityTag string) *ProjectsInstancesOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesOperationsListCall) Context(ctx context.Context) *ProjectsInstancesOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.operations.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name of the operation collection.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/operations$", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsInstancesOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/vendor/google.golang.org/api/spectrum/v1explorer/spectrum-gen.go b/vendor/google.golang.org/api/spectrum/v1explorer/spectrum-gen.go index 2d7f1f0c9..d17b1f322 100644 --- a/vendor/google.golang.org/api/spectrum/v1explorer/spectrum-gen.go +++ b/vendor/google.golang.org/api/spectrum/v1explorer/spectrum-gen.go @@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Paws *PawsService } @@ -69,6 +70,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewPawsService(s *Service) *PawsService { rs := &PawsService{s: s} return rs @@ -1999,6 +2004,7 @@ func (c *PawsGetSpectrumCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pawsgetspectrumrequest) if err != nil { @@ -2114,6 +2120,7 @@ func (c *PawsGetSpectrumBatchCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pawsgetspectrumbatchrequest) if err != nil { @@ -2229,6 +2236,7 @@ func (c *PawsInitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pawsinitrequest) if err != nil { @@ -2347,6 +2355,7 @@ func (c *PawsNotifySpectrumUseCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pawsnotifyspectrumuserequest) if err != nil { @@ -2463,6 +2472,7 @@ func (c *PawsRegisterCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pawsregisterrequest) if err != nil { @@ -2580,6 +2590,7 @@ func (c *PawsVerifyDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pawsverifydevicerequest) if 
err != nil { diff --git a/vendor/google.golang.org/api/speech/v1beta1/speech-api.json b/vendor/google.golang.org/api/speech/v1beta1/speech-api.json index 295611438..770eb91a9 100644 --- a/vendor/google.golang.org/api/speech/v1beta1/speech-api.json +++ b/vendor/google.golang.org/api/speech/v1beta1/speech-api.json @@ -1,5 +1,11 @@ { - "id": "speech:v1beta1", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "protocol": "rest", + "version": "v1beta1", + "baseUrl": "https://speech.googleapis.com/", "auth": { "oauth2": { "scopes": { @@ -9,290 +15,419 @@ } } }, + "servicePath": "", "description": "Google Cloud Speech API.", - "protocol": "rest", + "kind": "discovery#restDescription", + "rootUrl": "https://speech.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "speech", + "batchPath": "batch", + "documentationLink": "https://cloud.google.com/speech/", + "id": "speech:v1beta1", + "revision": "20170217", "title": "Google Cloud Speech API", + "ownerName": "Google", + "discoveryVersion": "v1", + "version_module": "True", "resources": { "operations": { "methods": { - "get": { - "id": "speech.operations.get", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "name" - ], - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", - "flatPath": "v1beta1/operations/{operationsId}", - "httpMethod": "GET", - "parameters": { - "name": { - "description": "The name of the operation resource.", - "required": true, - "pattern": "^[^/]+$", - "location": "path", - "type": "string" - } - }, - "path": "v1beta1/operations/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, "list": { - "id": "speech.operations.list", "response": { "$ref": "ListOperationsResponse" }, "parameterOrder": [], - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`.", - "flatPath": "v1beta1/operations", "httpMethod": "GET", "parameters": { - "pageSize": { - "description": "The standard list page size.", - "location": "query", - "type": "integer", - "format": "int32" - }, "filter": { - "description": "The standard list filter.", + "type": "string", "location": "query", - "type": "string" + "description": "The standard list filter." }, "name": { - "description": "The name of the operation collection.", + "type": "string", "location": "query", - "type": "string" + "description": "The name of the operation collection." }, "pageToken": { - "description": "The standard list page token.", "location": "query", + "description": "The standard list page token.", "type": "string" + }, + "pageSize": { + "location": "query", + "description": "The standard list page size.", + "format": "int32", + "type": "integer" } }, - "path": "v1beta1/operations", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] + ], + "flatPath": "v1beta1/operations", + "path": "v1beta1/operations", + "id": "speech.operations.list", + "description": "Lists operations that match the specified filter in the request. 
If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding below allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`." }, - "delete": { - "id": "speech.operations.delete", - "response": { - "$ref": "Empty" - }, + "get": { + "httpMethod": "GET", "parameterOrder": [ "name" ], - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", - "flatPath": "v1beta1/operations/{operationsId}", - "httpMethod": "DELETE", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { "name": { - "description": "The name of the operation resource to be deleted.", "required": true, + "type": "string", "pattern": "^[^/]+$", "location": "path", - "type": "string" + "description": "The name of the operation resource." } }, + "flatPath": "v1beta1/operations/{operationsId}", + "id": "speech.operations.get", "path": "v1beta1/operations/{+name}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice." }, "cancel": { - "id": "speech.operations.cancel", + "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], "response": { "$ref": "Empty" }, + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^[^/]+$", + "location": "path", + "description": "The name of the operation resource to be cancelled." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1beta1/operations/{operationsId}:cancel", + "id": "speech.operations.cancel", + "path": "v1beta1/operations/{+name}:cancel" + }, + "delete": { + "httpMethod": "DELETE", "parameterOrder": [ "name" ], - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. 
On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", - "request": { - "$ref": "CancelOperationRequest" + "response": { + "$ref": "Empty" }, - "flatPath": "v1beta1/operations/{operationsId}:cancel", - "httpMethod": "POST", "parameters": { "name": { - "description": "The name of the operation resource to be cancelled.", - "required": true, - "pattern": "^[^/]+$", "location": "path", - "type": "string" + "description": "The name of the operation resource to be deleted.", + "required": true, + "type": "string", + "pattern": "^[^/]+$" } }, - "path": "v1beta1/operations/{+name}:cancel", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] + ], + "flatPath": "v1beta1/operations/{operationsId}", + "id": "speech.operations.delete", + "path": "v1beta1/operations/{+name}", + "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`." } } }, "speech": { "methods": { - "syncrecognize": { - "id": "speech.speech.syncrecognize", + "asyncrecognize": { "response": { - "$ref": "SyncRecognizeResponse" + "$ref": "Operation" }, "parameterOrder": [], - "description": "Performs synchronous speech recognition: receive results after all audio\nhas been sent and processed.", - "request": { - "$ref": "SyncRecognizeRequest" - }, - "flatPath": "v1beta1/speech:syncrecognize", "httpMethod": "POST", "parameters": {}, - "path": "v1beta1/speech:syncrecognize", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "asyncrecognize": { + ], + "flatPath": "v1beta1/speech:asyncrecognize", + "path": "v1beta1/speech:asyncrecognize", "id": "speech.speech.asyncrecognize", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [], - "description": "Performs asynchronous speech recognition: receive results via the\n[google.longrunning.Operations]\n(/speech/reference/rest/v1beta1/operations#Operation)\ninterface. Returns either an\n`Operation.error` or an `Operation.response` which contains\nan `AsyncRecognizeResponse` message.", "request": { "$ref": "AsyncRecognizeRequest" }, - "flatPath": "v1beta1/speech:asyncrecognize", + "description": "Performs asynchronous speech recognition: receive results via the\n[google.longrunning.Operations]\n(/speech/reference/rest/v1beta1/operations#Operation)\ninterface. Returns either an\n`Operation.error` or an `Operation.response` which contains\nan `AsyncRecognizeResponse` message." + }, + "syncrecognize": { + "flatPath": "v1beta1/speech:syncrecognize", + "id": "speech.speech.syncrecognize", + "path": "v1beta1/speech:syncrecognize", + "description": "Performs synchronous speech recognition: receive results after all audio\nhas been sent and processed.", + "request": { + "$ref": "SyncRecognizeRequest" + }, "httpMethod": "POST", - "parameters": {}, - "path": "v1beta1/speech:asyncrecognize", + "parameterOrder": [], + "response": { + "$ref": "SyncRecognizeResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] + ], + "parameters": {} } } } }, + "parameters": { + "upload_protocol": { + "type": "string", + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\")." 
+ }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "fields": { + "type": "string", + "location": "query", + "description": "Selector specifying which fields to include in a partial response." + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "$.xgafv": { + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format." + }, + "callback": { + "type": "string", + "location": "query", + "description": "JSONP" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "access_token": { + "type": "string", + "location": "query", + "description": "OAuth access token." + }, + "key": { + "type": "string", + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token." + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "oauth_token": { + "type": "string", + "location": "query", + "description": "OAuth 2.0 token for the current user." + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + } + }, "schemas": { - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. 
For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", "type": "object", "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" }, - "details": { - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "operations": { "type": "array", "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "$ref": "Operation" + }, + "description": "A list of operations that matches the specified filter in the request." + } + }, + "id": "ListOperationsResponse" + }, + "SpeechContext": { + "description": "Provides \"hints\" to the speech recognizer to favor specific words and phrases\nin the results.", + "type": "object", + "properties": { + "phrases": { + "description": "*Optional* A list of strings containing words and phrases \"hints\" so that\nthe speech recognition is more likely to recognize them. This can be used\nto improve the accuracy for specific words and phrases, for example, if\nspecific commands are typically spoken by the user. This can also be used\nto add additional words to the vocabulary of the recognizer. See\n[usage limits](https://cloud.google.com/speech/limits#content).", + "type": "array", + "items": { + "type": "string" } + } + }, + "id": "SpeechContext" + }, + "SpeechRecognitionAlternative": { + "type": "object", + "properties": { + "confidence": { + "type": "number", + "description": "*Output-only* The confidence estimate between 0.0 and 1.0. A higher number\nindicates an estimated greater likelihood that the recognized words are\ncorrect. This field is typically provided only for the top hypothesis, and\nonly for `is_final=true` results. Clients should not rely on the\n`confidence` field as it is not guaranteed to be accurate, or even set, in\nany of the results.\nThe default of 0.0 is a sentinel value indicating `confidence` was not set.", + "format": "float" }, - "message": { - "description": "A developer-facing error message, which should be in English. 
Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" + "transcript": { + "type": "string", + "description": "*Output-only* Transcript text representing the words that the user spoke." } }, - "id": "Status" + "id": "SpeechRecognitionAlternative", + "description": "Alternative hypotheses (a.k.a. n-best list)." + }, + "SpeechRecognitionResult": { + "description": "A speech recognition result corresponding to a portion of the audio.", + "type": "object", + "properties": { + "alternatives": { + "description": "*Output-only* May contain one or more recognition hypotheses (up to the\nmaximum specified in `max_alternatives`).", + "type": "array", + "items": { + "$ref": "SpeechRecognitionAlternative" + } + } + }, + "id": "SpeechRecognitionResult" + }, + "AsyncRecognizeRequest": { + "type": "object", + "properties": { + "config": { + "$ref": "RecognitionConfig", + "description": "*Required* Provides information to the recognizer that specifies how to\nprocess the request." + }, + "audio": { + "$ref": "RecognitionAudio", + "description": "*Required* The audio data to be recognized." + } + }, + "id": "AsyncRecognizeRequest", + "description": "The top-level message sent by the client for the `AsyncRecognize` method." }, "RecognitionAudio": { - "description": "Contains audio data in the encoding specified in the `RecognitionConfig`.\nEither `content` or `uri` must be supplied. Supplying both or neither\nreturns google.rpc.Code.INVALID_ARGUMENT. See\n[audio limits](https://cloud.google.com/speech/limits#content).", "type": "object", "properties": { "content": { - "description": "The audio data bytes encoded as specified in\n`RecognitionConfig`. Note: as with all bytes fields, protobuffers use a\npure binary representation, whereas JSON representations use base64.", "type": "string", + "description": "The audio data bytes encoded as specified in\n`RecognitionConfig`. Note: as with all bytes fields, protobuffers use a\npure binary representation, whereas JSON representations use base64.", "format": "byte" }, "uri": { - "description": "URI that points to a file that contains audio data bytes as specified in\n`RecognitionConfig`. Currently, only Google Cloud Storage URIs are\nsupported, which must be specified in the following format:\n`gs://bucket_name/object_name` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/reference-uris).", - "type": "string" + "type": "string", + "description": "URI that points to a file that contains audio data bytes as specified in\n`RecognitionConfig`. Currently, only Google Cloud Storage URIs are\nsupported, which must be specified in the following format:\n`gs://bucket_name/object_name` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/reference-uris)." } }, - "id": "RecognitionAudio" + "id": "RecognitionAudio", + "description": "Contains audio data in the encoding specified in the `RecognitionConfig`.\nEither `content` or `uri` must be supplied. Supplying both or neither\nreturns google.rpc.Code.INVALID_ARGUMENT. See\n[audio limits](https://cloud.google.com/speech/limits#content)." 
}, "Operation": { "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", "type": "object", "properties": { - "error": { - "description": "The error result of the operation in case of failure or cancellation.", - "$ref": "Status" - }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" - }, - "metadata": { - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "response": { + "type": "object", "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" + "type": "any", + "description": "Properties of the object. Contains field @type with type URL." }, - "type": "object" + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`." }, - "response": { - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "name": { + "type": "string", + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`." + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "type": "object", "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "type": "object" + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any." }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", - "type": "string" + "done": { + "type": "boolean", + "description": "If the value is `false`, it means the operation is still in progress.\nIf true, the operation is completed, and either `error` or `response` is\navailable." 
} }, "id": "Operation" }, - "SpeechRecognitionAlternative": { - "description": "Alternative hypotheses (a.k.a. n-best list).", - "type": "object", - "properties": { - "transcript": { - "description": "[Output-only] Transcript text representing the words that the user spoke.", - "type": "string" - }, - "confidence": { - "description": "[Output-only] The confidence estimate between 0.0 and 1.0. A higher number\nmeans the system is more confident that the recognition is correct.\nThis field is typically provided only for the top hypothesis, and only for\n`is_final=true` results.\nThe default of 0.0 is a sentinel value indicating confidence was not set.", - "type": "number", - "format": "float" - } - }, - "id": "SpeechRecognitionAlternative" - }, - "CancelOperationRequest": { - "description": "The request message for Operations.CancelOperation.", - "type": "object", - "properties": {}, - "id": "CancelOperationRequest" - }, "RecognitionConfig": { - "description": "The `RecognitionConfig` message provides information to the recognizer\nthat specifies how to process the request.", + "description": "Provides information to the recognizer that specifies how to process the\nrequest.", "type": "object", "properties": { "maxAlternatives": { - "description": "[Optional] Maximum number of recognition hypotheses to be returned.\nSpecifically, the maximum number of `SpeechRecognitionAlternative` messages\nwithin each `SpeechRecognitionResult`.\nThe server may return fewer than `max_alternatives`.\nValid values are `0`-`30`. A value of `0` or `1` will return a maximum of\n`1`. If omitted, defaults to `1`.", "type": "integer", + "description": "*Optional* Maximum number of recognition hypotheses to be returned.\nSpecifically, the maximum number of `SpeechRecognitionAlternative` messages\nwithin each `SpeechRecognitionResult`.\nThe server may return fewer than `max_alternatives`.\nValid values are `0`-`30`. A value of `0` or `1` will return a maximum of\none. If omitted, will return a maximum of one.", "format": "int32" }, + "sampleRate": { + "description": "*Required* Sample rate in Hertz of the audio data sent in all\n`RecognitionAudio` messages. Valid values are: 8000-48000.\n16000 is optimal. For best results, set the sampling rate of the audio\nsource to 16000 Hz. If that's not possible, use the native sample rate of\nthe audio source (instead of re-sampling).", + "format": "int32", + "type": "integer" + }, "languageCode": { - "description": "[Optional] The language of the supplied audio as a BCP-47 language tag.\nExample: \"en-GB\" https://www.rfc-editor.org/rfc/bcp/bcp47.txt\nIf omitted, defaults to \"en-US\". See\n[Language Support](https://cloud.google.com/speech/docs/languages)\nfor a list of the currently supported language codes.", + "description": "*Optional* The language of the supplied audio as a BCP-47 language tag.\nExample: \"en-GB\" https://www.rfc-editor.org/rfc/bcp/bcp47.txt\nIf omitted, defaults to \"en-US\". See\n[Language Support](https://cloud.google.com/speech/docs/languages)\nfor a list of the currently supported language codes.", "type": "string" }, "speechContext": { - "description": "[Optional] A means to provide context to assist the speech recognition.", - "$ref": "SpeechContext" + "$ref": "SpeechContext", + "description": "*Optional* A means to provide context to assist the speech recognition." 
}, "encoding": { - "description": "[Required] Encoding of audio data sent in all `RecognitionAudio` messages.", "enum": [ "ENCODING_UNSPECIFIED", "LINEAR16", @@ -301,6 +436,8 @@ "AMR", "AMR_WB" ], + "description": "*Required* Encoding of audio data sent in all `RecognitionAudio` messages.", + "type": "string", "enumDescriptions": [ "Not specified. Will return result google.rpc.Code.INVALID_ARGUMENT.", "Uncompressed 16-bit signed little-endian samples (Linear PCM).\nThis is the only encoding that may be used by `AsyncRecognize`.", @@ -308,222 +445,76 @@ "8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.", "Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz.", "Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz." - ], - "type": "string" + ] }, "profanityFilter": { - "description": "[Optional] If set to `true`, the server will attempt to filter out\nprofanities, replacing all but the initial character in each filtered word\nwith asterisks, e.g. \"f***\". If set to `false` or omitted, profanities\nwon't be filtered out.", - "type": "boolean" - }, - "sampleRate": { - "description": "[Required] Sample rate in Hertz of the audio data sent in all\n`RecognitionAudio` messages. Valid values are: 8000-48000.\n16000 is optimal. For best results, set the sampling rate of the audio\nsource to 16000 Hz. If that's not possible, use the native sample rate of\nthe audio source (instead of re-sampling).", - "type": "integer", - "format": "int32" + "type": "boolean", + "description": "*Optional* If set to `true`, the server will attempt to filter out\nprofanities, replacing all but the initial character in each filtered word\nwith asterisks, e.g. \"f***\". If set to `false` or omitted, profanities\nwon't be filtered out." } }, "id": "RecognitionConfig" }, "SyncRecognizeRequest": { - "description": "`SyncRecognizeRequest` is the top-level message sent by the client for\nthe `SyncRecognize` method.", + "description": "The top-level message sent by the client for the `SyncRecognize` method.", "type": "object", "properties": { - "audio": { - "description": "[Required] The audio data to be recognized.", - "$ref": "RecognitionAudio" - }, "config": { - "description": "[Required] The `config` message provides information to the recognizer\nthat specifies how to process the request.", - "$ref": "RecognitionConfig" - } - }, - "id": "SyncRecognizeRequest" - }, - "SpeechRecognitionResult": { - "description": "A speech recognition result corresponding to a portion of the audio.", - "type": "object", - "properties": { - "alternatives": { - "description": "[Output-only] May contain one or more recognition hypotheses (up to the\nmaximum specified in `max_alternatives`).", - "type": "array", - "items": { - "$ref": "SpeechRecognitionAlternative" - } - } - }, - "id": "SpeechRecognitionResult" - }, - "ListOperationsResponse": { - "description": "The response message for Operations.ListOperations.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" + "$ref": "RecognitionConfig", + "description": "*Required* Provides information to the recognizer that specifies how to\nprocess the request." }, - "operations": { - "description": "A list of operations that matches the specified filter in the request.", - "type": "array", - "items": { - "$ref": "Operation" - } + "audio": { + "$ref": "RecognitionAudio", + "description": "*Required* The audio data to be recognized." 
} }, - "id": "ListOperationsResponse" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" + "id": "SyncRecognizeRequest" }, "SyncRecognizeResponse": { - "description": "`SyncRecognizeResponse` is the only message returned to the client by\n`SyncRecognize`. It contains the result as zero or more sequential\n`SpeechRecognitionResult` messages.", + "description": "The only message returned to the client by `SyncRecognize`. method. It\ncontains the result as zero or more sequential `SpeechRecognitionResult`\nmessages.", "type": "object", "properties": { "results": { - "description": "[Output-only] Sequential list of transcription results corresponding to\nsequential portions of audio.", "type": "array", "items": { "$ref": "SpeechRecognitionResult" - } + }, + "description": "*Output-only* Sequential list of transcription results corresponding to\nsequential portions of audio." } }, "id": "SyncRecognizeResponse" }, - "SpeechContext": { - "description": "Provides \"hints\" to the speech recognizer to favor specific words and phrases\nin the results.", + "Status": { "type": "object", "properties": { - "phrases": { - "description": "[Optional] A list of strings containing words and phrases \"hints\" so that\nthe speech recognition is more likely to recognize them. This can be used\nto improve the accuracy for specific words and phrases, for example, if\nspecific commands are typically spoken by the user. This can also be used\nto add additional words to the vocabulary of the recognizer. See\n[usage limits](https://cloud.google.com/speech/limits#content).", + "code": { + "type": "integer", + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { "type": "array", "items": { - "type": "string" - } + "additionalProperties": { + "type": "any", + "description": "Properties of the object. Contains field @type with type URL." + }, + "type": "object" + }, + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use." } }, - "id": "SpeechContext" + "id": "Status", + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. 
If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons." }, - "AsyncRecognizeRequest": { - "description": "`AsyncRecognizeRequest` is the top-level message sent by the client for\nthe `AsyncRecognize` method.", + "Empty": { "type": "object", - "properties": { - "audio": { - "description": "[Required] The audio data to be recognized.", - "$ref": "RecognitionAudio" - }, - "config": { - "description": "[Required] The `config` message provides information to the recognizer\nthat specifies how to process the request.", - "$ref": "RecognitionConfig" - } - }, - "id": "AsyncRecognizeRequest" - } - }, - "revision": "20170109", - "basePath": "", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "version_module": "True", - "discoveryVersion": "v1", - "baseUrl": "https://speech.googleapis.com/", - "name": "speech", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "properties": {}, + "id": "Empty", + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`." 
} - }, - "documentationLink": "https://cloud.google.com/speech/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1beta1", - "rootUrl": "https://speech.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go b/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go index 0b452fe16..bc8c508eb 100644 --- a/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go +++ b/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go @@ -62,9 +62,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Operations *OperationsService @@ -78,6 +79,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -96,16 +101,15 @@ type SpeechService struct { s *Service } -// AsyncRecognizeRequest: `AsyncRecognizeRequest` is the top-level -// message sent by the client for +// AsyncRecognizeRequest: The top-level message sent by the client for // the `AsyncRecognize` method. type AsyncRecognizeRequest struct { - // Audio: [Required] The audio data to be recognized. + // Audio: *Required* The audio data to be recognized. Audio *RecognitionAudio `json:"audio,omitempty"` - // Config: [Required] The `config` message provides information to the - // recognizer - // that specifies how to process the request. + // Config: *Required* Provides information to the recognizer that + // specifies how to + // process the request. Config *RecognitionConfig `json:"config,omitempty"` // ForceSendFields is a list of field names (e.g. "Audio") to @@ -131,11 +135,6 @@ func (s *AsyncRecognizeRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CancelOperationRequest: The request message for -// Operations.CancelOperation. -type CancelOperationRequest struct { -} - // Empty: A generic empty message that you can re-use to avoid defining // duplicated // empty messages in your APIs. A typical example is to use it as the @@ -315,11 +314,11 @@ func (s *RecognitionAudio) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RecognitionConfig: The `RecognitionConfig` message provides -// information to the recognizer -// that specifies how to process the request. +// RecognitionConfig: Provides information to the recognizer that +// specifies how to process the +// request. type RecognitionConfig struct { - // Encoding: [Required] Encoding of audio data sent in all + // Encoding: *Required* Encoding of audio data sent in all // `RecognitionAudio` messages. // // Possible values: @@ -347,7 +346,7 @@ type RecognitionConfig struct { // be 16000 Hz. Encoding string `json:"encoding,omitempty"` - // LanguageCode: [Optional] The language of the supplied audio as a + // LanguageCode: *Optional* The language of the supplied audio as a // BCP-47 language tag. 
// Example: "en-GB" https://www.rfc-editor.org/rfc/bcp/bcp47.txt // If omitted, defaults to "en-US". See @@ -356,7 +355,7 @@ type RecognitionConfig struct { // for a list of the currently supported language codes. LanguageCode string `json:"languageCode,omitempty"` - // MaxAlternatives: [Optional] Maximum number of recognition hypotheses + // MaxAlternatives: *Optional* Maximum number of recognition hypotheses // to be returned. // Specifically, the maximum number of `SpeechRecognitionAlternative` // messages @@ -364,10 +363,10 @@ type RecognitionConfig struct { // The server may return fewer than `max_alternatives`. // Valid values are `0`-`30`. A value of `0` or `1` will return a // maximum of - // `1`. If omitted, defaults to `1`. + // one. If omitted, will return a maximum of one. MaxAlternatives int64 `json:"maxAlternatives,omitempty"` - // ProfanityFilter: [Optional] If set to `true`, the server will attempt + // ProfanityFilter: *Optional* If set to `true`, the server will attempt // to filter out // profanities, replacing all but the initial character in each filtered // word @@ -376,7 +375,7 @@ type RecognitionConfig struct { // won't be filtered out. ProfanityFilter bool `json:"profanityFilter,omitempty"` - // SampleRate: [Required] Sample rate in Hertz of the audio data sent in + // SampleRate: *Required* Sample rate in Hertz of the audio data sent in // all // `RecognitionAudio` messages. Valid values are: 8000-48000. // 16000 is optimal. For best results, set the sampling rate of the @@ -386,7 +385,7 @@ type RecognitionConfig struct { // the audio source (instead of re-sampling). SampleRate int64 `json:"sampleRate,omitempty"` - // SpeechContext: [Optional] A means to provide context to assist the + // SpeechContext: *Optional* A means to provide context to assist the // speech recognition. SpeechContext *SpeechContext `json:"speechContext,omitempty"` @@ -417,7 +416,7 @@ func (s *RecognitionConfig) MarshalJSON() ([]byte, error) { // specific words and phrases // in the results. type SpeechContext struct { - // Phrases: [Optional] A list of strings containing words and phrases + // Phrases: *Optional* A list of strings containing words and phrases // "hints" so that // the speech recognition is more likely to recognize them. This can be // used @@ -456,18 +455,22 @@ func (s *SpeechContext) MarshalJSON() ([]byte, error) { // SpeechRecognitionAlternative: Alternative hypotheses (a.k.a. n-best // list). type SpeechRecognitionAlternative struct { - // Confidence: [Output-only] The confidence estimate between 0.0 and + // Confidence: *Output-only* The confidence estimate between 0.0 and // 1.0. A higher number - // means the system is more confident that the recognition is - // correct. - // This field is typically provided only for the top hypothesis, and - // only for - // `is_final=true` results. - // The default of 0.0 is a sentinel value indicating confidence was not - // set. + // indicates an estimated greater likelihood that the recognized words + // are + // correct. This field is typically provided only for the top + // hypothesis, and + // only for `is_final=true` results. Clients should not rely on + // the + // `confidence` field as it is not guaranteed to be accurate, or even + // set, in + // any of the results. + // The default of 0.0 is a sentinel value indicating `confidence` was + // not set. 
Confidence float64 `json:"confidence,omitempty"` - // Transcript: [Output-only] Transcript text representing the words that + // Transcript: *Output-only* Transcript text representing the words that // the user spoke. Transcript string `json:"transcript,omitempty"` @@ -511,7 +514,7 @@ func (s *SpeechRecognitionAlternative) UnmarshalJSON(data []byte) error { // SpeechRecognitionResult: A speech recognition result corresponding to // a portion of the audio. type SpeechRecognitionResult struct { - // Alternatives: [Output-only] May contain one or more recognition + // Alternatives: *Output-only* May contain one or more recognition // hypotheses (up to the // maximum specified in `max_alternatives`). Alternatives []*SpeechRecognitionAlternative `json:"alternatives,omitempty"` @@ -658,16 +661,15 @@ func (s *Status) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SyncRecognizeRequest: `SyncRecognizeRequest` is the top-level message -// sent by the client for +// SyncRecognizeRequest: The top-level message sent by the client for // the `SyncRecognize` method. type SyncRecognizeRequest struct { - // Audio: [Required] The audio data to be recognized. + // Audio: *Required* The audio data to be recognized. Audio *RecognitionAudio `json:"audio,omitempty"` - // Config: [Required] The `config` message provides information to the - // recognizer - // that specifies how to process the request. + // Config: *Required* Provides information to the recognizer that + // specifies how to + // process the request. Config *RecognitionConfig `json:"config,omitempty"` // ForceSendFields is a list of field names (e.g. "Audio") to @@ -693,13 +695,13 @@ func (s *SyncRecognizeRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SyncRecognizeResponse: `SyncRecognizeResponse` is the only message -// returned to the client by -// `SyncRecognize`. It contains the result as zero or more -// sequential -// `SpeechRecognitionResult` messages. +// SyncRecognizeResponse: The only message returned to the client by +// `SyncRecognize`. method. It +// contains the result as zero or more sequential +// `SpeechRecognitionResult` +// messages. type SyncRecognizeResponse struct { - // Results: [Output-only] Sequential list of transcription results + // Results: *Output-only* Sequential list of transcription results // corresponding to // sequential portions of audio. Results []*SpeechRecognitionResult `json:"results,omitempty"` @@ -734,12 +736,11 @@ func (s *SyncRecognizeResponse) MarshalJSON() ([]byte, error) { // method id "speech.operations.cancel": type OperationsCancelCall struct { - s *Service - name string - canceloperationrequest *CancelOperationRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // Cancel: Starts asynchronous cancellation on a long-running operation. @@ -760,10 +761,9 @@ type OperationsCancelCall struct { // an Operation.error value with a google.rpc.Status.code of // 1, // corresponding to `Code.CANCELLED`. 
-func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { +func (r *OperationsService) Cancel(name string) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.canceloperationrequest = canceloperationrequest return c } @@ -798,12 +798,8 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/operations/{+name}:cancel") urls += "?" + c.urlParams_.Encode() @@ -870,9 +866,6 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) // } // }, // "path": "v1beta1/operations/{+name}:cancel", - // "request": { - // "$ref": "CancelOperationRequest" - // }, // "response": { // "$ref": "Empty" // }, @@ -937,6 +930,7 @@ func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/operations/{+name}") @@ -1077,6 +1071,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1249,6 +1244,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1415,6 +1411,7 @@ func (c *SpeechAsyncrecognizeCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.asyncrecognizerequest) if err != nil { @@ -1537,6 +1534,7 @@ func (c *SpeechSyncrecognizeCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.syncrecognizerequest) if err != nil { diff --git a/vendor/google.golang.org/api/sqladmin/v1beta3/sqladmin-gen.go b/vendor/google.golang.org/api/sqladmin/v1beta3/sqladmin-gen.go index 4c704dbfe..b1634b5ab 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta3/sqladmin-gen.go +++ b/vendor/google.golang.org/api/sqladmin/v1beta3/sqladmin-gen.go @@ -69,9 +69,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google 
use only BackupRuns *BackupRunsService @@ -93,6 +94,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBackupRunsService(s *Service) *BackupRunsService { rs := &BackupRunsService{s: s} return rs @@ -1996,6 +2001,7 @@ func (c *BackupRunsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2175,6 +2181,7 @@ func (c *BackupRunsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2359,6 +2366,7 @@ func (c *FlagsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2474,6 +2482,7 @@ func (c *InstancesCloneCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesclonerequest) if err != nil { @@ -2608,6 +2617,7 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}") @@ -2745,6 +2755,7 @@ func (c *InstancesExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesexportrequest) if err != nil { @@ -2897,6 +2908,7 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3037,6 +3049,7 @@ func (c *InstancesImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesimportrequest) if err != nil { @@ -3178,6 +3191,7 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) if err != nil { @@ -3337,6 +3351,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3501,6 +3516,7 @@ func (c *InstancesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) if err != nil { @@ -3644,6 +3660,7 @@ func (c *InstancesPromoteReplicaCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/promoteReplica") @@ -3779,6 +3796,7 @@ func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/resetSslConfig") @@ -3913,6 +3931,7 @@ func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/restart") @@ -4049,6 +4068,7 @@ func (c *InstancesRestoreBackupCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/restoreBackup") @@ -4200,6 +4220,7 @@ func (c *InstancesSetRootPasswordCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesetrootpasswordrequest) if err != nil { @@ -4344,6 +4365,7 @@ func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) if err != nil { @@ -4501,6 +4523,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4673,6 +4696,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4844,6 +4868,7 @@ func (c *SslCertsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, 
"projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}") @@ -5000,6 +5025,7 @@ func (c *SslCertsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5148,6 +5174,7 @@ func (c *SslCertsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertsinsertrequest) if err != nil { @@ -5302,6 +5329,7 @@ func (c *SslCertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5449,6 +5477,7 @@ func (c *TiersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json index 7cc77d478..5cdb9d7c0 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/t9edOSlewaa7VyZ4dauqNzzg4MU\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/HY0tRuJ-vWPnM8lK2hqBBFUYsUk\"", "discoveryVersion": "v1", "id": "sqladmin:v1beta4", "name": "sqladmin", "canonicalName": "SQL Admin", "version": "v1beta4", - "revision": "20161220", + "revision": "20170217", "title": "Cloud SQL Administration API", "description": "Creates and configures Cloud SQL instances, which provide fully-managed MySQL databases.", "ownerDomain": "google.com", @@ -789,6 +789,21 @@ } } }, + "Labels": { + "id": "Labels", + "type": "object", + "description": "User defined labels for Cloud SQL instances.", + "properties": { + "key": { + "type": "string", + "description": "The key of the label." + }, + "value": { + "type": "string", + "description": "The value of the label." + } + } + }, "LocationPreference": { "id": "LocationPreference", "type": "object", @@ -1099,6 +1114,10 @@ "type": "string" } }, + "availabilityType": { + "type": "string", + "description": "Reserved for future use." + }, "backupConfiguration": { "$ref": "BackupConfiguration", "description": "The daily backup configuration for the instance." @@ -1136,6 +1155,13 @@ "description": "This is always sql#settings.", "default": "sql#settings" }, + "labels": { + "type": "array", + "description": "User defined labels.", + "items": { + "$ref": "Labels" + } + }, "locationPreference": { "$ref": "LocationPreference", "description": "The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or GCE zone for better performance. App Engine co-location is only applicable to First Generation instances." @@ -1166,6 +1192,11 @@ "type": "boolean", "description": "Configuration to increase storage size automatically. The default value is false. Applies only to Second Generation instances." 
}, + "storageAutoResizeLimit": { + "type": "string", + "description": "The maximum size to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit. Applies only to Second Generation instances.", + "format": "int64" + }, "tier": { "type": "string", "description": "The tier of service for this instance, for example D1, D2. For more information, see pricing.", @@ -1825,6 +1856,13 @@ "path": "flags", "httpMethod": "GET", "description": "List all available database flags for Google Cloud SQL instances.", + "parameters": { + "databaseVersion": { + "type": "string", + "description": "Database version for flag retrieval. Flags are specific to the database version.", + "location": "query" + } + }, "response": { "$ref": "FlagsListResponse" }, @@ -2066,6 +2104,11 @@ "httpMethod": "GET", "description": "Lists instances under a given project in the alphabetical order of the instance name.", "parameters": { + "filter": { + "type": "string", + "description": "A filter expression for filtering listed instances.", + "location": "query" + }, "maxResults": { "type": "integer", "description": "The maximum number of results to return per response.", diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go index a49440130..9f6dba8a9 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go @@ -71,9 +71,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BackupRuns *BackupRunsService @@ -99,6 +100,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBackupRunsService(s *Service) *BackupRunsService { rs := &BackupRunsService{s: s} return rs @@ -1362,6 +1367,37 @@ func (s *IpMapping) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Labels: User defined labels for Cloud SQL instances. +type Labels struct { + // Key: The key of the label. + Key string `json:"key,omitempty"` + + // Value: The value of the label. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *Labels) MarshalJSON() ([]byte, error) { + type noMethod Labels + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LocationPreference: Preferred location. This specifies where a Cloud // SQL instance should preferably be located, either in a specific // Compute Engine zone, or co-located with an App Engine application. @@ -1841,6 +1877,9 @@ type Settings struct { // instances. AuthorizedGaeApplications []string `json:"authorizedGaeApplications,omitempty"` + // AvailabilityType: Reserved for future use. + AvailabilityType string `json:"availabilityType,omitempty"` + // BackupConfiguration: The daily backup configuration for the instance. BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` @@ -1875,6 +1914,9 @@ type Settings struct { // Kind: This is always sql#settings. Kind string `json:"kind,omitempty"` + // Labels: User defined labels. + Labels []*Labels `json:"labels,omitempty"` + // LocationPreference: The location preference settings. This allows the // instance to be located as near as possible to either an App Engine // app or GCE zone for better performance. App Engine co-location is @@ -1907,6 +1949,12 @@ type Settings struct { // Generation instances. StorageAutoResize bool `json:"storageAutoResize,omitempty"` + // StorageAutoResizeLimit: The maximum size to which storage capacity + // can be automatically increased. The default value is 0, which + // specifies that there is no limit. Applies only to Second Generation + // instances. + StorageAutoResizeLimit int64 `json:"storageAutoResizeLimit,omitempty,string"` + // Tier: The tier of service for this instance, for example D1, D2. For // more information, see pricing. 
Tier string `json:"tier,omitempty"` @@ -2419,6 +2467,7 @@ func (c *BackupRunsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/backupRuns/{id}") @@ -2575,6 +2624,7 @@ func (c *BackupRunsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2724,6 +2774,7 @@ func (c *BackupRunsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.backuprun) if err != nil { @@ -2894,6 +2945,7 @@ func (c *BackupRunsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3065,6 +3117,7 @@ func (c *DatabasesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/databases/{database}") @@ -3221,6 +3274,7 @@ func (c *DatabasesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3369,6 +3423,7 @@ func (c *DatabasesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.database) if err != nil { @@ -3522,6 +3577,7 @@ func (c *DatabasesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3664,6 +3720,7 @@ func (c *DatabasesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.database2) if err != nil { @@ -3819,6 +3876,7 @@ func (c *DatabasesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.database2) if err != nil { @@ -3936,6 +3994,14 @@ func (r *FlagsService) List() *FlagsListCall { return c } +// DatabaseVersion sets the optional parameter "databaseVersion": +// Database version for flag retrieval. 
Flags are specific to the +// database version. +func (c *FlagsListCall) DatabaseVersion(databaseVersion string) *FlagsListCall { + c.urlParams_.Set("databaseVersion", databaseVersion) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -3977,6 +4043,7 @@ func (c *FlagsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4030,6 +4097,13 @@ func (c *FlagsListCall) Do(opts ...googleapi.CallOption) (*FlagsListResponse, er // "description": "List all available database flags for Google Cloud SQL instances.", // "httpMethod": "GET", // "id": "sql.flags.list", + // "parameters": { + // "databaseVersion": { + // "description": "Database version for flag retrieval. Flags are specific to the database version.", + // "location": "query", + // "type": "string" + // } + // }, // "path": "flags", // "response": { // "$ref": "FlagsListResponse" @@ -4095,6 +4169,7 @@ func (c *InstancesCloneCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesclonerequest) if err != nil { @@ -4237,6 +4312,7 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}") @@ -4374,6 +4450,7 @@ func (c *InstancesExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesexportrequest) if err != nil { @@ -4517,6 +4594,7 @@ func (c *InstancesFailoverCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesfailoverrequest) if err != nil { @@ -4671,6 +4749,7 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4811,6 +4890,7 @@ func (c *InstancesImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesimportrequest) if err != nil { @@ -4952,6 +5032,7 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) if err != nil { 
@@ -5055,6 +5136,13 @@ func (r *InstancesService) List(project string) *InstancesListCall { return c } +// Filter sets the optional parameter "filter": A filter expression for +// filtering listed instances. +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results to return per response. func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { @@ -5111,6 +5199,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5171,6 +5260,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstancesListResp // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression for filtering listed instances.", + // "location": "query", + // "type": "string" + // }, // "maxResults": { // "description": "The maximum number of results to return per response.", // "format": "uint32", @@ -5277,6 +5371,7 @@ func (c *InstancesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) if err != nil { @@ -5420,6 +5515,7 @@ func (c *InstancesPromoteReplicaCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/promoteReplica") @@ -5558,6 +5654,7 @@ func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/resetSslConfig") @@ -5692,6 +5789,7 @@ func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/restart") @@ -5828,6 +5926,7 @@ func (c *InstancesRestoreBackupCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesrestorebackuprequest) if err != nil { @@ -5970,6 +6069,7 @@ func (c *InstancesStartReplicaCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/startReplica") @@ -6104,6 +6204,7 @@ func (c *InstancesStopReplicaCall) doRequest(alt string) (*http.Response, 
error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/stopReplica") @@ -6240,6 +6341,7 @@ func (c *InstancesTruncateLogCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancestruncatelogrequest) if err != nil { @@ -6386,6 +6488,7 @@ func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.databaseinstance) if err != nil { @@ -6541,6 +6644,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6705,6 +6809,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6878,6 +6983,7 @@ func (c *SslCertsCreateEphemeralCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertscreateephemeralrequest) if err != nil { @@ -7023,6 +7129,7 @@ func (c *SslCertsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}") @@ -7180,6 +7287,7 @@ func (c *SslCertsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7329,6 +7437,7 @@ func (c *SslCertsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertsinsertrequest) if err != nil { @@ -7482,6 +7591,7 @@ func (c *SslCertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7629,6 +7739,7 @@ func (c *TiersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } 
@@ -7760,6 +7871,7 @@ func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/instances/{instance}/users") @@ -7910,6 +8022,7 @@ func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { @@ -8063,6 +8176,7 @@ func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8204,6 +8318,7 @@ func (c *UsersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) if err != nil { diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 67fe1a210..dacb5fc5d 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/sMgjc4eoIFjgub4daTU-MGW0WMA\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/HgbrZgh9zgUkvtwaM_qfO-xyD4k\"", "discoveryVersion": "v1", "id": "storage:v1", "name": "storage", "version": "v1", - "revision": "20161109", + "revision": "20170208", "title": "Cloud Storage JSON API", "description": "Stores and retrieves potentially large, immutable data objects.", "ownerDomain": "google.com", @@ -156,7 +156,7 @@ }, "id": { "type": "string", - "description": "The ID of the bucket." + "description": "The ID of the bucket. For buckets, the id and name properities are the same." }, "kind": { "type": "string", @@ -616,7 +616,7 @@ }, "id": { "type": "string", - "description": "The ID of the object." + "description": "The ID of the object, including the bucket name, object name, and generation number." }, "kind": { "type": "string", @@ -646,7 +646,7 @@ }, "name": { "type": "string", - "description": "The name of this object. Required if not specified by URL parameter." + "description": "The name of the object. Required if not specified by URL parameter." 
}, "owner": { "type": "object", diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 0f1d094a7..bdb425c65 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -78,9 +78,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BucketAccessControls *BucketAccessControlsService @@ -102,6 +103,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { rs := &BucketAccessControlsService{s: s} return rs @@ -172,7 +177,8 @@ type Bucket struct { // Etag: HTTP 1.1 Entity tag for the bucket. Etag string `json:"etag,omitempty"` - // Id: The ID of the bucket. + // Id: The ID of the bucket. For buckets, the id and name properities + // are the same. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For buckets, this is always @@ -985,7 +991,8 @@ type Object struct { // versioning. Generation int64 `json:"generation,omitempty,string"` - // Id: The ID of the object. + // Id: The ID of the object, including the bucket name, object name, and + // generation number. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For objects, this is always @@ -1009,7 +1016,7 @@ type Object struct { // of a particular generation of a particular object. Metageneration int64 `json:"metageneration,omitempty,string"` - // Name: The name of this object. Required if not specified by URL + // Name: The name of the object. Required if not specified by URL // parameter. 
Name string `json:"name,omitempty"` @@ -1438,6 +1445,7 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") @@ -1556,6 +1564,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1693,6 +1702,7 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -1836,6 +1846,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1968,6 +1979,7 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -2112,6 +2124,7 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -2268,6 +2281,7 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") @@ -2417,6 +2431,7 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2625,6 +2640,7 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { @@ -2851,6 +2867,7 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3101,6 +3118,7 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { @@ -3370,6 +3388,7 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { @@ -3567,6 +3586,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -3664,6 +3684,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") @@ -3782,6 +3803,7 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3920,6 +3942,7 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4080,6 +4103,7 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4224,6 +4248,7 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4368,6 +4393,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4521,6 +4547,7 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") @@ -4663,6 +4690,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4824,6 +4852,7 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4991,6 +5020,7 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5147,6 +5177,7 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -5315,6 +5346,7 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -5511,6 +5543,7 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) if err != nil { @@ -5823,6 +5856,7 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { @@ -6131,6 +6165,7 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") @@ -6331,6 +6366,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6673,6 +6709,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { @@ -7005,6 +7042,7 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7274,6 +7312,7 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { @@ -7623,6 +7662,7 @@ func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { @@ -7955,6 +7995,7 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { @@ -8233,6 +8274,7 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { diff --git a/vendor/google.golang.org/api/storage/v1beta1/storage-gen.go b/vendor/google.golang.org/api/storage/v1beta1/storage-gen.go index d5467f946..795086566 100644 --- a/vendor/google.golang.org/api/storage/v1beta1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1beta1/storage-gen.go @@ -70,9 +70,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BucketAccessControls *BucketAccessControlsService @@ -90,6 +91,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { rs := &BucketAccessControlsService{s: s} return rs @@ -765,6 +770,7 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") @@ -882,6 +888,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1018,6 +1025,7 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -1160,6 +1168,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1291,6 +1300,7 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader 
= nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -1434,6 +1444,7 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -1573,6 +1584,7 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") @@ -1691,6 +1703,7 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1845,6 +1858,7 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { @@ -2014,6 +2028,7 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2199,6 +2214,7 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { @@ -2357,6 +2373,7 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { @@ -2507,6 +2524,7 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") @@ -2634,6 +2652,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2780,6 +2799,7 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -2932,6 +2952,7 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3073,6 +3094,7 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -3226,6 +3248,7 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -3375,6 +3398,7 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") @@ -3503,6 +3527,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3744,6 +3769,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { @@ -4015,6 +4041,7 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4216,6 +4243,7 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { @@ -4384,6 +4412,7 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { diff --git a/vendor/google.golang.org/api/storage/v1beta2/storage-gen.go b/vendor/google.golang.org/api/storage/v1beta2/storage-gen.go index a4a574bd3..b7be395dc 100644 --- a/vendor/google.golang.org/api/storage/v1beta2/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1beta2/storage-gen.go @@ -72,9 +72,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only BucketAccessControls *BucketAccessControlsService @@ -96,6 +97,10 @@ func (s *Service) 
userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { rs := &BucketAccessControlsService{s: s} return rs @@ -1223,6 +1228,7 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") @@ -1340,6 +1346,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1476,6 +1483,7 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -1618,6 +1626,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1749,6 +1758,7 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -1892,6 +1902,7 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { @@ -2049,6 +2060,7 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") @@ -2197,6 +2209,7 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2364,6 +2377,7 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { @@ -2542,6 +2556,7 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { 
reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2744,6 +2759,7 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { @@ -2932,6 +2948,7 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { @@ -3089,6 +3106,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -3184,6 +3202,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") @@ -3301,6 +3320,7 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3438,6 +3458,7 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -3597,6 +3618,7 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3740,6 +3762,7 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -3883,6 +3906,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4035,6 +4059,7 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") @@ -4176,6 +4201,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4336,6 +4362,7 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4502,6 +4529,7 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4657,6 +4685,7 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4824,6 +4853,7 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { @@ -4998,6 +5028,7 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) if err != nil { @@ -5266,6 +5297,7 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { @@ -5551,6 +5583,7 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") @@ -5750,6 +5783,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6057,6 +6091,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { @@ -6359,6 +6394,7 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6606,6 +6642,7 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { @@ -6845,6 +6882,7 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { @@ -7099,6 +7137,7 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { diff --git a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go index f0731b699..836fbf7f2 100644 --- a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go +++ b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go @@ -64,9 +64,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only GoogleServiceAccounts *GoogleServiceAccountsService @@ -84,6 +85,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGoogleServiceAccountsService(s *Service) *GoogleServiceAccountsService { rs := &GoogleServiceAccountsService{s: s} return rs @@ -1236,6 +1241,7 @@ func (c *GoogleServiceAccountsGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1362,6 +1368,7 @@ func (c *TransferJobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.transferjob) if err != nil { @@ -1497,6 +1504,7 @@ func (c *TransferJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1666,6 +1674,7 @@ func (c *TransferJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1821,6 +1830,7 @@ func (c *TransferJobsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.updatetransferjobrequest) if err != nil { @@ -1955,6 +1965,7 @@ func (c *TransferOperationsCancelCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") @@ -2080,6 +2091,7 @@ func (c *TransferOperationsDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") @@ -2217,6 +2229,7 @@ func (c *TransferOperationsGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2380,6 +2393,7 @@ func (c *TransferOperationsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2546,6 +2560,7 @@ func (c *TransferOperationsPauseCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pausetransferoperationrequest) if err != nil { @@ -2680,6 +2695,7 @@ func (c *TransferOperationsResumeCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.resumetransferoperationrequest) if err != nil { @@ -2836,6 +2852,7 @@ func (c *V1GetGoogleServiceAccountCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/surveys/v2/surveys-gen.go b/vendor/google.golang.org/api/surveys/v2/surveys-gen.go index 0a51abce4..1cf92b448 100644 --- a/vendor/google.golang.org/api/surveys/v2/surveys-gen.go +++ b/vendor/google.golang.org/api/surveys/v2/surveys-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Mobileapppanels *MobileapppanelsService @@ -85,6 +86,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewMobileapppanelsService(s *Service) *MobileapppanelsService { rs := &MobileapppanelsService{s: s} return rs @@ -988,6 +993,7 @@ func (c *MobileapppanelsGetCall) 
doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1143,6 +1149,7 @@ func (c *MobileapppanelsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1276,6 +1283,7 @@ func (c *MobileapppanelsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.mobileapppanel) if err != nil { @@ -1423,6 +1431,7 @@ func (c *ResultsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1571,6 +1580,7 @@ func (c *SurveysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "surveys/{surveyUrlId}") @@ -1706,6 +1716,7 @@ func (c *SurveysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1834,6 +1845,7 @@ func (c *SurveysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.survey) if err != nil { @@ -1979,6 +1991,7 @@ func (c *SurveysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2111,6 +2124,7 @@ func (c *SurveysStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.surveysstartrequest) if err != nil { @@ -2242,6 +2256,7 @@ func (c *SurveysStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "surveys/{resourceId}/stop") @@ -2368,6 +2383,7 @@ func (c *SurveysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.survey) if err != nil { diff --git a/vendor/google.golang.org/api/tagmanager/v1/tagmanager-gen.go 
b/vendor/google.golang.org/api/tagmanager/v1/tagmanager-gen.go index 2eb596029..cd36ee939 100644 --- a/vendor/google.golang.org/api/tagmanager/v1/tagmanager-gen.go +++ b/vendor/google.golang.org/api/tagmanager/v1/tagmanager-gen.go @@ -81,9 +81,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Accounts *AccountsService } @@ -95,6 +96,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewAccountsService(s *Service) *AccountsService { rs := &AccountsService{s: s} rs.Containers = NewAccountsContainersService(s) @@ -1893,6 +1898,7 @@ func (c *AccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2030,6 +2036,7 @@ func (c *AccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2154,6 +2161,7 @@ func (c *AccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.account) if err != nil { @@ -2292,6 +2300,7 @@ func (c *AccountsContainersCreateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.container) if err != nil { @@ -2425,6 +2434,7 @@ func (c *AccountsContainersDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}") @@ -2541,6 +2551,7 @@ func (c *AccountsContainersGetCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2687,6 +2698,7 @@ func (c *AccountsContainersListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2826,6 +2838,7 @@ func (c *AccountsContainersUpdateCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, 
err := googleapi.WithoutDataWrapper.JSONReader(c.container) if err != nil { @@ -2974,6 +2987,7 @@ func (c *AccountsContainersEnvironmentsCreateCall) doRequest(alt string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.environment) if err != nil { @@ -3117,6 +3131,7 @@ func (c *AccountsContainersEnvironmentsDeleteCall) doRequest(alt string) (*http. reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/environments/{environmentId}") @@ -3243,6 +3258,7 @@ func (c *AccountsContainersEnvironmentsGetCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3399,6 +3415,7 @@ func (c *AccountsContainersEnvironmentsListCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3549,6 +3566,7 @@ func (c *AccountsContainersEnvironmentsPatchCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.environment) if err != nil { @@ -3715,6 +3733,7 @@ func (c *AccountsContainersEnvironmentsUpdateCall) doRequest(alt string) (*http. 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.environment) if err != nil { @@ -3871,6 +3890,7 @@ func (c *AccountsContainersFoldersCreateCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) if err != nil { @@ -4014,6 +4034,7 @@ func (c *AccountsContainersFoldersDeleteCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/folders/{folderId}") @@ -4140,6 +4161,7 @@ func (c *AccountsContainersFoldersGetCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4296,6 +4318,7 @@ func (c *AccountsContainersFoldersListCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4444,6 +4467,7 @@ func (c *AccountsContainersFoldersUpdateCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) if err != nil { @@ -4611,6 +4635,7 @@ func (c *AccountsContainersFoldersEntitiesListCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4781,6 +4806,7 @@ func (c *AccountsContainersMoveFoldersUpdateCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) if err != nil { @@ -4924,6 +4950,7 @@ func (c *AccountsContainersReauthorizeEnvironmentsUpdateCall) doRequest(alt stri reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.environment) if err != nil { @@ -5075,6 +5102,7 @@ func (c *AccountsContainersTagsCreateCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tag) if err != nil { @@ -5218,6 +5246,7 @@ func (c *AccountsContainersTagsDeleteCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, 
"accounts/{accountId}/containers/{containerId}/tags/{tagId}") @@ -5344,6 +5373,7 @@ func (c *AccountsContainersTagsGetCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5500,6 +5530,7 @@ func (c *AccountsContainersTagsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5648,6 +5679,7 @@ func (c *AccountsContainersTagsUpdateCall) doRequest(alt string) (*http.Response reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tag) if err != nil { @@ -5804,6 +5836,7 @@ func (c *AccountsContainersTriggersCreateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.trigger) if err != nil { @@ -5947,6 +5980,7 @@ func (c *AccountsContainersTriggersDeleteCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/triggers/{triggerId}") @@ -6073,6 +6107,7 @@ func (c *AccountsContainersTriggersGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6229,6 +6264,7 @@ func (c *AccountsContainersTriggersListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6378,6 +6414,7 @@ func (c *AccountsContainersTriggersUpdateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.trigger) if err != nil { @@ -6534,6 +6571,7 @@ func (c *AccountsContainersVariablesCreateCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variable) if err != nil { @@ -6677,6 +6715,7 @@ func (c *AccountsContainersVariablesDeleteCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/variables/{variableId}") @@ -6803,6 +6842,7 @@ func (c *AccountsContainersVariablesGetCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6959,6 +6999,7 @@ func (c *AccountsContainersVariablesListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7108,6 +7149,7 @@ func (c *AccountsContainersVariablesUpdateCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.variable) if err != nil { @@ -7264,6 +7306,7 @@ func (c *AccountsContainersVersionsCreateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createcontainerversionrequestversionoptions) if err != nil { @@ -7407,6 +7450,7 @@ func (c *AccountsContainersVersionsDeleteCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/versions/{containerVersionId}") @@ -7533,6 +7577,7 @@ func (c *AccountsContainersVersionsGetCall) doRequest(alt string) (*http.Respons reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7704,6 +7749,7 @@ func (c *AccountsContainersVersionsListCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7864,6 +7910,7 @@ func (c *AccountsContainersVersionsPublishCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/versions/{containerVersionId}/publish") @@ -8015,6 +8062,7 @@ func (c *AccountsContainersVersionsRestoreCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/versions/{containerVersionId}/restore") @@ -8158,6 +8206,7 @@ func (c *AccountsContainersVersionsUndeleteCall) doRequest(alt string) (*http.Re reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/containers/{containerId}/versions/{containerVersionId}/undelete") @@ -8311,6 +8360,7 @@ func (c *AccountsContainersVersionsUpdateCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.containerversion) if err != nil { @@ -8465,6 +8515,7 @@ func (c *AccountsPermissionsCreateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.useraccess) if err != nil { @@ -8599,6 +8650,7 @@ func (c *AccountsPermissionsDeleteCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "accounts/{accountId}/permissions/{permissionId}") @@ -8715,6 +8767,7 @@ func (c *AccountsPermissionsGetCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8861,6 +8914,7 @@ func (c *AccountsPermissionsListCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8991,6 +9045,7 @@ func (c *AccountsPermissionsUpdateCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.useraccess) if err != nil { diff --git a/vendor/google.golang.org/api/taskqueue/v1beta1/taskqueue-gen.go b/vendor/google.golang.org/api/taskqueue/v1beta1/taskqueue-gen.go index ee42a0319..403465a18 100644 --- a/vendor/google.golang.org/api/taskqueue/v1beta1/taskqueue-gen.go +++ b/vendor/google.golang.org/api/taskqueue/v1beta1/taskqueue-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Taskqueues *TaskqueuesService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewTaskqueuesService(s *Service) *TaskqueuesService { rs := &TaskqueuesService{s: s} return rs @@ -410,6 +415,7 @@ func (c *TaskqueuesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -554,6 +560,7 @@ func (c *TasksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) 
urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/taskqueues/{taskqueue}/tasks/{task}") @@ -681,6 +688,7 @@ func (c *TasksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -828,6 +836,7 @@ func (c *TasksLeaseCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/taskqueues/{taskqueue}/tasks/lease") @@ -989,6 +998,7 @@ func (c *TasksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/taskqueue/v1beta2/taskqueue-gen.go b/vendor/google.golang.org/api/taskqueue/v1beta2/taskqueue-gen.go index bae1862da..699388f14 100644 --- a/vendor/google.golang.org/api/taskqueue/v1beta2/taskqueue-gen.go +++ b/vendor/google.golang.org/api/taskqueue/v1beta2/taskqueue-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Taskqueues *TaskqueuesService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewTaskqueuesService(s *Service) *TaskqueuesService { rs := &TaskqueuesService{s: s} return rs @@ -417,6 +422,7 @@ func (c *TaskqueuesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -561,6 +567,7 @@ func (c *TasksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/taskqueues/{taskqueue}/tasks/{task}") @@ -688,6 +695,7 @@ func (c *TasksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -835,6 +843,7 @@ func (c *TasksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.task) if err != nil { @@ -995,6 +1004,7 @@ func (c *TasksLeaseCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/taskqueues/{taskqueue}/tasks/lease") @@ -1166,6 +1176,7 @@ func (c *TasksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1309,6 +1320,7 @@ func (c *TasksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.task2) if err != nil { @@ -1470,6 +1482,7 @@ func (c *TasksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.task2) if err != nil { diff --git a/vendor/google.golang.org/api/tasks/v1/tasks-gen.go b/vendor/google.golang.org/api/tasks/v1/tasks-gen.go index d48f0d8c8..2c5179fe5 100644 --- a/vendor/google.golang.org/api/tasks/v1/tasks-gen.go +++ b/vendor/google.golang.org/api/tasks/v1/tasks-gen.go @@ -65,9 +65,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Tasklists *TasklistsService @@ -81,6 +82,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewTasklistsService(s *Service) *TasklistsService { rs := &TasklistsService{s: s} return rs @@ -398,6 +403,7 @@ func (c *TasklistsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "users/@me/lists/{tasklist}") @@ -504,6 +510,7 @@ func (c *TasklistsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -632,6 +639,7 @@ func (c *TasklistsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tasklist) if err != nil { @@ -772,6 +780,7 @@ func (c *TasklistsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -922,6 +931,7 @@ func (c 
*TasklistsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tasklist) if err != nil { @@ -1055,6 +1065,7 @@ func (c *TasklistsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tasklist) if err != nil { @@ -1188,6 +1199,7 @@ func (c *TasksClearCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "lists/{tasklist}/clear") @@ -1285,6 +1297,7 @@ func (c *TasksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "lists/{tasklist}/tasks/{task}") @@ -1401,6 +1414,7 @@ func (c *TasksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1553,6 +1567,7 @@ func (c *TasksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.task) if err != nil { @@ -1783,6 +1798,7 @@ func (c *TasksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2002,6 +2018,7 @@ func (c *TasksMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "lists/{tasklist}/tasks/{task}/move") @@ -2148,6 +2165,7 @@ func (c *TasksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.task) if err != nil { @@ -2291,6 +2309,7 @@ func (c *TasksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.task) if err != nil { diff --git a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json index 7eaa5a304..15e26390f 100644 --- a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json +++ 
b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/NLNRi_fS4FqWh_1V7egeBkDSD6g\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/VM1w9YmZ1mtUIDQpfA2CebEbL94\"", "discoveryVersion": "v1", "id": "toolresults:v1beta3", "name": "toolresults", "canonicalName": "Tool Results", "version": "v1beta3", - "revision": "20170124", + "revision": "20170222", "title": "Cloud Tool Results API", "description": "Reads and publishes results from Cloud Test Lab.", "ownerDomain": "google.com", @@ -1490,7 +1490,10 @@ ], "response": { "$ref": "PerfMetricsSummary" - } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] }, "list": { "id": "toolresults.projects.histories.executions.steps.list", diff --git a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go index 7b4f0e6ab..60ce17014 100644 --- a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go +++ b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Projects *ProjectsService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Histories = NewProjectsHistoriesService(s) @@ -2540,6 +2545,7 @@ func (c *ProjectsGetSettingsCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2692,6 +2698,7 @@ func (c *ProjectsInitializeSettingsCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}:initializeSettings") @@ -2835,6 +2842,7 @@ func (c *ProjectsHistoriesCreateCall) doRequest(alt string) (*http.Response, err reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.history) if err != nil { @@ -2990,6 +2998,7 @@ func (c *ProjectsHistoriesGetCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3170,6 +3179,7 @@ func (c *ProjectsHistoriesListCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", 
c.ifNoneMatch_) } @@ -3355,6 +3365,7 @@ func (c *ProjectsHistoriesExecutionsCreateCall) doRequest(alt string) (*http.Res reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.execution) if err != nil { @@ -3520,6 +3531,7 @@ func (c *ProjectsHistoriesExecutionsGetCall) doRequest(alt string) (*http.Respon reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3702,6 +3714,7 @@ func (c *ProjectsHistoriesExecutionsListCall) doRequest(alt string) (*http.Respo reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3892,6 +3905,7 @@ func (c *ProjectsHistoriesExecutionsPatchCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.execution) if err != nil { @@ -4069,6 +4083,7 @@ func (c *ProjectsHistoriesExecutionsStepsCreateCall) doRequest(alt string) (*htt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.step) if err != nil { @@ -4244,6 +4259,7 @@ func (c *ProjectsHistoriesExecutionsStepsGetCall) doRequest(alt string) (*http.R reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4414,6 +4430,7 @@ func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) doRequest(al reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4508,7 +4525,10 @@ func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) Do(opts ...g // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary", // "response": { // "$ref": "PerfMetricsSummary" - // } + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] // } } @@ -4605,6 +4625,7 @@ func (c *ProjectsHistoriesExecutionsStepsListCall) doRequest(alt string) (*http. 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4807,6 +4828,7 @@ func (c *ProjectsHistoriesExecutionsStepsPatchCall) doRequest(alt string) (*http reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.step) if err != nil { @@ -4983,6 +5005,7 @@ func (c *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall) doRequest(alt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishxunitxmlfilesrequest) if err != nil { @@ -5150,6 +5173,7 @@ func (c *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall) doRequest reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.perfmetricssummary) if err != nil { @@ -5317,6 +5341,7 @@ func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall) doRequest(a reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.perfsampleseries) if err != nil { @@ -5494,6 +5519,7 @@ func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall) doRequest(alt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5689,6 +5715,7 @@ func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) doRequest(alt reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5876,6 +5903,7 @@ func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchcreateperfsamplesrequest) if err != nil { @@ -6083,6 +6111,7 @@ func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) doRequ reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6313,6 +6342,7 @@ func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) doRequest(alt strin reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/toolresults/v1beta3firstparty/toolresults-api.json b/vendor/google.golang.org/api/toolresults/v1beta3firstparty/toolresults-api.json new file mode 100644 index 000000000..8ba6c8d85 --- /dev/null +++ b/vendor/google.golang.org/api/toolresults/v1beta3firstparty/toolresults-api.json @@ -0,0 +1,1977 @@ +{ + "kind": "discovery#restDescription", + "etag": 
"\"tbys6C40o18GZwyMen5GMkdK-3s/SM82fIVoITJUqTD6qY99EIKCqqw\"", + "discoveryVersion": "v1", + "id": "toolresults:v1beta3firstparty", + "name": "toolresults", + "canonicalName": "Tool Results", + "version": "v1beta3firstparty", + "revision": "20170222", + "title": "Cloud Tool Results firstparty API", + "description": "Reads and publishes results from Cloud Test Lab.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/cloud-test-lab/", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/toolresults/v1beta3firstparty/projects/", + "basePath": "/toolresults/v1beta3firstparty/projects/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "toolresults/v1beta3firstparty/projects/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "schemas": { + "Any": { + "id": "Any", + "type": "object", + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a URL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form of utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\nFoo foo = ...; Any any; any.PackFrom(foo); ... if (any.UnpackTo(&foo)) { ... }\n\nExample 2: Pack and unpack a message in Java.\n\nFoo foo = ...; Any any = Any.pack(foo); ... if (any.is(Foo.class)) { foo = any.unpack(Foo.class); }\n\nExample 3: Pack and unpack a message in Python.\n\nfoo = Foo(...) any = Any() any.Pack(foo) ... 
if any.Is(Foo.DESCRIPTOR): any.Unpack(foo) ...\n\nThe pack methods provided by protobuf library will by default use 'type.googleapis.com/full.type.name' as the type URL and the unpack methods only use the fully qualified type name after the last '/' in the type URL, for example \"foo.bar.com/x/y.z\" will yield type name \"y.z\".\n\n\n\nJSON ==== The JSON representation of an `Any` value uses the regular representation of the deserialized, embedded message, with an additional field `@type` which contains the type URL. Example:\n\npackage google.profile; message Person { string first_name = 1; string last_name = 2; }\n\n{ \"@type\": \"type.googleapis.com/google.profile.Person\", \"firstName\": \u003cstring\u003e, \"lastName\": \u003cstring\u003e }\n\nIf the embedded message type is well-known and has a custom JSON representation, that representation will be embedded adding a field `value` which holds the custom JSON in addition to the `@type` field. Example (for message [google.protobuf.Duration][]):\n\n{ \"@type\": \"type.googleapis.com/google.protobuf.Duration\", \"value\": \"1.212s\" }", + "properties": { + "typeUrl": { + "type": "string", + "description": "A URL/resource name whose content describes the type of the serialized protocol buffer message.\n\nFor URLs which use the scheme `http`, `https`, or no scheme, the following restrictions and interpretations apply:\n\n* If no scheme is provided, `https` is assumed. * The last segment of the URL's path must represent the fully qualified name of the type (as in `path/google.protobuf.Duration`). The name should be in a canonical form (e.g., leading \".\" is not accepted). * An HTTP GET on the URL must yield a [google.protobuf.Type][] value in binary format, or produce an error. * Applications are allowed to cache lookup results based on the URL, or have them precompiled into a binary to avoid any lookup. Therefore, binary compatibility needs to be preserved on changes to types. (Use versioned type names to manage breaking changes.)\n\nSchemes other than `http`, `https` (or the empty scheme) might be used with implementation specific semantics."
+ }, + "value": { + "type": "string", + "description": "Must be a valid serialized protocol buffer of the above specified type.", + "format": "byte" + } + } + }, + "BasicPerfSampleSeries": { + "id": "BasicPerfSampleSeries", + "type": "object", + "description": "Encapsulates the metadata for basic sample series represented by a line chart", + "properties": { + "perfMetricType": { + "type": "string", + "enum": [ + "cpu", + "memory", + "network", + "perfMetricTypeUnspecified" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "perfUnit": { + "type": "string", + "enum": [ + "kibibyte", + "percent", + "perfUnitUnspecified" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "sampleSeriesLabel": { + "type": "string", + "enum": [ + "cpuKernel", + "cpuTotal", + "cpuUser", + "memoryRssPrivate", + "memoryRssShared", + "memoryRssTotal", + "ntBytesReceived", + "ntBytesTransferred", + "sampleSeriesTypeUnspecified" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + } + } + }, + "BatchCreatePerfSamplesRequest": { + "id": "BatchCreatePerfSamplesRequest", + "type": "object", + "description": "The request must provide up to a maximum of 5000 samples to be created; a larger sample size will cause an INVALID_ARGUMENT error", + "properties": { + "perfSamples": { + "type": "array", + "description": "The set of PerfSamples to create should not include existing timestamps", + "items": { + "$ref": "PerfSample" + } + } + } + }, + "BatchCreatePerfSamplesResponse": { + "id": "BatchCreatePerfSamplesResponse", + "type": "object", + "properties": { + "perfSamples": { + "type": "array", + "items": { + "$ref": "PerfSample" + } + } + } + }, + "CPUInfo": { + "id": "CPUInfo", + "type": "object", + "properties": { + "cpuProcessor": { + "type": "string", + "description": "description of the device processor ie '1.8 GHz hexa core 64-bit ARMv8-A'" + }, + "cpuSpeedInGhz": { + "type": "number", + "description": "the CPU clock speed in GHz", + "format": "float" + }, + "numberOfCores": { + "type": "integer", + "description": "the number of CPU cores", + "format": "int32" + } + } + }, + "Duration": { + "id": "Duration", + "type": "object", + "description": "A Duration represents a signed, fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like \"day\" or \"month\". It is related to Timestamp in that the difference between two Timestamp values is a Duration and it can be added or subtracted from a Timestamp. 
Range is approximately +-10,000 years.\n\n# Examples\n\nExample 1: Compute Duration from two Timestamps in pseudo code.\n\nTimestamp start = ...; Timestamp end = ...; Duration duration = ...;\n\nduration.seconds = end.seconds - start.seconds; duration.nanos = end.nanos - start.nanos;\n\nif (duration.seconds \u003c 0 && duration.nanos \u003e 0) { duration.seconds += 1; duration.nanos -= 1000000000; } else if (durations.seconds \u003e 0 && duration.nanos \u003c 0) { duration.seconds -= 1; duration.nanos += 1000000000; }\n\nExample 2: Compute Timestamp from Timestamp + Duration in pseudo code.\n\nTimestamp start = ...; Duration duration = ...; Timestamp end = ...;\n\nend.seconds = start.seconds + duration.seconds; end.nanos = start.nanos + duration.nanos;\n\nif (end.nanos \u003c 0) { end.seconds -= 1; end.nanos += 1000000000; } else if (end.nanos \u003e= 1000000000) { end.seconds += 1; end.nanos -= 1000000000; }\n\nExample 3: Compute Duration from datetime.timedelta in Python.\n\ntd = datetime.timedelta(days=3, minutes=10) duration = Duration() duration.FromTimedelta(td)\n\n# JSON Mapping\n\nIn JSON format, the Duration type is encoded as a string rather than an object, where the string ends in the suffix \"s\" (indicating seconds) and is preceded by the number of seconds, with nanoseconds expressed as fractional seconds. For example, 3 seconds with 0 nanoseconds should be encoded in JSON format as \"3s\", while 3 seconds and 1 nanosecond should be expressed in JSON format as \"3.000000001s\", and 3 seconds and 1 microsecond should be expressed in JSON format as \"3.000001s\".", + "properties": { + "nanos": { + "type": "integer", + "description": "Signed fractions of a second at nanosecond resolution of the span of time. Durations less than one second are represented with a 0 `seconds` field and a positive or negative `nanos` field. For durations of one second or more, a non-zero value for the `nanos` field must be of the same sign as the `seconds` field. Must be from -999,999,999 to +999,999,999 inclusive.", + "format": "int32" + }, + "seconds": { + "type": "string", + "description": "Signed seconds of the span of time. Must be from -315,576,000,000 to +315,576,000,000 inclusive.", + "format": "int64" + } + } + }, + "Execution": { + "id": "Execution", + "type": "object", + "description": "An Execution represents a collection of Steps. For instance, it could represent: - a mobile test executed across a range of device configurations - a jenkins job with a build step followed by a test step\n\nThe maximum size of an execution message is 1 MiB.\n\nAn Execution can be updated until its state is set to COMPLETE at which point it becomes immutable.", + "properties": { + "completionTime": { + "$ref": "Timestamp", + "description": "The time when the Execution status transitioned to COMPLETE.\n\nThis value will be set automatically when state transitions to COMPLETE.\n\n- In response: set if the execution state is COMPLETE.
- In create/update request: never set" + }, + "creationTime": { + "$ref": "Timestamp", + "description": "The time when the Execution was created.\n\nThis value will be set automatically when CreateExecution is called.\n\n- In response: always set - In create/update request: never set" + }, + "executionId": { + "type": "string", + "description": "A unique identifier within a History for this Execution.\n\nReturns INVALID_ARGUMENT if this field is set or overwritten by the caller.\n\n- In response always set - In create/update request: never set" + }, + "outcome": { + "$ref": "Outcome", + "description": "Classify the result, for example into SUCCESS or FAILURE\n\n- In response: present if set by create/update request - In create/update request: optional" + }, + "state": { + "type": "string", + "description": "The initial state is IN_PROGRESS.\n\nThe only legal state transitions is from IN_PROGRESS to COMPLETE.\n\nA PRECONDITION_FAILED will be returned if an invalid transition is requested.\n\nThe state can only be set to COMPLETE once. A FAILED_PRECONDITION will be returned if the state is set to COMPLETE multiple times.\n\nIf the state is set to COMPLETE, all the in-progress steps within the execution will be set as COMPLETE. If the outcome of the step is not set, the outcome will be set to INCONCLUSIVE.\n\n- In response always set - In create/update request: optional", + "enum": [ + "complete", + "inProgress", + "pending", + "unknownState" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "testExecutionMatrixId": { + "type": "string", + "description": "TestExecution Matrix ID that the Test Service uses.\n\n- In response: present if set by create - In create: optional - In update: never set" + } + } + }, + "FailureDetail": { + "id": "FailureDetail", + "type": "object", + "properties": { + "crashed": { + "type": "boolean", + "description": "If the failure was severe because the system under test crashed." + }, + "notInstalled": { + "type": "boolean", + "description": "If an app is not installed and thus no test can be run with the app. This might be caused by trying to run a test on an unsupported platform." + }, + "otherNativeCrash": { + "type": "boolean", + "description": "If a native process other than the app crashed." + }, + "timedOut": { + "type": "boolean", + "description": "If the test overran some time limit, and that is why it failed." + }, + "unableToCrawl": { + "type": "boolean", + "description": "If the robo was unable to crawl the app; perhaps because the app did not start." + } + } + }, + "FileReference": { + "id": "FileReference", + "type": "object", + "description": "A reference to a file.", + "properties": { + "fileUri": { + "type": "string", + "description": "The URI of a file stored in Google Cloud Storage.\n\nFor example: http://storage.googleapis.com/mybucket/path/to/test.xml or in gsutil format: gs://mybucket/path/to/test.xml with version-specific info, gs://mybucket/path/to/test.xml#1360383693690000\n\nAn INVALID_ARGUMENT error will be returned if the URI format is not supported.\n\n- In response: always set - In create/update request: always set" + } + } + }, + "History": { + "id": "History", + "type": "object", + "description": "A History represents a sorted list of Executions ordered by the start_timestamp_millis field (descending). It can be used to group all the Executions of a continuous build.\n\nNote that the ordering only operates on one-dimension. 
If a repository has multiple branches, it means that multiple histories will need to be used in order to order Executions per branch.", + "properties": { + "displayName": { + "type": "string", + "description": "A short human-readable (plain text) name to display in the UI. Maximum of 100 characters.\n\n- In response: present if set during create. - In create request: optional" + }, + "historyId": { + "type": "string", + "description": "A unique identifier within a project for this History.\n\nReturns INVALID_ARGUMENT if this field is set or overwritten by the caller.\n\n- In response always set - In create request: never set" + }, + "name": { + "type": "string", + "description": "A name to uniquely identify a history within a project. Maximum of 100 characters.\n\n- In response always set - In create request: always set" + } + } + }, + "Image": { + "id": "Image", + "type": "object", + "description": "An image, with a link to the main image and a thumbnail.", + "properties": { + "error": { + "$ref": "Status", + "description": "An error explaining why the thumbnail could not be rendered." + }, + "sourceImage": { + "$ref": "ToolOutputReference", + "description": "A reference to the full-size, original image.\n\nThis is the same as the tool_outputs entry for the image under its Step.\n\nAlways set." + }, + "stepId": { + "type": "string", + "description": "The step to which the image is attached.\n\nAlways set." + }, + "thumbnail": { + "$ref": "Thumbnail", + "description": "The thumbnail." + } + } + }, + "InconclusiveDetail": { + "id": "InconclusiveDetail", + "type": "object", + "properties": { + "abortedByUser": { + "type": "boolean", + "description": "If the end user aborted the test execution before a pass or fail could be determined. For example, the user pressed ctrl-c which sent a kill signal to the test runner while the test was running." + }, + "infrastructureFailure": { + "type": "boolean", + "description": "If the test runner could not determine success or failure because the test depends on a component other than the system under test which failed.\n\nFor example, a mobile test requires provisioning a device where the test executes, and that provisioning can fail." + } + } + }, + "ListExecutionsResponse": { + "id": "ListExecutionsResponse", + "type": "object", + "properties": { + "executions": { + "type": "array", + "description": "Executions.\n\nAlways set.", + "items": { + "$ref": "Execution" + } + }, + "nextPageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nWill only be set if there are more Executions to fetch." + } + } + }, + "ListHistoriesResponse": { + "id": "ListHistoriesResponse", + "type": "object", + "description": "Response message for HistoryService.List", + "properties": { + "histories": { + "type": "array", + "description": "Histories.", + "items": { + "$ref": "History" + } + }, + "nextPageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nWill only be set if there are more histories to fetch.\n\nTokens are valid for up to one hour from the time of the first list request. For instance, if you make a list request at 1PM and use the token from this first request 10 minutes later, the token from this second response will only be valid for 50 minutes." 
+ } + } + }, + "ListPerfSampleSeriesResponse": { + "id": "ListPerfSampleSeriesResponse", + "type": "object", + "properties": { + "perfSampleSeries": { + "type": "array", + "description": "The resulting PerfSampleSeries sorted by id", + "items": { + "$ref": "PerfSampleSeries" + } + } + } + }, + "ListPerfSamplesResponse": { + "id": "ListPerfSamplesResponse", + "type": "object", + "properties": { + "nextPageToken": { + "type": "string", + "description": "Optional, returned if result size exceeds the page size specified in the request (or the default page size, 500, if unspecified). It indicates the last sample timestamp to be used as page_token in subsequent request" + }, + "perfSamples": { + "type": "array", + "items": { + "$ref": "PerfSample" + } + } + } + }, + "ListStepThumbnailsResponse": { + "id": "ListStepThumbnailsResponse", + "type": "object", + "description": "A response containing the thumbnails in a step.", + "properties": { + "nextPageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nIf set, indicates that there are more thumbnails to read, by calling list again with this value in the page_token field." + }, + "thumbnails": { + "type": "array", + "description": "A list of image data.\n\nImages are returned in a deterministic order; they are ordered by these factors, in order of importance: * First, by their associated test case. Images without a test case are considered greater than images with one. * Second, by their creation time. Images without a creation time are greater than images with one. * Third, by the order in which they were added to the step (by calls to CreateStep or UpdateStep).", + "items": { + "$ref": "Image" + } + } + } + }, + "ListStepsResponse": { + "id": "ListStepsResponse", + "type": "object", + "description": "Response message for StepService.List.", + "properties": { + "nextPageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nIf set, indicates that there are more steps to read, by calling list again with this value in the page_token field." 
+ }, + "steps": { + "type": "array", + "description": "Steps.", + "items": { + "$ref": "Step" + } + } + } + }, + "MemoryInfo": { + "id": "MemoryInfo", + "type": "object", + "properties": { + "memoryCapInKibibyte": { + "type": "string", + "description": "Maximum memory that can be allocated to the process in KiB", + "format": "int64" + }, + "memoryTotalInKibibyte": { + "type": "string", + "description": "Total memory available on the device in KiB", + "format": "int64" + } + } + }, + "Outcome": { + "id": "Outcome", + "type": "object", + "description": "Interprets a result so that humans and machines can act on it.", + "properties": { + "failureDetail": { + "$ref": "FailureDetail", + "description": "More information about a FAILURE outcome.\n\nReturns INVALID_ARGUMENT if this field is set but the summary is not FAILURE.\n\nOptional" + }, + "inconclusiveDetail": { + "$ref": "InconclusiveDetail", + "description": "More information about an INCONCLUSIVE outcome.\n\nReturns INVALID_ARGUMENT if this field is set but the summary is not INCONCLUSIVE.\n\nOptional" + }, + "skippedDetail": { + "$ref": "SkippedDetail", + "description": "More information about a SKIPPED outcome.\n\nReturns INVALID_ARGUMENT if this field is set but the summary is not SKIPPED.\n\nOptional" + }, + "successDetail": { + "$ref": "SuccessDetail", + "description": "More information about a SUCCESS outcome.\n\nReturns INVALID_ARGUMENT if this field is set but the summary is not SUCCESS.\n\nOptional" + }, + "summary": { + "type": "string", + "description": "The simplest way to interpret a result.\n\nRequired", + "enum": [ + "failure", + "inconclusive", + "skipped", + "success", + "unset" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + } + } + }, + "PerfEnvironment": { + "id": "PerfEnvironment", + "type": "object", + "description": "Encapsulates performance environment info", + "properties": { + "cpuInfo": { + "$ref": "CPUInfo", + "description": "CPU related environment info" + }, + "memoryInfo": { + "$ref": "MemoryInfo", + "description": "Memory related environment info" + } + } + }, + "PerfMetricsSummary": { + "id": "PerfMetricsSummary", + "type": "object", + "description": "A summary of perf metrics collected and performance environment info", + "properties": { + "executionId": { + "type": "string", + "description": "A tool results execution ID." + }, + "historyId": { + "type": "string", + "description": "A tool results history ID." + }, + "perfEnvironment": { + "$ref": "PerfEnvironment", + "description": "Describes the environment in which the performance metrics were collected" + }, + "perfMetrics": { + "type": "array", + "description": "Set of resource collected", + "items": { + "type": "string", + "enum": [ + "cpu", + "memory", + "network", + "perfMetricTypeUnspecified" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + } + }, + "projectId": { + "type": "string", + "description": "The cloud project" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID." 
+ } + } + }, + "PerfSample": { + "id": "PerfSample", + "type": "object", + "description": "Resource representing a single performance measure or data point", + "properties": { + "sampleTime": { + "$ref": "Timestamp", + "description": "Timestamp of collection" + }, + "value": { + "type": "number", + "description": "Value observed", + "format": "double" + } + } + }, + "PerfSampleSeries": { + "id": "PerfSampleSeries", + "type": "object", + "description": "Resource representing a collection of performance samples (or data points)", + "properties": { + "basicPerfSampleSeries": { + "$ref": "BasicPerfSampleSeries", + "description": "Basic series represented by a line chart" + }, + "executionId": { + "type": "string", + "description": "A tool results execution ID." + }, + "historyId": { + "type": "string", + "description": "A tool results history ID." + }, + "projectId": { + "type": "string", + "description": "The cloud project" + }, + "sampleSeriesId": { + "type": "string", + "description": "A sample series id" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID." + } + } + }, + "ProjectSettings": { + "id": "ProjectSettings", + "type": "object", + "description": "Per-project settings for the Tool Results service.", + "properties": { + "defaultBucket": { + "type": "string", + "description": "The name of the Google Cloud Storage bucket to which results are written.\n\nBy default, this is unset.\n\nIn update request: optional In response: optional" + }, + "name": { + "type": "string", + "description": "The name of the project's settings.\n\nAlways of the form: projects/{project-id}/settings\n\nIn update request: never set In response: always set" + } + } + }, + "PublishXunitXmlFilesRequest": { + "id": "PublishXunitXmlFilesRequest", + "type": "object", + "description": "Request message for StepService.PublishXunitXmlFiles.", + "properties": { + "xunitXmlFiles": { + "type": "array", + "description": "URI of the Xunit XML files to publish.\n\nThe maximum size of the file this reference is pointing to is 50MB.\n\nRequired.", + "items": { + "$ref": "FileReference" + } + } + } + }, + "SkippedDetail": { + "id": "SkippedDetail", + "type": "object", + "properties": { + "incompatibleAppVersion": { + "type": "boolean", + "description": "If the App doesn't support the specific API level." + }, + "incompatibleArchitecture": { + "type": "boolean", + "description": "If the App doesn't run on the specific architecture, for example, x86." + }, + "incompatibleDevice": { + "type": "boolean", + "description": "If the requested OS version doesn't run on the specific device model." + } + } + }, + "StackTrace": { + "id": "StackTrace", + "type": "object", + "description": "A stacktrace.", + "properties": { + "exception": { + "type": "string", + "description": "The stack trace message.\n\nRequired" + } + } + }, + "Status": { + "id": "Status", + "type": "object", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users - Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of [google.rpc.Code][], but it may accept additional error codes if needed. 
The error message should be a developer-facing English message that helps developers *understand* and *resolve* the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it is not necessarily the actual wire format. When the `Status` message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.", + "properties": { + "code": { + "type": "integer", + "description": "The status code, which should be an enum value of [google.rpc.Code][].", + "format": "int32" + }, + "details": { + "type": "array", + "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.", + "items": { + "$ref": "Any" + } + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the [google.rpc.Status.details][] field, or localized by the client." + } + } + }, + "Step": { + "id": "Step", + "type": "object", + "description": "A Step represents a single operation performed as part of Execution. A step can be used to represent the execution of a tool ( for example a test runner execution or an execution of a compiler).\n\nSteps can overlap (for instance two steps might have the same start time if some operations are done in parallel).\n\nHere is an example, let's consider that we have a continuous build is executing a test runner for each iteration. The workflow would look like: - user creates a Execution with id 1 - user creates an TestExecutionStep with id 100 for Execution 1 - user update TestExecutionStep with id 100 to add a raw xml log + the service parses the xml logs and returns a TestExecutionStep with updated TestResult(s). 
- user update the status of TestExecutionStep with id 100 to COMPLETE\n\nA Step can be updated until its state is set to COMPLETE at which points it becomes immutable.", + "properties": { + "completionTime": { + "$ref": "Timestamp", + "description": "The time when the step status was set to complete.\n\nThis value will be set automatically when state transitions to COMPLETE.\n\n- In response: set if the execution state is COMPLETE. - In create/update request: never set" + }, + "creationTime": { + "$ref": "Timestamp", + "description": "The time when the step was created.\n\n- In response: always set - In create/update request: never set" + }, + "description": { + "type": "string", + "description": "A description of this tool For example: mvn clean package -D skipTests=true\n\n- In response: present if set by create/update request - In create/update request: optional" + }, + "deviceUsageDuration": { + "$ref": "Duration", + "description": "How much the device resource is used to perform the test.\n\nThis is the device usage used for billing purpose, which is different from the run_duration, for example, infrastructure failure won't be charged for device usage.\n\nPRECONDITION_FAILED will be returned if one attempts to set a device_usage on a step which already has this field set.\n\n- In response: present if previously set. - In create request: optional - In update request: optional" + }, + "dimensionValue": { + "type": "array", + "description": "If the execution containing this step has any dimension_definition set, then this field allows the child to specify the values of the dimensions.\n\nThe keys must exactly match the dimension_definition of the execution.\n\nFor example, if the execution has `dimension_definition = ['attempt', 'device']` then a step must define values for those dimensions, eg. `dimension_value = ['attempt': '1', 'device': 'Nexus 6']`\n\nIf a step does not participate in one dimension of the matrix, the value for that dimension should be empty string. 
For example, if one of the tests is executed by a runner which does not support retries, the step could have `dimension_value = ['attempt': '', 'device': 'Nexus 6']`\n\nIf the step does not participate in any dimensions of the matrix, it may leave dimension_value unset.\n\nA PRECONDITION_FAILED will be returned if any of the keys do not exist in the dimension_definition of the execution.\n\nA PRECONDITION_FAILED will be returned if another step in this execution already has the same name and dimension_value, but differs on other data fields, for example, step field is different.\n\nA PRECONDITION_FAILED will be returned if dimension_value is set, and there is a dimension_definition in the execution which is not specified as one of the keys.\n\n- In response: present if set by create - In create request: optional - In update request: never set", + "items": { + "$ref": "StepDimensionValueEntry" + } + }, + "hasImages": { + "type": "boolean", + "description": "Whether any of the outputs of this step are images whose thumbnails can be fetched with ListThumbnails.\n\n- In response: always set - In create/update request: never set" + }, + "labels": { + "type": "array", + "description": "Arbitrary user-supplied key/value pairs that are associated with the step.\n\nUsers are responsible for managing the key namespace such that keys don't accidentally collide.\n\nAn INVALID_ARGUMENT will be returned if the number of labels exceeds 100 or if the length of any of the keys or values exceeds 100 characters.\n\n- In response: always set - In create request: optional - In update request: optional; any new key/value pair will be added to the map, and any new value for an existing key will update that key's value", + "items": { + "$ref": "StepLabelsEntry" + } + }, + "name": { + "type": "string", + "description": "A short human-readable name to display in the UI. Maximum of 100 characters. For example: Clean build\n\nA PRECONDITION_FAILED will be returned upon creating a new step if it shares its name and dimension_value with an existing step. If two steps represent a similar action, but have different dimension values, they should share the same name. For instance, if the same set of tests is run on two different platforms, the two steps should have the same name.\n\n- In response: always set - In create request: always set - In update request: never set" + }, + "outcome": { + "$ref": "Outcome", + "description": "Classification of the result, for example into SUCCESS or FAILURE\n\n- In response: present if set by create/update request - In create/update request: optional" + }, + "runDuration": { + "$ref": "Duration", + "description": "How long it took for this step to run.\n\nIf unset, this is set to the difference between creation_time and completion_time when the step is set to the COMPLETE state. In some cases, it is appropriate to set this value separately: For instance, if a step is created, but the operation it represents is queued for a few minutes before it executes, it would be appropriate not to include the time spent queued in its run_duration.\n\nPRECONDITION_FAILED will be returned if one attempts to set a run_duration on a step which already has this field set.\n\n- In response: present if previously set; always present on COMPLETE step - In create request: optional - In update request: optional" + }, + "state": { + "type": "string", + "description": "The initial state is IN_PROGRESS. 
The only legal state transitions are * IN_PROGRESS -\u003e COMPLETE\n\nA PRECONDITION_FAILED will be returned if an invalid transition is requested.\n\nIt is valid to create Step with a state set to COMPLETE. The state can only be set to COMPLETE once. A PRECONDITION_FAILED will be returned if the state is set to COMPLETE multiple times.\n\n- In response: always set - In create/update request: optional", + "enum": [ + "complete", + "inProgress", + "pending", + "unknownState" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "stepId": { + "type": "string", + "description": "A unique identifier within a Execution for this Step.\n\nReturns INVALID_ARGUMENT if this field is set or overwritten by the caller.\n\n- In response: always set - In create/update request: never set" + }, + "testExecutionStep": { + "$ref": "TestExecutionStep", + "description": "An execution of a test runner." + }, + "toolExecutionStep": { + "$ref": "ToolExecutionStep", + "description": "An execution of a tool (used for steps we don't explicitly support)." + } + } + }, + "StepDimensionValueEntry": { + "id": "StepDimensionValueEntry", + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "StepLabelsEntry": { + "id": "StepLabelsEntry", + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "SuccessDetail": { + "id": "SuccessDetail", + "type": "object", + "properties": { + "otherNativeCrash": { + "type": "boolean", + "description": "If a native process other than the app crashed." + } + } + }, + "TestCaseReference": { + "id": "TestCaseReference", + "type": "object", + "description": "A reference to a test case.\n\nTest case references are canonically ordered lexicographically by these three factors: * First, by test_suite_name. * Second, by class_name. * Third, by name.", + "properties": { + "className": { + "type": "string", + "description": "The name of the class." + }, + "name": { + "type": "string", + "description": "The name of the test case.\n\nRequired." + }, + "testSuiteName": { + "type": "string", + "description": "The name of the test suite to which this test case belongs." + } + } + }, + "TestExecutionStep": { + "id": "TestExecutionStep", + "type": "object", + "description": "A step that represents running tests.\n\nIt accepts ant-junit xml files which will be parsed into structured test results by the service. Xml file paths are updated in order to append more files, however they can't be deleted.\n\nUsers can also add test results manually by using the test_result field.", + "properties": { + "testIssues": { + "type": "array", + "description": "Issues observed during the test execution.\n\nFor example, if the mobile app under test crashed during the test, the error message and the stack trace content can be recorded here to assist debugging.\n\n- In response: present if set by create or update - In create/update request: optional", + "items": { + "$ref": "TestIssue" + } + }, + "testSuiteOverviews": { + "type": "array", + "description": "List of test suite overview contents. This could be parsed from xUnit XML log by server, or uploaded directly by user. 
This references should only be called when test suites are fully parsed or uploaded.\n\nThe maximum allowed number of test suite overviews per step is 1000.\n\n- In response: always set - In create request: optional - In update request: never (use publishXunitXmlFiles custom method instead)", + "items": { + "$ref": "TestSuiteOverview" + } + }, + "testTiming": { + "$ref": "TestTiming", + "description": "The timing break down of the test execution.\n\n- In response: present if set by create or update - In create/update request: optional" + }, + "toolExecution": { + "$ref": "ToolExecution", + "description": "Represents the execution of the test runner.\n\nThe exit code of this tool will be used to determine if the test passed.\n\n- In response: always set - In create/update request: optional" + } + } + }, + "TestIssue": { + "id": "TestIssue", + "type": "object", + "description": "An abnormal event observed during the test execution.", + "properties": { + "errorMessage": { + "type": "string", + "description": "A brief human-readable message describing the abnormal event.\n\nRequired." + }, + "stackTrace": { + "$ref": "StackTrace", + "description": "Optional." + } + } + }, + "TestSuiteOverview": { + "id": "TestSuiteOverview", + "type": "object", + "description": "A summary of a test suite result either parsed from XML or uploaded directly by a user.\n\nNote: the API related comments are for StepService only. This message is also being used in ExecutionService in a read only mode for the corresponding step.", + "properties": { + "errorCount": { + "type": "integer", + "description": "Number of test cases in error, typically set by the service by parsing the xml_source.\n\n- In create/response: always set - In update request: never", + "format": "int32" + }, + "failureCount": { + "type": "integer", + "description": "Number of failed test cases, typically set by the service by parsing the xml_source. May also be set by the user.\n\n- In create/response: always set - In update request: never", + "format": "int32" + }, + "name": { + "type": "string", + "description": "The name of the test suite.\n\n- In create/response: always set - In update request: never" + }, + "skippedCount": { + "type": "integer", + "description": "Number of test cases not run, typically set by the service by parsing the xml_source.\n\n- In create/response: always set - In update request: never", + "format": "int32" + }, + "totalCount": { + "type": "integer", + "description": "Number of test cases, typically set by the service by parsing the xml_source.\n\n- In create/response: always set - In update request: never", + "format": "int32" + }, + "xmlSource": { + "$ref": "FileReference", + "description": "If this test suite was parsed from XML, this is the URI where the original XML file is stored.\n\nNote: Multiple test suites can share the same xml_source\n\nReturns INVALID_ARGUMENT if the uri format is not supported.\n\n- In create/response: optional - In update request: never" + } + } + }, + "TestTiming": { + "id": "TestTiming", + "type": "object", + "description": "Testing timing break down to know phases.", + "properties": { + "testProcessDuration": { + "$ref": "Duration", + "description": "How long it took to run the test process.\n\n- In response: present if previously set. 
- In create/update request: optional" + } + } + }, + "Thumbnail": { + "id": "Thumbnail", + "type": "object", + "description": "A single thumbnail, with its size and format.", + "properties": { + "contentType": { + "type": "string", + "description": "The thumbnail's content type, i.e. \"image/png\".\n\nAlways set." + }, + "data": { + "type": "string", + "description": "The thumbnail file itself.\n\nThat is, the bytes here are precisely the bytes that make up the thumbnail file; they can be served as an image as-is (with the appropriate content type.)\n\nAlways set.", + "format": "byte" + }, + "heightPx": { + "type": "integer", + "description": "The height of the thumbnail, in pixels.\n\nAlways set.", + "format": "int32" + }, + "widthPx": { + "type": "integer", + "description": "The width of the thumbnail, in pixels.\n\nAlways set.", + "format": "int32" + } + } + }, + "Timestamp": { + "id": "Timestamp", + "type": "object", + "description": "A Timestamp represents a point in time independent of any time zone or calendar, represented as seconds and fractions of seconds at nanosecond resolution in UTC Epoch time. It is encoded using the Proleptic Gregorian Calendar which extends the Gregorian calendar backwards to year one. It is encoded assuming all minutes are 60 seconds long, i.e. leap seconds are \"smeared\" so that no leap second table is needed for interpretation. Range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By restricting to that range, we ensure that we can convert to and from RFC 3339 date strings. See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).\n\n# Examples\n\nExample 1: Compute Timestamp from POSIX `time()`.\n\nTimestamp timestamp; timestamp.set_seconds(time(NULL)); timestamp.set_nanos(0);\n\nExample 2: Compute Timestamp from POSIX `gettimeofday()`.\n\nstruct timeval tv; gettimeofday(&tv, NULL);\n\nTimestamp timestamp; timestamp.set_seconds(tv.tv_sec); timestamp.set_nanos(tv.tv_usec * 1000);\n\nExample 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n\nFILETIME ft; GetSystemTimeAsFileTime(&ft); UINT64 ticks = (((UINT64)ft.dwHighDateTime) \u003c\u003c 32) | ft.dwLowDateTime;\n\n// A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. Timestamp timestamp; timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n\nExample 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n\nlong millis = System.currentTimeMillis();\n\nTimestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) .setNanos((int) ((millis % 1000) * 1000000)).build();\n\n\n\nExample 5: Compute Timestamp from current time in Python.\n\ntimestamp = Timestamp() timestamp.GetCurrentTime()\n\n# JSON Mapping\n\nIn JSON format, the Timestamp type is encoded as a string in the [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\" where {year} is always expressed using four digits while {month}, {day}, {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), are optional. 
The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone is required, though only UTC (as indicated by \"Z\") is presently supported.\n\nFor example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past 01:30 UTC on January 15, 2017.\n\nIn JavaScript, one can convert a Date object to this format using the standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] method. In Python, a standard `datetime.datetime` object can be converted to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) to obtain a formatter capable of generating timestamps in this format.", + "properties": { + "nanos": { + "type": "integer", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive.", + "format": "int32" + }, + "seconds": { + "type": "string", + "description": "Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.", + "format": "int64" + } + } + }, + "ToolExecution": { + "id": "ToolExecution", + "type": "object", + "description": "An execution of an arbitrary tool. It could be a test runner or a tool copying artifacts or deploying code.", + "properties": { + "commandLineArguments": { + "type": "array", + "description": "The full tokenized command line including the program name (equivalent to argv in a C program).\n\n- In response: present if set by create request - In create request: optional - In update request: never set", + "items": { + "type": "string" + } + }, + "exitCode": { + "$ref": "ToolExitCode", + "description": "Tool execution exit code. This field will be set once the tool has exited.\n\n- In response: present if set by create/update request - In create request: optional - In update request: optional, a FAILED_PRECONDITION error will be returned if an exit_code is already set." + }, + "toolLogs": { + "type": "array", + "description": "References to any plain text logs output the tool execution.\n\nThis field can be set before the tool has exited in order to be able to have access to a live view of the logs while the tool is running.\n\nThe maximum allowed number of tool logs per step is 1000.\n\n- In response: present if set by create/update request - In create request: optional - In update request: optional, any value provided will be appended to the existing list", + "items": { + "$ref": "FileReference" + } + }, + "toolOutputs": { + "type": "array", + "description": "References to opaque files of any format output by the tool execution.\n\nThe maximum allowed number of tool outputs per step is 1000.\n\n- In response: present if set by create/update request - In create request: optional - In update request: optional, any value provided will be appended to the existing list", + "items": { + "$ref": "ToolOutputReference" + } + } + } + }, + "ToolExecutionStep": { + "id": "ToolExecutionStep", + "type": "object", + "description": "Generic tool step to be used for binaries we do not explicitly support. 
For example: running cp to copy artifacts from one location to another.", + "properties": { + "toolExecution": { + "$ref": "ToolExecution", + "description": "A Tool execution.\n\n- In response: present if set by create/update request - In create/update request: optional" + } + } + }, + "ToolExitCode": { + "id": "ToolExitCode", + "type": "object", + "description": "Exit code from a tool execution.", + "properties": { + "number": { + "type": "integer", + "description": "Tool execution exit code. A value of 0 means that the execution was successful.\n\n- In response: always set - In create/update request: always set", + "format": "int32" + } + } + }, + "ToolOutputReference": { + "id": "ToolOutputReference", + "type": "object", + "description": "A reference to a ToolExecution output file.", + "properties": { + "creationTime": { + "$ref": "Timestamp", + "description": "The creation time of the file.\n\n- In response: present if set by create/update request - In create/update request: optional" + }, + "output": { + "$ref": "FileReference", + "description": "A FileReference to an output file.\n\n- In response: always set - In create/update request: always set" + }, + "testCase": { + "$ref": "TestCaseReference", + "description": "The test case to which this output file belongs.\n\n- In response: present if set by create/update request - In create/update request: optional" + } + } + } + }, + "resources": { + "projects": { + "methods": { + "getSettings": { + "id": "toolresults.projects.getSettings", + "path": "{projectId}/settings", + "httpMethod": "GET", + "description": "Gets the Tool Results settings for a project.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read from project", + "parameters": { + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "ProjectSettings" + } + }, + "initializeSettings": { + "id": "toolresults.projects.initializeSettings", + "path": "{projectId}:initializeSettings", + "httpMethod": "POST", + "description": "Creates resources for settings which have not yet been set.\n\nCurrently, this creates a single resource: a Google Cloud Storage bucket, to be used as the default bucket for this project. The bucket is created in the name of the user calling. Except in rare cases, calling this method in parallel from multiple clients will only create a single bucket. In order to avoid unnecessary storage charges, the bucket is configured to automatically delete objects older than 90 days.\n\nThe bucket is created with the project-private ACL: All project team members are given permissions to the bucket and objects created within it according to their roles. Project owners have owners rights, and so on. The default ACL on objects created in the bucket is project-private as well. See Google Cloud Storage documentation for more details.\n\nIf there is already a default bucket set and the project can access the bucket, this call does nothing. 
However, if the project doesn't have the permission to access the bucket or the bucket is deteleted, a new bucket will be created.\n\nMay return any canonical error codes, including the following:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - Any error code raised by Google Cloud Storage", + "parameters": { + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "ProjectSettings" + } + } + }, + "resources": { + "histories": { + "methods": { + "create": { + "id": "toolresults.projects.histories.create", + "path": "{projectId}/histories", + "httpMethod": "POST", + "description": "Creates a History.\n\nThe returned History will have the id set.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing project does not exist", + "parameters": { + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + }, + "requestId": { + "type": "string", + "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + "location": "query" + } + }, + "parameterOrder": [ + "projectId" + ], + "request": { + "$ref": "History" + }, + "response": { + "$ref": "History" + } + }, + "get": { + "id": "toolresults.projects.histories.get", + "path": "{projectId}/histories/{historyId}", + "httpMethod": "GET", + "description": "Gets a History.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the History does not exist", + "parameters": { + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId" + ], + "response": { + "$ref": "History" + } + }, + "list": { + "id": "toolresults.projects.histories.list", + "path": "{projectId}/histories", + "httpMethod": "GET", + "description": "Lists Histories for a given Project.\n\nThe histories are sorted by modification time in descending order. The history_id key will be used to order the history with the same modification time.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the History does not exist", + "parameters": { + "filterByName": { + "type": "string", + "description": "If set, only return histories with the given name.\n\nOptional.", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of Histories to fetch.\n\nDefault value: 20. The server will use this default if the field is not set or has a value of 0. 
Any value greater than 100 will be treated as 100.\n\nOptional.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nOptional.", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "ListHistoriesResponse" + } + } + }, + "resources": { + "executions": { + "methods": { + "create": { + "id": "toolresults.projects.histories.executions.create", + "path": "{projectId}/histories/{historyId}/executions", + "httpMethod": "POST", + "description": "Creates an Execution.\n\nThe returned Execution will have the id set.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing History does not exist", + "parameters": { + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + }, + "requestId": { + "type": "string", + "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + "location": "query" + } + }, + "parameterOrder": [ + "projectId", + "historyId" + ], + "request": { + "$ref": "Execution" + }, + "response": { + "$ref": "Execution" + } + }, + "get": { + "id": "toolresults.projects.histories.executions.get", + "path": "{projectId}/histories/{historyId}/executions/{executionId}", + "httpMethod": "GET", + "description": "Gets an Execution.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the Execution does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "An Execution id.\n\nRequired.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId" + ], + "response": { + "$ref": "Execution" + } + }, + "list": { + "id": "toolresults.projects.histories.executions.list", + "path": "{projectId}/histories/{historyId}/executions", + "httpMethod": "GET", + "description": "Lists Histories for a given Project.\n\nThe executions are sorted by creation_time in descending order. The execution_id key will be used to order the executions with the same creation_time.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing History does not exist", + "parameters": { + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of Executions to fetch.\n\nDefault value: 25. 
The server will use this default if the field is not set or has a value of 0.\n\nOptional.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nOptional.", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId" + ], + "response": { + "$ref": "ListExecutionsResponse" + } + }, + "patch": { + "id": "toolresults.projects.histories.executions.patch", + "path": "{projectId}/histories/{historyId}/executions/{executionId}", + "httpMethod": "PATCH", + "description": "Updates an existing Execution with the supplied partial entity.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the requested state transition is illegal - NOT_FOUND - if the containing History does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "Required.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "Required.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id. Required.", + "required": true, + "location": "path" + }, + "requestId": { + "type": "string", + "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + "location": "query" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId" + ], + "request": { + "$ref": "Execution" + }, + "response": { + "$ref": "Execution" + } + } + }, + "resources": { + "steps": { + "methods": { + "create": { + "id": "toolresults.projects.histories.executions.steps.create", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps", + "httpMethod": "POST", + "description": "Creates a Step.\n\nThe returned Step will have the id set.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the step is too large (more than 10Mib) - NOT_FOUND - if the containing Execution does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A Execution id.\n\nRequired.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + }, + "requestId": { + "type": "string", + "description": "A unique request ID for server to detect duplicated requests. 
For example, a UUID.\n\nOptional, but strongly recommended.", + "location": "query" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId" + ], + "request": { + "$ref": "Step" + }, + "response": { + "$ref": "Step" + } + }, + "get": { + "id": "toolresults.projects.histories.executions.steps.get", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}", + "httpMethod": "GET", + "description": "Gets a Step.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the Step does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A Execution id.\n\nRequired.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A Step id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "response": { + "$ref": "Step" + } + }, + "getPerfMetricsSummary": { + "id": "toolresults.projects.histories.executions.steps.getPerfMetricsSummary", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary", + "httpMethod": "GET", + "description": "Retrieves a PerfMetricsSummary.\n\nMay return any of the following error code(s): - NOT_FOUND - The specified PerfMetricsSummary does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A tool results execution ID.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A tool results history ID.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "The cloud project", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "response": { + "$ref": "PerfMetricsSummary" + } + }, + "list": { + "id": "toolresults.projects.histories.executions.steps.list", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps", + "httpMethod": "GET", + "description": "Lists Steps for a given Execution.\n\nThe steps are sorted by creation_time in descending order. The step_id key will be used to order the steps with the same creation_time.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if an argument in the request happens to be invalid; e.g. 
if an attempt is made to list the children of a nonexistent Step - NOT_FOUND - if the containing Execution does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A Execution id.\n\nRequired.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of Steps to fetch.\n\nDefault value: 25. The server will use this default if the field is not set or has a value of 0.\n\nOptional.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nOptional.", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId" + ], + "response": { + "$ref": "ListStepsResponse" + } + }, + "patch": { + "id": "toolresults.projects.histories.executions.steps.patch", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}", + "httpMethod": "PATCH", + "description": "Updates an existing Step with the supplied partial entity.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the requested state transition is illegal (e.g try to upload a duplicate xml file), if the updated step is too large (more than 10Mib) - NOT_FOUND - if the containing Execution does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A Execution id.\n\nRequired.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + }, + "requestId": { + "type": "string", + "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + "location": "query" + }, + "stepId": { + "type": "string", + "description": "A Step id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "request": { + "$ref": "Step" + }, + "response": { + "$ref": "Step" + } + }, + "publishXunitXmlFiles": { + "id": "toolresults.projects.histories.executions.steps.publishXunitXmlFiles", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}:publishXunitXmlFiles", + "httpMethod": "POST", + "description": "Publish xml files to an existing Step.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the requested state transition is illegal, e.g try to upload a duplicate xml file or a file too large. 
- NOT_FOUND - if the containing Execution does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A Execution id.\n\nRequired.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A Step id. Note: This step must include a TestExecutionStep.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "request": { + "$ref": "PublishXunitXmlFilesRequest" + }, + "response": { + "$ref": "Step" + } + } + }, + "resources": { + "perfMetricsSummary": { + "methods": { + "create": { + "id": "toolresults.projects.histories.executions.steps.perfMetricsSummary.create", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary", + "httpMethod": "POST", + "description": "Creates a PerfMetricsSummary resource.\n\nMay return any of the following error code(s): - ALREADY_EXISTS - A PerfMetricSummary already exists for the given Step - NOT_FOUND - The containing Step does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A tool results execution ID.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A tool results history ID.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "The cloud project", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "request": { + "$ref": "PerfMetricsSummary" + }, + "response": { + "$ref": "PerfMetricsSummary" + } + } + } + }, + "perfSampleSeries": { + "methods": { + "create": { + "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.create", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries", + "httpMethod": "POST", + "description": "Creates a PerfSampleSeries.\n\nMay return any of the following error code(s): - ALREADY_EXISTS - PerfMetricSummary already exists for the given Step - NOT_FOUND - The containing Step does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A tool results execution ID.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A tool results history ID.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "The cloud project", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "request": { + "$ref": "PerfSampleSeries" + }, + "response": { + "$ref": "PerfSampleSeries" + } + }, + "get": { + "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.get", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}", + "httpMethod": "GET", + "description": "Gets a 
PerfSampleSeries.\n\nMay return any of the following error code(s): - NOT_FOUND - The specified PerfSampleSeries does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A tool results execution ID.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A tool results history ID.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "The cloud project", + "required": true, + "location": "path" + }, + "sampleSeriesId": { + "type": "string", + "description": "A sample series id", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId", + "sampleSeriesId" + ], + "response": { + "$ref": "PerfSampleSeries" + } + }, + "list": { + "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.list", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries", + "httpMethod": "GET", + "description": "Lists PerfSampleSeries for a given Step.\n\nThe request provides an optional filter which specifies one or more PerfMetricsType to include in the result; if none returns all. The resulting PerfSampleSeries are sorted by ids.\n\nMay return any of the following canonical error codes: - NOT_FOUND - The containing Step does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A tool results execution ID.", + "required": true, + "location": "path" + }, + "filter": { + "type": "string", + "description": "Specify one or more PerfMetricType values such as CPU to filter the result", + "enum": [ + "cpu", + "memory", + "network", + "perfMetricTypeUnspecified" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "repeated": true, + "location": "query" + }, + "historyId": { + "type": "string", + "description": "A tool results history ID.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "The cloud project", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "response": { + "$ref": "ListPerfSampleSeriesResponse" + } + } + }, + "resources": { + "samples": { + "methods": { + "batchCreate": { + "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.batchCreate", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples:batchCreate", + "httpMethod": "POST", + "description": "Creates a batch of PerfSamples - a client can submit multiple batches of Perf Samples through repeated calls to this method in order to split up a large request payload - duplicates and existing timestamp entries will be ignored. 
- the batch operation may partially succeed - the set of elements successfully inserted is returned in the response (omits items which already existed in the database).\n\nMay return any of the following canonical error codes: - NOT_FOUND - The containing PerfSampleSeries does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A tool results execution ID.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A tool results history ID.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "The cloud project", + "required": true, + "location": "path" + }, + "sampleSeriesId": { + "type": "string", + "description": "A sample series id", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId", + "sampleSeriesId" + ], + "request": { + "$ref": "BatchCreatePerfSamplesRequest" + }, + "response": { + "$ref": "BatchCreatePerfSamplesResponse" + } + }, + "list": { + "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.list", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples", + "httpMethod": "GET", + "description": "Lists the Performance Samples of a given Sample Series - The list results are sorted by timestamps ascending - The default page size is 500 samples; and maximum size allowed 5000 - The response token indicates the last returned PerfSample timestamp - When the results size exceeds the page size, submit a subsequent request including the page token to return the rest of the samples up to the page limit\n\nMay return any of the following canonical error codes: - OUT_OF_RANGE - The specified request page_token is out of valid range - NOT_FOUND - The containing PerfSampleSeries does not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "A tool results execution ID.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A tool results history ID.", + "required": true, + "location": "path" + }, + "pageSize": { + "type": "integer", + "description": "The default page size is 500 samples, and the maximum size is 5000. 
If the page_size is greater than 5000, the effective page size will be 5000", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional, the next_page_token returned in the previous response", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "The cloud project", + "required": true, + "location": "path" + }, + "sampleSeriesId": { + "type": "string", + "description": "A sample series id", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A tool results step ID.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId", + "sampleSeriesId" + ], + "response": { + "$ref": "ListPerfSamplesResponse" + } + } + } + } + } + }, + "thumbnails": { + "methods": { + "list": { + "id": "toolresults.projects.histories.executions.steps.thumbnails.list", + "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/thumbnails", + "httpMethod": "GET", + "description": "Lists thumbnails of images attached to a step.\n\nMay return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to read from the project, or from any of the images - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the step does not exist, or if any of the images do not exist", + "parameters": { + "executionId": { + "type": "string", + "description": "An Execution id.\n\nRequired.", + "required": true, + "location": "path" + }, + "historyId": { + "type": "string", + "description": "A History id.\n\nRequired.", + "required": true, + "location": "path" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of thumbnails to fetch.\n\nDefault value: 50. The server will use this default if the field is not set or has a value of 0.\n\nOptional.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A continuation token to resume the query at the next item.\n\nOptional.", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "A Project id.\n\nRequired.", + "required": true, + "location": "path" + }, + "stepId": { + "type": "string", + "description": "A Step id.\n\nRequired.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "historyId", + "executionId", + "stepId" + ], + "response": { + "$ref": "ListStepThumbnailsResponse" + } + } + } + } + } + } + } + } + } + } + } + } + } +} diff --git a/vendor/google.golang.org/api/toolresults/v1beta3firstparty/toolresults-gen.go b/vendor/google.golang.org/api/toolresults/v1beta3firstparty/toolresults-gen.go new file mode 100644 index 000000000..20f64b552 --- /dev/null +++ b/vendor/google.golang.org/api/toolresults/v1beta3firstparty/toolresults-gen.go @@ -0,0 +1,6406 @@ +// Package toolresults provides access to the Cloud Tool Results firstparty API. +// +// See https://developers.google.com/cloud-test-lab/ +// +// Usage example: +// +// import "google.golang.org/api/toolresults/v1beta3firstparty" +// ... 
+// toolresultsService, err := toolresults.New(oauthHttpClient) +package toolresults // import "google.golang.org/api/toolresults/v1beta3firstparty" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "toolresults:v1beta3firstparty" +const apiName = "toolresults" +const apiVersion = "v1beta3firstparty" +const basePath = "https://www.googleapis.com/toolresults/v1beta3firstparty/projects/" + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Histories = NewProjectsHistoriesService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Histories *ProjectsHistoriesService +} + +func NewProjectsHistoriesService(s *Service) *ProjectsHistoriesService { + rs := &ProjectsHistoriesService{s: s} + rs.Executions = NewProjectsHistoriesExecutionsService(s) + return rs +} + +type ProjectsHistoriesService struct { + s *Service + + Executions *ProjectsHistoriesExecutionsService +} + +func NewProjectsHistoriesExecutionsService(s *Service) *ProjectsHistoriesExecutionsService { + rs := &ProjectsHistoriesExecutionsService{s: s} + rs.Steps = NewProjectsHistoriesExecutionsStepsService(s) + return rs +} + +type ProjectsHistoriesExecutionsService struct { + s *Service + + Steps *ProjectsHistoriesExecutionsStepsService +} + +func NewProjectsHistoriesExecutionsStepsService(s *Service) *ProjectsHistoriesExecutionsStepsService { + rs := &ProjectsHistoriesExecutionsStepsService{s: s} + rs.PerfMetricsSummary = NewProjectsHistoriesExecutionsStepsPerfMetricsSummaryService(s) + rs.PerfSampleSeries = NewProjectsHistoriesExecutionsStepsPerfSampleSeriesService(s) + rs.Thumbnails = NewProjectsHistoriesExecutionsStepsThumbnailsService(s) + return rs +} + +type ProjectsHistoriesExecutionsStepsService struct { + s *Service + + PerfMetricsSummary *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService + + PerfSampleSeries *ProjectsHistoriesExecutionsStepsPerfSampleSeriesService + + Thumbnails *ProjectsHistoriesExecutionsStepsThumbnailsService +} + +func NewProjectsHistoriesExecutionsStepsPerfMetricsSummaryService(s *Service) *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService { + rs := 
&ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService{s: s} + return rs +} + +type ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService struct { + s *Service +} + +func NewProjectsHistoriesExecutionsStepsPerfSampleSeriesService(s *Service) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesService { + rs := &ProjectsHistoriesExecutionsStepsPerfSampleSeriesService{s: s} + rs.Samples = NewProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService(s) + return rs +} + +type ProjectsHistoriesExecutionsStepsPerfSampleSeriesService struct { + s *Service + + Samples *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService +} + +func NewProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService(s *Service) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService { + rs := &ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService{s: s} + return rs +} + +type ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService struct { + s *Service +} + +func NewProjectsHistoriesExecutionsStepsThumbnailsService(s *Service) *ProjectsHistoriesExecutionsStepsThumbnailsService { + rs := &ProjectsHistoriesExecutionsStepsThumbnailsService{s: s} + return rs +} + +type ProjectsHistoriesExecutionsStepsThumbnailsService struct { + s *Service +} + +// Any: `Any` contains an arbitrary serialized protocol buffer message +// along with a URL that describes the type of the serialized +// message. +// +// Protobuf library provides support to pack/unpack Any values in the +// form of utility functions or additional generated methods of the Any +// type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; Any any; any.PackFrom(foo); ... if +// (any.UnpackTo(&foo)) { ... } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; Any any = Any.pack(foo); ... if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) any = Any() any.Pack(foo) ... if +// any.Is(Foo.DESCRIPTOR): any.Unpack(foo) ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' in +// the type URL, for example "foo.bar.com/x/y.z" will yield type name +// "y.z". +// +// +// +// JSON ==== The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. +// Example: +// +// package google.profile; message Person { string first_name = 1; +// string last_name = 2; } +// +// { "@type": "type.googleapis.com/google.profile.Person", "firstName": +// , "lastName": } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` field. +// Example (for message [google.protobuf.Duration][]): +// +// { "@type": "type.googleapis.com/google.protobuf.Duration", "value": +// "1.212s" } +type Any struct { + // TypeUrl: A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. 
* The last segment of + // the URL's path must represent the fully qualified name of the type + // (as in `path/google.protobuf.Duration`). The name should be in a + // canonical form (e.g., leading "." is not accepted). * An HTTP GET on + // the URL must yield a [google.protobuf.Type][] value in binary format, + // or produce an error. * Applications are allowed to cache lookup + // results based on the URL, or have them precompiled into a binary to + // avoid any lookup. Therefore, binary compatibility needs to be + // preserved on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + TypeUrl string `json:"typeUrl,omitempty"` + + // Value: Must be a valid serialized protocol buffer of the above + // specified type. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TypeUrl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TypeUrl") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Any) MarshalJSON() ([]byte, error) { + type noMethod Any + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BasicPerfSampleSeries: Encapsulates the metadata for basic sample +// series represented by a line chart +type BasicPerfSampleSeries struct { + // Possible values: + // "cpu" + // "memory" + // "network" + // "perfMetricTypeUnspecified" + PerfMetricType string `json:"perfMetricType,omitempty"` + + // Possible values: + // "kibibyte" + // "percent" + // "perfUnitUnspecified" + PerfUnit string `json:"perfUnit,omitempty"` + + // Possible values: + // "cpuKernel" + // "cpuTotal" + // "cpuUser" + // "memoryRssPrivate" + // "memoryRssShared" + // "memoryRssTotal" + // "ntBytesReceived" + // "ntBytesTransferred" + // "sampleSeriesTypeUnspecified" + SampleSeriesLabel string `json:"sampleSeriesLabel,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PerfMetricType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PerfMetricType") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BasicPerfSampleSeries) MarshalJSON() ([]byte, error) { + type noMethod BasicPerfSampleSeries + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchCreatePerfSamplesRequest: The request must provide up to a +// maximum of 5000 samples to be created; a larger sample size will +// cause an INVALID_ARGUMENT error +type BatchCreatePerfSamplesRequest struct { + // PerfSamples: The set of PerfSamples to create should not include + // existing timestamps + PerfSamples []*PerfSample `json:"perfSamples,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PerfSamples") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PerfSamples") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BatchCreatePerfSamplesRequest) MarshalJSON() ([]byte, error) { + type noMethod BatchCreatePerfSamplesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BatchCreatePerfSamplesResponse struct { + PerfSamples []*PerfSample `json:"perfSamples,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "PerfSamples") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PerfSamples") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BatchCreatePerfSamplesResponse) MarshalJSON() ([]byte, error) { + type noMethod BatchCreatePerfSamplesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CPUInfo struct { + // CpuProcessor: description of the device processor ie '1.8 GHz hexa + // core 64-bit ARMv8-A' + CpuProcessor string `json:"cpuProcessor,omitempty"` + + // CpuSpeedInGhz: the CPU clock speed in GHz + CpuSpeedInGhz float64 `json:"cpuSpeedInGhz,omitempty"` + + // NumberOfCores: the number of CPU cores + NumberOfCores int64 `json:"numberOfCores,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CpuProcessor") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CpuProcessor") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CPUInfo) MarshalJSON() ([]byte, error) { + type noMethod CPUInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *CPUInfo) UnmarshalJSON(data []byte) error { + type noMethod CPUInfo + var s1 struct { + CpuSpeedInGhz gensupport.JSONFloat64 `json:"cpuSpeedInGhz"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.CpuSpeedInGhz = float64(s1.CpuSpeedInGhz) + return nil +} + +// Duration: A Duration represents a signed, fixed-length span of time +// represented as a count of seconds and fractions of seconds at +// nanosecond resolution. It is independent of any calendar and concepts +// like "day" or "month". It is related to Timestamp in that the +// difference between two Timestamp values is a Duration and it can be +// added or subtracted from a Timestamp. Range is approximately +-10,000 +// years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo +// code. +// +// Timestamp start = ...; Timestamp end = ...; Duration duration = +// ...; +// +// duration.seconds = end.seconds - start.seconds; duration.nanos = +// end.nanos - start.nanos; +// +// if (duration.seconds 0) { duration.seconds += 1; duration.nanos -= +// 1000000000; } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; duration.nanos += 1000000000; } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo +// code. +// +// Timestamp start = ...; Duration duration = ...; Timestamp end = +// ...; +// +// end.seconds = start.seconds + duration.seconds; end.nanos = +// start.nanos + duration.nanos; +// +// if (end.nanos = 1000000000) { end.seconds += 1; end.nanos -= +// 1000000000; } +// +// Example 3: Compute Duration from datetime.timedelta in Python. 
+// +// td = datetime.timedelta(days=3, minutes=10) duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than +// an object, where the string ends in the suffix "s" (indicating +// seconds) and is preceded by the number of seconds, with nanoseconds +// expressed as fractional seconds. For example, 3 seconds with 0 +// nanoseconds should be encoded in JSON format as "3s", while 3 seconds +// and 1 nanosecond should be expressed in JSON format as +// "3.000000001s", and 3 seconds and 1 microsecond should be expressed +// in JSON format as "3.000001s". +type Duration struct { + // Nanos: Signed fractions of a second at nanosecond resolution of the + // span of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For + // durations of one second or more, a non-zero value for the `nanos` + // field must be of the same sign as the `seconds` field. Must be from + // -999,999,999 to +999,999,999 inclusive. + Nanos int64 `json:"nanos,omitempty"` + + // Seconds: Signed seconds of the span of time. Must be from + // -315,576,000,000 to +315,576,000,000 inclusive. + Seconds int64 `json:"seconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Nanos") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nanos") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Duration) MarshalJSON() ([]byte, error) { + type noMethod Duration + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Execution: An Execution represents a collection of Steps. For +// instance, it could represent: - a mobile test executed across a range +// of device configurations - a jenkins job with a build step followed +// by a test step +// +// The maximum size of an execution message is 1 MiB. +// +// An Execution can be updated until its state is set to COMPLETE at +// which point it becomes immutable. +type Execution struct { + // CompletionTime: The time when the Execution status transitioned to + // COMPLETE. + // + // This value will be set automatically when state transitions to + // COMPLETE. + // + // - In response: set if the execution state is COMPLETE. - In + // create/update request: never set + CompletionTime *Timestamp `json:"completionTime,omitempty"` + + // CreationTime: The time when the Execution was created. + // + // This value will be set automatically when CreateExecution is + // called. + // + // - In response: always set - In create/update request: never set + CreationTime *Timestamp `json:"creationTime,omitempty"` + + // ExecutionId: A unique identifier within a History for this + // Execution. 
+ // + // Returns INVALID_ARGUMENT if this field is set or overwritten by the + // caller. + // + // - In response always set - In create/update request: never set + ExecutionId string `json:"executionId,omitempty"` + + // Outcome: Classify the result, for example into SUCCESS or FAILURE + // + // - In response: present if set by create/update request - In + // create/update request: optional + Outcome *Outcome `json:"outcome,omitempty"` + + // State: The initial state is IN_PROGRESS. + // + // The only legal state transitions is from IN_PROGRESS to COMPLETE. + // + // A PRECONDITION_FAILED will be returned if an invalid transition is + // requested. + // + // The state can only be set to COMPLETE once. A FAILED_PRECONDITION + // will be returned if the state is set to COMPLETE multiple times. + // + // If the state is set to COMPLETE, all the in-progress steps within the + // execution will be set as COMPLETE. If the outcome of the step is not + // set, the outcome will be set to INCONCLUSIVE. + // + // - In response always set - In create/update request: optional + // + // Possible values: + // "complete" + // "inProgress" + // "pending" + // "unknownState" + State string `json:"state,omitempty"` + + // TestExecutionMatrixId: TestExecution Matrix ID that the Test Service + // uses. + // + // - In response: present if set by create - In create: optional - In + // update: never set + TestExecutionMatrixId string `json:"testExecutionMatrixId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CompletionTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CompletionTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Execution) MarshalJSON() ([]byte, error) { + type noMethod Execution + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FailureDetail struct { + // Crashed: If the failure was severe because the system under test + // crashed. + Crashed bool `json:"crashed,omitempty"` + + // NotInstalled: If an app is not installed and thus no test can be run + // with the app. This might be caused by trying to run a test on an + // unsupported platform. + NotInstalled bool `json:"notInstalled,omitempty"` + + // OtherNativeCrash: If a native process other than the app crashed. + OtherNativeCrash bool `json:"otherNativeCrash,omitempty"` + + // TimedOut: If the test overran some time limit, and that is why it + // failed. + TimedOut bool `json:"timedOut,omitempty"` + + // UnableToCrawl: If the robo was unable to crawl the app; perhaps + // because the app did not start. 
+ UnableToCrawl bool `json:"unableToCrawl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Crashed") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Crashed") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FailureDetail) MarshalJSON() ([]byte, error) { + type noMethod FailureDetail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FileReference: A reference to a file. +type FileReference struct { + // FileUri: The URI of a file stored in Google Cloud Storage. + // + // For example: http://storage.googleapis.com/mybucket/path/to/test.xml + // or in gsutil format: gs://mybucket/path/to/test.xml with + // version-specific info, + // gs://mybucket/path/to/test.xml#1360383693690000 + // + // An INVALID_ARGUMENT error will be returned if the URI format is not + // supported. + // + // - In response: always set - In create/update request: always set + FileUri string `json:"fileUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FileUri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FileUri") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FileReference) MarshalJSON() ([]byte, error) { + type noMethod FileReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// History: A History represents a sorted list of Executions ordered by +// the start_timestamp_millis field (descending). It can be used to +// group all the Executions of a continuous build. +// +// Note that the ordering only operates on one-dimension. If a +// repository has multiple branches, it means that multiple histories +// will need to be used in order to order Executions per branch. +type History struct { + // DisplayName: A short human-readable (plain text) name to display in + // the UI. Maximum of 100 characters. + // + // - In response: present if set during create. 
- In create request: + // optional + DisplayName string `json:"displayName,omitempty"` + + // HistoryId: A unique identifier within a project for this + // History. + // + // Returns INVALID_ARGUMENT if this field is set or overwritten by the + // caller. + // + // - In response always set - In create request: never set + HistoryId string `json:"historyId,omitempty"` + + // Name: A name to uniquely identify a history within a project. Maximum + // of 100 characters. + // + // - In response always set - In create request: always set + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *History) MarshalJSON() ([]byte, error) { + type noMethod History + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Image: An image, with a link to the main image and a thumbnail. +type Image struct { + // Error: An error explaining why the thumbnail could not be rendered. + Error *Status `json:"error,omitempty"` + + // SourceImage: A reference to the full-size, original image. + // + // This is the same as the tool_outputs entry for the image under its + // Step. + // + // Always set. + SourceImage *ToolOutputReference `json:"sourceImage,omitempty"` + + // StepId: The step to which the image is attached. + // + // Always set. + StepId string `json:"stepId,omitempty"` + + // Thumbnail: The thumbnail. + Thumbnail *Thumbnail `json:"thumbnail,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Error") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *Image) MarshalJSON() ([]byte, error) { + type noMethod Image + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InconclusiveDetail struct { + // AbortedByUser: If the end user aborted the test execution before a + // pass or fail could be determined. For example, the user pressed + // ctrl-c which sent a kill signal to the test runner while the test was + // running. + AbortedByUser bool `json:"abortedByUser,omitempty"` + + // InfrastructureFailure: If the test runner could not determine success + // or failure because the test depends on a component other than the + // system under test which failed. + // + // For example, a mobile test requires provisioning a device where the + // test executes, and that provisioning can fail. + InfrastructureFailure bool `json:"infrastructureFailure,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AbortedByUser") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AbortedByUser") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InconclusiveDetail) MarshalJSON() ([]byte, error) { + type noMethod InconclusiveDetail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ListExecutionsResponse struct { + // Executions: Executions. + // + // Always set. + Executions []*Execution `json:"executions,omitempty"` + + // NextPageToken: A continuation token to resume the query at the next + // item. + // + // Will only be set if there are more Executions to fetch. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Executions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Executions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ListExecutionsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListExecutionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListHistoriesResponse: Response message for HistoryService.List +type ListHistoriesResponse struct { + // Histories: Histories. + Histories []*History `json:"histories,omitempty"` + + // NextPageToken: A continuation token to resume the query at the next + // item. + // + // Will only be set if there are more histories to fetch. + // + // Tokens are valid for up to one hour from the time of the first list + // request. For instance, if you make a list request at 1PM and use the + // token from this first request 10 minutes later, the token from this + // second response will only be valid for 50 minutes. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Histories") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Histories") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListHistoriesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListHistoriesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ListPerfSampleSeriesResponse struct { + // PerfSampleSeries: The resulting PerfSampleSeries sorted by id + PerfSampleSeries []*PerfSampleSeries `json:"perfSampleSeries,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "PerfSampleSeries") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PerfSampleSeries") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *ListPerfSampleSeriesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListPerfSampleSeriesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ListPerfSamplesResponse struct { + // NextPageToken: Optional, returned if result size exceeds the page + // size specified in the request (or the default page size, 500, if + // unspecified). It indicates the last sample timestamp to be used as + // page_token in subsequent request + NextPageToken string `json:"nextPageToken,omitempty"` + + PerfSamples []*PerfSample `json:"perfSamples,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListPerfSamplesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListPerfSamplesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListStepThumbnailsResponse: A response containing the thumbnails in a +// step. +type ListStepThumbnailsResponse struct { + // NextPageToken: A continuation token to resume the query at the next + // item. + // + // If set, indicates that there are more thumbnails to read, by calling + // list again with this value in the page_token field. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Thumbnails: A list of image data. + // + // Images are returned in a deterministic order; they are ordered by + // these factors, in order of importance: * First, by their associated + // test case. Images without a test case are considered greater than + // images with one. * Second, by their creation time. Images without a + // creation time are greater than images with one. * Third, by the order + // in which they were added to the step (by calls to CreateStep or + // UpdateStep). + Thumbnails []*Image `json:"thumbnails,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListStepThumbnailsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListStepThumbnailsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListStepsResponse: Response message for StepService.List. +type ListStepsResponse struct { + // NextPageToken: A continuation token to resume the query at the next + // item. + // + // If set, indicates that there are more steps to read, by calling list + // again with this value in the page_token field. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Steps: Steps. + Steps []*Step `json:"steps,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListStepsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListStepsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MemoryInfo struct { + // MemoryCapInKibibyte: Maximum memory that can be allocated to the + // process in KiB + MemoryCapInKibibyte int64 `json:"memoryCapInKibibyte,omitempty,string"` + + // MemoryTotalInKibibyte: Total memory available on the device in KiB + MemoryTotalInKibibyte int64 `json:"memoryTotalInKibibyte,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "MemoryCapInKibibyte") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MemoryCapInKibibyte") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *MemoryInfo) MarshalJSON() ([]byte, error) { + type noMethod MemoryInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Outcome: Interprets a result so that humans and machines can act on +// it. +type Outcome struct { + // FailureDetail: More information about a FAILURE outcome. + // + // Returns INVALID_ARGUMENT if this field is set but the summary is not + // FAILURE. + // + // Optional + FailureDetail *FailureDetail `json:"failureDetail,omitempty"` + + // InconclusiveDetail: More information about an INCONCLUSIVE + // outcome. + // + // Returns INVALID_ARGUMENT if this field is set but the summary is not + // INCONCLUSIVE. + // + // Optional + InconclusiveDetail *InconclusiveDetail `json:"inconclusiveDetail,omitempty"` + + // SkippedDetail: More information about a SKIPPED outcome. + // + // Returns INVALID_ARGUMENT if this field is set but the summary is not + // SKIPPED. + // + // Optional + SkippedDetail *SkippedDetail `json:"skippedDetail,omitempty"` + + // SuccessDetail: More information about a SUCCESS outcome. + // + // Returns INVALID_ARGUMENT if this field is set but the summary is not + // SUCCESS. + // + // Optional + SuccessDetail *SuccessDetail `json:"successDetail,omitempty"` + + // Summary: The simplest way to interpret a result. + // + // Required + // + // Possible values: + // "failure" + // "inconclusive" + // "skipped" + // "success" + // "unset" + Summary string `json:"summary,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FailureDetail") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FailureDetail") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Outcome) MarshalJSON() ([]byte, error) { + type noMethod Outcome + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PerfEnvironment: Encapsulates performance environment info +type PerfEnvironment struct { + // CpuInfo: CPU related environment info + CpuInfo *CPUInfo `json:"cpuInfo,omitempty"` + + // MemoryInfo: Memory related environment info + MemoryInfo *MemoryInfo `json:"memoryInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CpuInfo") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CpuInfo") to include in + // API requests with the JSON null value. 
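Per the Outcome documentation above, whichever detail field is set must agree with Summary, otherwise the service returns INVALID_ARGUMENT. A hedged sketch of a well-formed inconclusive outcome, using only fields shown in this hunk (import path assumed as before):

package example

import toolresults "google.golang.org/api/toolresults/v1beta3" // assumed import path

// inconclusiveOutcome builds an Outcome whose Summary matches its detail field.
func inconclusiveOutcome() *toolresults.Outcome {
	return &toolresults.Outcome{
		Summary: "inconclusive",
		InconclusiveDetail: &toolresults.InconclusiveDetail{
			InfrastructureFailure: true, // e.g. device provisioning failed
		},
	}
}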
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PerfEnvironment) MarshalJSON() ([]byte, error) { + type noMethod PerfEnvironment + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PerfMetricsSummary: A summary of perf metrics collected and +// performance environment info +type PerfMetricsSummary struct { + // ExecutionId: A tool results execution ID. + ExecutionId string `json:"executionId,omitempty"` + + // HistoryId: A tool results history ID. + HistoryId string `json:"historyId,omitempty"` + + // PerfEnvironment: Describes the environment in which the performance + // metrics were collected + PerfEnvironment *PerfEnvironment `json:"perfEnvironment,omitempty"` + + // PerfMetrics: Set of resource collected + // + // Possible values: + // "cpu" + // "memory" + // "network" + // "perfMetricTypeUnspecified" + PerfMetrics []string `json:"perfMetrics,omitempty"` + + // ProjectId: The cloud project + ProjectId string `json:"projectId,omitempty"` + + // StepId: A tool results step ID. + StepId string `json:"stepId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ExecutionId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExecutionId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PerfMetricsSummary) MarshalJSON() ([]byte, error) { + type noMethod PerfMetricsSummary + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PerfSample: Resource representing a single performance measure or +// data point +type PerfSample struct { + // SampleTime: Timestamp of collection + SampleTime *Timestamp `json:"sampleTime,omitempty"` + + // Value: Value observed + Value float64 `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SampleTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SampleTime") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PerfSample) MarshalJSON() ([]byte, error) { + type noMethod PerfSample + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *PerfSample) UnmarshalJSON(data []byte) error { + type noMethod PerfSample + var s1 struct { + Value gensupport.JSONFloat64 `json:"value"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Value = float64(s1.Value) + return nil +} + +// PerfSampleSeries: Resource representing a collection of performance +// samples (or data points) +type PerfSampleSeries struct { + // BasicPerfSampleSeries: Basic series represented by a line chart + BasicPerfSampleSeries *BasicPerfSampleSeries `json:"basicPerfSampleSeries,omitempty"` + + // ExecutionId: A tool results execution ID. + ExecutionId string `json:"executionId,omitempty"` + + // HistoryId: A tool results history ID. + HistoryId string `json:"historyId,omitempty"` + + // ProjectId: The cloud project + ProjectId string `json:"projectId,omitempty"` + + // SampleSeriesId: A sample series id + SampleSeriesId string `json:"sampleSeriesId,omitempty"` + + // StepId: A tool results step ID. + StepId string `json:"stepId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "BasicPerfSampleSeries") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BasicPerfSampleSeries") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PerfSampleSeries) MarshalJSON() ([]byte, error) { + type noMethod PerfSampleSeries + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ProjectSettings: Per-project settings for the Tool Results service. +type ProjectSettings struct { + // DefaultBucket: The name of the Google Cloud Storage bucket to which + // results are written. + // + // By default, this is unset. + // + // In update request: optional In response: optional + DefaultBucket string `json:"defaultBucket,omitempty"` + + // Name: The name of the project's settings. + // + // Always of the form: projects/{project-id}/settings + // + // In update request: never set In response: always set + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
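The PerfMetricsSummary, PerfEnvironment, and MemoryInfo types above nest together when reporting collected metrics. A sketch of a populated summary; every identifier value here is illustrative rather than taken from a real project:

package example

import toolresults "google.golang.org/api/toolresults/v1beta3" // assumed import path

// examplePerfSummary shows how the perf types compose; values are illustrative.
func examplePerfSummary() *toolresults.PerfMetricsSummary {
	return &toolresults.PerfMetricsSummary{
		ProjectId:   "my-project",
		HistoryId:   "bh.1234",
		ExecutionId: "5678",
		StepId:      "step-1",
		PerfMetrics: []string{"cpu", "memory"},
		PerfEnvironment: &toolresults.PerfEnvironment{
			MemoryInfo: &toolresults.MemoryInfo{
				MemoryTotalInKibibyte: 3 * 1024 * 1024, // 3 GiB device
			},
		},
	}
}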
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DefaultBucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultBucket") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ProjectSettings) MarshalJSON() ([]byte, error) { + type noMethod ProjectSettings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublishXunitXmlFilesRequest: Request message for +// StepService.PublishXunitXmlFiles. +type PublishXunitXmlFilesRequest struct { + // XunitXmlFiles: URI of the Xunit XML files to publish. + // + // The maximum size of the file this reference is pointing to is + // 50MB. + // + // Required. + XunitXmlFiles []*FileReference `json:"xunitXmlFiles,omitempty"` + + // ForceSendFields is a list of field names (e.g. "XunitXmlFiles") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "XunitXmlFiles") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublishXunitXmlFilesRequest) MarshalJSON() ([]byte, error) { + type noMethod PublishXunitXmlFilesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SkippedDetail struct { + // IncompatibleAppVersion: If the App doesn't support the specific API + // level. + IncompatibleAppVersion bool `json:"incompatibleAppVersion,omitempty"` + + // IncompatibleArchitecture: If the App doesn't run on the specific + // architecture, for example, x86. + IncompatibleArchitecture bool `json:"incompatibleArchitecture,omitempty"` + + // IncompatibleDevice: If the requested OS version doesn't run on the + // specific device model. + IncompatibleDevice bool `json:"incompatibleDevice,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "IncompatibleAppVersion") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. 
+ // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IncompatibleAppVersion") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SkippedDetail) MarshalJSON() ([]byte, error) { + type noMethod SkippedDetail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StackTrace: A stacktrace. +type StackTrace struct { + // Exception: The stack trace message. + // + // Required + Exception string `json:"exception,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Exception") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Exception") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StackTrace) MarshalJSON() ([]byte, error) { + type noMethod StackTrace + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). The +// error model is designed to be: +// +// - Simple to use and understand for most users - Flexible enough to +// meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, and error details. The error code should be an enum value of +// [google.rpc.Code][], but it may accept additional error codes if +// needed. The error message should be a developer-facing English +// message that helps developers *understand* and *resolve* the error. +// If a localized user-facing error message is needed, put the localized +// message in the error details or localize it in the client. The +// optional error details may contain arbitrary information about the +// error. There is a predefined set of error detail types in the package +// `google.rpc` which can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it is not necessarily the actual wire format. 
When the +// `Status` message is exposed in different client libraries and +// different wire protocols, it can be mapped differently. For example, +// it will likely be mapped to some exceptions in Java, but more likely +// mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a consistent +// developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the +// client, it may embed the `Status` in the normal response to indicate +// the partial errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may have a `Status` message for error reporting purpose. +// +// - Batch operations. If a client uses batch request and batch +// response, the `Status` message should be used directly inside batch +// response, one for each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation results in its response, the status of those operations +// should be represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could be used directly after any stripping needed for +// security/privacy reasons. +type Status struct { + // Code: The status code, which should be an enum value of + // [google.rpc.Code][]. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There will + // be a common set of message types for APIs to use. + Details []*Any `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the [google.rpc.Status.details][] field, or localized by the + // client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type noMethod Status + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Step: A Step represents a single operation performed as part of +// Execution. A step can be used to represent the execution of a tool ( +// for example a test runner execution or an execution of a +// compiler). +// +// Steps can overlap (for instance two steps might have the same start +// time if some operations are done in parallel). +// +// Here is an example, let's consider that we have a continuous build is +// executing a test runner for each iteration. 
The workflow would look +// like: - user creates a Execution with id 1 - user creates an +// TestExecutionStep with id 100 for Execution 1 - user update +// TestExecutionStep with id 100 to add a raw xml log + the service +// parses the xml logs and returns a TestExecutionStep with updated +// TestResult(s). - user update the status of TestExecutionStep with id +// 100 to COMPLETE +// +// A Step can be updated until its state is set to COMPLETE at which +// points it becomes immutable. +type Step struct { + // CompletionTime: The time when the step status was set to + // complete. + // + // This value will be set automatically when state transitions to + // COMPLETE. + // + // - In response: set if the execution state is COMPLETE. - In + // create/update request: never set + CompletionTime *Timestamp `json:"completionTime,omitempty"` + + // CreationTime: The time when the step was created. + // + // - In response: always set - In create/update request: never set + CreationTime *Timestamp `json:"creationTime,omitempty"` + + // Description: A description of this tool For example: mvn clean + // package -D skipTests=true + // + // - In response: present if set by create/update request - In + // create/update request: optional + Description string `json:"description,omitempty"` + + // DeviceUsageDuration: How much the device resource is used to perform + // the test. + // + // This is the device usage used for billing purpose, which is different + // from the run_duration, for example, infrastructure failure won't be + // charged for device usage. + // + // PRECONDITION_FAILED will be returned if one attempts to set a + // device_usage on a step which already has this field set. + // + // - In response: present if previously set. - In create request: + // optional - In update request: optional + DeviceUsageDuration *Duration `json:"deviceUsageDuration,omitempty"` + + // DimensionValue: If the execution containing this step has any + // dimension_definition set, then this field allows the child to specify + // the values of the dimensions. + // + // The keys must exactly match the dimension_definition of the + // execution. + // + // For example, if the execution has `dimension_definition = ['attempt', + // 'device']` then a step must define values for those dimensions, eg. + // `dimension_value = ['attempt': '1', 'device': 'Nexus 6']` + // + // If a step does not participate in one dimension of the matrix, the + // value for that dimension should be empty string. For example, if one + // of the tests is executed by a runner which does not support retries, + // the step could have `dimension_value = ['attempt': '', 'device': + // 'Nexus 6']` + // + // If the step does not participate in any dimensions of the matrix, it + // may leave dimension_value unset. + // + // A PRECONDITION_FAILED will be returned if any of the keys do not + // exist in the dimension_definition of the execution. + // + // A PRECONDITION_FAILED will be returned if another step in this + // execution already has the same name and dimension_value, but differs + // on other data fields, for example, step field is different. + // + // A PRECONDITION_FAILED will be returned if dimension_value is set, and + // there is a dimension_definition in the execution which is not + // specified as one of the keys. 
+ // + // - In response: present if set by create - In create request: optional + // - In update request: never set + DimensionValue []*StepDimensionValueEntry `json:"dimensionValue,omitempty"` + + // HasImages: Whether any of the outputs of this step are images whose + // thumbnails can be fetched with ListThumbnails. + // + // - In response: always set - In create/update request: never set + HasImages bool `json:"hasImages,omitempty"` + + // Labels: Arbitrary user-supplied key/value pairs that are associated + // with the step. + // + // Users are responsible for managing the key namespace such that keys + // don't accidentally collide. + // + // An INVALID_ARGUMENT will be returned if the number of labels exceeds + // 100 or if the length of any of the keys or values exceeds 100 + // characters. + // + // - In response: always set - In create request: optional - In update + // request: optional; any new key/value pair will be added to the map, + // and any new value for an existing key will update that key's value + Labels []*StepLabelsEntry `json:"labels,omitempty"` + + // Name: A short human-readable name to display in the UI. Maximum of + // 100 characters. For example: Clean build + // + // A PRECONDITION_FAILED will be returned upon creating a new step if it + // shares its name and dimension_value with an existing step. If two + // steps represent a similar action, but have different dimension + // values, they should share the same name. For instance, if the same + // set of tests is run on two different platforms, the two steps should + // have the same name. + // + // - In response: always set - In create request: always set - In update + // request: never set + Name string `json:"name,omitempty"` + + // Outcome: Classification of the result, for example into SUCCESS or + // FAILURE + // + // - In response: present if set by create/update request - In + // create/update request: optional + Outcome *Outcome `json:"outcome,omitempty"` + + // RunDuration: How long it took for this step to run. + // + // If unset, this is set to the difference between creation_time and + // completion_time when the step is set to the COMPLETE state. In some + // cases, it is appropriate to set this value separately: For instance, + // if a step is created, but the operation it represents is queued for a + // few minutes before it executes, it would be appropriate not to + // include the time spent queued in its + // run_duration. + // + // PRECONDITION_FAILED will be returned if one attempts to set a + // run_duration on a step which already has this field set. + // + // - In response: present if previously set; always present on COMPLETE + // step - In create request: optional - In update request: optional + RunDuration *Duration `json:"runDuration,omitempty"` + + // State: The initial state is IN_PROGRESS. The only legal state + // transitions are * IN_PROGRESS -> COMPLETE + // + // A PRECONDITION_FAILED will be returned if an invalid transition is + // requested. + // + // It is valid to create Step with a state set to COMPLETE. The state + // can only be set to COMPLETE once. A PRECONDITION_FAILED will be + // returned if the state is set to COMPLETE multiple times. + // + // - In response: always set - In create/update request: optional + // + // Possible values: + // "complete" + // "inProgress" + // "pending" + // "unknownState" + State string `json:"state,omitempty"` + + // StepId: A unique identifier within a Execution for this + // Step. 
+ // + // Returns INVALID_ARGUMENT if this field is set or overwritten by the + // caller. + // + // - In response: always set - In create/update request: never set + StepId string `json:"stepId,omitempty"` + + // TestExecutionStep: An execution of a test runner. + TestExecutionStep *TestExecutionStep `json:"testExecutionStep,omitempty"` + + // ToolExecutionStep: An execution of a tool (used for steps we don't + // explicitly support). + ToolExecutionStep *ToolExecutionStep `json:"toolExecutionStep,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CompletionTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CompletionTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Step) MarshalJSON() ([]byte, error) { + type noMethod Step + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StepDimensionValueEntry struct { + Key string `json:"key,omitempty"` + + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StepDimensionValueEntry) MarshalJSON() ([]byte, error) { + type noMethod StepDimensionValueEntry + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StepLabelsEntry struct { + Key string `json:"key,omitempty"` + + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StepLabelsEntry) MarshalJSON() ([]byte, error) { + type noMethod StepLabelsEntry + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SuccessDetail struct { + // OtherNativeCrash: If a native process other than the app crashed. + OtherNativeCrash bool `json:"otherNativeCrash,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OtherNativeCrash") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OtherNativeCrash") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SuccessDetail) MarshalJSON() ([]byte, error) { + type noMethod SuccessDetail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestCaseReference: A reference to a test case. +// +// Test case references are canonically ordered lexicographically by +// these three factors: * First, by test_suite_name. * Second, by +// class_name. * Third, by name. +type TestCaseReference struct { + // ClassName: The name of the class. + ClassName string `json:"className,omitempty"` + + // Name: The name of the test case. + // + // Required. + Name string `json:"name,omitempty"` + + // TestSuiteName: The name of the test suite to which this test case + // belongs. + TestSuiteName string `json:"testSuiteName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClassName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClassName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
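The Step documentation above describes dimension values, labels, and the IN_PROGRESS -> COMPLETE state machine. A sketch of a Step create payload that matches the "attempt"/"device" dimension example from that documentation; all values are illustrative and the import path is assumed as before:

package example

import toolresults "google.golang.org/api/toolresults/v1beta3" // assumed import path

// newStep builds an illustrative Step create payload.
func newStep() *toolresults.Step {
	return &toolresults.Step{
		Name:  "Instrumentation test",
		State: "inProgress", // may later be updated to "complete" exactly once
		DimensionValue: []*toolresults.StepDimensionValueEntry{
			{Key: "attempt", Value: "1"},
			{Key: "device", Value: "Nexus 6"},
		},
		Labels: []*toolresults.StepLabelsEntry{
			{Key: "build", Value: "nightly"},
		},
	}
}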
+ NullFields []string `json:"-"` +} + +func (s *TestCaseReference) MarshalJSON() ([]byte, error) { + type noMethod TestCaseReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestExecutionStep: A step that represents running tests. +// +// It accepts ant-junit xml files which will be parsed into structured +// test results by the service. Xml file paths are updated in order to +// append more files, however they can't be deleted. +// +// Users can also add test results manually by using the test_result +// field. +type TestExecutionStep struct { + // TestIssues: Issues observed during the test execution. + // + // For example, if the mobile app under test crashed during the test, + // the error message and the stack trace content can be recorded here to + // assist debugging. + // + // - In response: present if set by create or update - In create/update + // request: optional + TestIssues []*TestIssue `json:"testIssues,omitempty"` + + // TestSuiteOverviews: List of test suite overview contents. This could + // be parsed from xUnit XML log by server, or uploaded directly by user. + // This references should only be called when test suites are fully + // parsed or uploaded. + // + // The maximum allowed number of test suite overviews per step is + // 1000. + // + // - In response: always set - In create request: optional - In update + // request: never (use publishXunitXmlFiles custom method instead) + TestSuiteOverviews []*TestSuiteOverview `json:"testSuiteOverviews,omitempty"` + + // TestTiming: The timing break down of the test execution. + // + // - In response: present if set by create or update - In create/update + // request: optional + TestTiming *TestTiming `json:"testTiming,omitempty"` + + // ToolExecution: Represents the execution of the test runner. + // + // The exit code of this tool will be used to determine if the test + // passed. + // + // - In response: always set - In create/update request: optional + ToolExecution *ToolExecution `json:"toolExecution,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TestIssues") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TestIssues") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestExecutionStep) MarshalJSON() ([]byte, error) { + type noMethod TestExecutionStep + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIssue: An abnormal event observed during the test execution. +type TestIssue struct { + // ErrorMessage: A brief human-readable message describing the abnormal + // event. + // + // Required. + ErrorMessage string `json:"errorMessage,omitempty"` + + // StackTrace: Optional. 
+ StackTrace *StackTrace `json:"stackTrace,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorMessage") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorMessage") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIssue) MarshalJSON() ([]byte, error) { + type noMethod TestIssue + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestSuiteOverview: A summary of a test suite result either parsed +// from XML or uploaded directly by a user. +// +// Note: the API related comments are for StepService only. This message +// is also being used in ExecutionService in a read only mode for the +// corresponding step. +type TestSuiteOverview struct { + // ErrorCount: Number of test cases in error, typically set by the + // service by parsing the xml_source. + // + // - In create/response: always set - In update request: never + ErrorCount int64 `json:"errorCount,omitempty"` + + // FailureCount: Number of failed test cases, typically set by the + // service by parsing the xml_source. May also be set by the user. + // + // - In create/response: always set - In update request: never + FailureCount int64 `json:"failureCount,omitempty"` + + // Name: The name of the test suite. + // + // - In create/response: always set - In update request: never + Name string `json:"name,omitempty"` + + // SkippedCount: Number of test cases not run, typically set by the + // service by parsing the xml_source. + // + // - In create/response: always set - In update request: never + SkippedCount int64 `json:"skippedCount,omitempty"` + + // TotalCount: Number of test cases, typically set by the service by + // parsing the xml_source. + // + // - In create/response: always set - In update request: never + TotalCount int64 `json:"totalCount,omitempty"` + + // XmlSource: If this test suite was parsed from XML, this is the URI + // where the original XML file is stored. + // + // Note: Multiple test suites can share the same xml_source + // + // Returns INVALID_ARGUMENT if the uri format is not supported. + // + // - In create/response: optional - In update request: never + XmlSource *FileReference `json:"xmlSource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorCount") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestSuiteOverview) MarshalJSON() ([]byte, error) { + type noMethod TestSuiteOverview + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestTiming: Testing timing break down to know phases. +type TestTiming struct { + // TestProcessDuration: How long it took to run the test process. + // + // - In response: present if previously set. - In create/update request: + // optional + TestProcessDuration *Duration `json:"testProcessDuration,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TestProcessDuration") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TestProcessDuration") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TestTiming) MarshalJSON() ([]byte, error) { + type noMethod TestTiming + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Thumbnail: A single thumbnail, with its size and format. +type Thumbnail struct { + // ContentType: The thumbnail's content type, i.e. "image/png". + // + // Always set. + ContentType string `json:"contentType,omitempty"` + + // Data: The thumbnail file itself. + // + // That is, the bytes here are precisely the bytes that make up the + // thumbnail file; they can be served as an image as-is (with the + // appropriate content type.) + // + // Always set. + Data string `json:"data,omitempty"` + + // HeightPx: The height of the thumbnail, in pixels. + // + // Always set. + HeightPx int64 `json:"heightPx,omitempty"` + + // WidthPx: The width of the thumbnail, in pixels. + // + // Always set. + WidthPx int64 `json:"widthPx,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContentType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContentType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Thumbnail) MarshalJSON() ([]byte, error) { + type noMethod Thumbnail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Timestamp: A Timestamp represents a point in time independent of any +// time zone or calendar, represented as seconds and fractions of +// seconds at nanosecond resolution in UTC Epoch time. It is encoded +// using the Proleptic Gregorian Calendar which extends the Gregorian +// calendar backwards to year one. It is encoded assuming all minutes +// are 60 seconds long, i.e. leap seconds are "smeared" so that no leap +// second table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from +// RFC 3339 date strings. See +// [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc333 +// 9.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; gettimeofday(&tv, NULL); +// +// Timestamp timestamp; timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 +// `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; GetSystemTimeAsFileTime(&ft); UINT64 ticks = +// (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch +// 1601-01-01T00:00:00Z // is 11644473600 seconds before Unix epoch +// 1970-01-01T00:00:00Z. Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java +// `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / +// 1000) .setNanos((int) ((millis % 1000) * +// 1000000)).build(); +// +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, +// {day}, {hour}, {min}, and {sec} are zero-padded to two digits each. +// The fractional seconds, which can go up to 9 digits (i.e. up to 1 +// nanosecond resolution), are optional. The "Z" suffix indicates the +// timezone ("UTC"); the timezone is required, though only UTC (as +// indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScrip +// t/Reference/Global_Objects/Date/toISOString] method. In Python, a +// standard `datetime.datetime` object can be converted to this format +// using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime +// ) with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in +// Java, one can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) to obtain a formatter capable of generating timestamps in this +// format. +type Timestamp struct { + // Nanos: Non-negative fractions of a second at nanosecond resolution. + // Negative second values with fractions must still have non-negative + // nanos values that count forward in time. Must be from 0 to + // 999,999,999 inclusive. + Nanos int64 `json:"nanos,omitempty"` + + // Seconds: Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `json:"seconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Nanos") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nanos") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Timestamp) MarshalJSON() ([]byte, error) { + type noMethod Timestamp + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ToolExecution: An execution of an arbitrary tool. It could be a test +// runner or a tool copying artifacts or deploying code. +type ToolExecution struct { + // CommandLineArguments: The full tokenized command line including the + // program name (equivalent to argv in a C program). + // + // - In response: present if set by create request - In create request: + // optional - In update request: never set + CommandLineArguments []string `json:"commandLineArguments,omitempty"` + + // ExitCode: Tool execution exit code. This field will be set once the + // tool has exited. + // + // - In response: present if set by create/update request - In create + // request: optional - In update request: optional, a + // FAILED_PRECONDITION error will be returned if an exit_code is already + // set. + ExitCode *ToolExitCode `json:"exitCode,omitempty"` + + // ToolLogs: References to any plain text logs output the tool + // execution. + // + // This field can be set before the tool has exited in order to be able + // to have access to a live view of the logs while the tool is + // running. + // + // The maximum allowed number of tool logs per step is 1000. + // + // - In response: present if set by create/update request - In create + // request: optional - In update request: optional, any value provided + // will be appended to the existing list + ToolLogs []*FileReference `json:"toolLogs,omitempty"` + + // ToolOutputs: References to opaque files of any format output by the + // tool execution. + // + // The maximum allowed number of tool outputs per step is 1000. 
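The Timestamp documentation above gives C, Java, and Python examples but no Go one. A sketch of the equivalent conversion from the standard library's time.Time (import path assumed as before):

package example

import (
	"time"

	toolresults "google.golang.org/api/toolresults/v1beta3" // assumed import path
)

// goTimestamp converts a time.Time into the Timestamp message defined above.
func goTimestamp(t time.Time) *toolresults.Timestamp {
	t = t.UTC()
	return &toolresults.Timestamp{
		Seconds: t.Unix(),              // seconds since the Unix epoch
		Nanos:   int64(t.Nanosecond()), // non-negative fractional part
	}
}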
+ // + // - In response: present if set by create/update request - In create + // request: optional - In update request: optional, any value provided + // will be appended to the existing list + ToolOutputs []*ToolOutputReference `json:"toolOutputs,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "CommandLineArguments") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommandLineArguments") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ToolExecution) MarshalJSON() ([]byte, error) { + type noMethod ToolExecution + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ToolExecutionStep: Generic tool step to be used for binaries we do +// not explicitly support. For example: running cp to copy artifacts +// from one location to another. +type ToolExecutionStep struct { + // ToolExecution: A Tool execution. + // + // - In response: present if set by create/update request - In + // create/update request: optional + ToolExecution *ToolExecution `json:"toolExecution,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ToolExecution") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ToolExecution") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ToolExecutionStep) MarshalJSON() ([]byte, error) { + type noMethod ToolExecutionStep + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ToolExitCode: Exit code from a tool execution. +type ToolExitCode struct { + // Number: Tool execution exit code. A value of 0 means that the + // execution was successful. + // + // - In response: always set - In create/update request: always set + Number int64 `json:"number,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Number") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Number") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ToolExitCode) MarshalJSON() ([]byte, error) { + type noMethod ToolExitCode + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ToolOutputReference: A reference to a ToolExecution output file. +type ToolOutputReference struct { + // CreationTime: The creation time of the file. + // + // - In response: present if set by create/update request - In + // create/update request: optional + CreationTime *Timestamp `json:"creationTime,omitempty"` + + // Output: A FileReference to an output file. + // + // - In response: always set - In create/update request: always set + Output *FileReference `json:"output,omitempty"` + + // TestCase: The test case to which this output file belongs. + // + // - In response: present if set by create/update request - In + // create/update request: optional + TestCase *TestCaseReference `json:"testCase,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreationTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ToolOutputReference) MarshalJSON() ([]byte, error) { + type noMethod ToolOutputReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "toolresults.projects.getSettings": + +type ProjectsGetSettingsCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetSettings: Gets the Tool Results settings for a project. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to read from +// project +func (r *ProjectsService) GetSettings(projectId string) *ProjectsGetSettingsCall { + c := &ProjectsGetSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetSettingsCall) Fields(s ...googleapi.Field) *ProjectsGetSettingsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetSettingsCall) IfNoneMatch(entityTag string) *ProjectsGetSettingsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetSettingsCall) Context(ctx context.Context) *ProjectsGetSettingsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetSettingsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetSettingsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/settings") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.getSettings" call. +// Exactly one of *ProjectSettings or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ProjectSettings.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGetSettingsCall) Do(opts ...googleapi.CallOption) (*ProjectSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ProjectSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the Tool Results settings for a project.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read from project", + // "httpMethod": "GET", + // "id": "toolresults.projects.getSettings", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/settings", + // "response": { + // "$ref": "ProjectSettings" + // } + // } + +} + +// method id "toolresults.projects.initializeSettings": + +type ProjectsInitializeSettingsCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// InitializeSettings: Creates resources for settings which have not yet +// been set. +// +// Currently, this creates a single resource: a Google Cloud Storage +// bucket, to be used as the default bucket for this project. The bucket +// is created in the name of the user calling. Except in rare cases, +// calling this method in parallel from multiple clients will only +// create a single bucket. In order to avoid unnecessary storage +// charges, the bucket is configured to automatically delete objects +// older than 90 days. +// +// The bucket is created with the project-private ACL: All project team +// members are given permissions to the bucket and objects created +// within it according to their roles. Project owners have owners +// rights, and so on. The default ACL on objects created in the bucket +// is project-private as well. See Google Cloud Storage documentation +// for more details. +// +// If there is already a default bucket set and the project can access +// the bucket, this call does nothing. However, if the project doesn't +// have the permission to access the bucket or the bucket is deteleted, +// a new bucket will be created. +// +// May return any canonical error codes, including the following: +// +// - PERMISSION_DENIED - if the user is not authorized to write to +// project - Any error code raised by Google Cloud Storage +func (r *ProjectsService) InitializeSettings(projectId string) *ProjectsInitializeSettingsCall { + c := &ProjectsInitializeSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInitializeSettingsCall) Fields(s ...googleapi.Field) *ProjectsInitializeSettingsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInitializeSettingsCall) Context(ctx context.Context) *ProjectsInitializeSettingsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInitializeSettingsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInitializeSettingsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}:initializeSettings") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.initializeSettings" call. +// Exactly one of *ProjectSettings or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ProjectSettings.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInitializeSettingsCall) Do(opts ...googleapi.CallOption) (*ProjectSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ProjectSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates resources for settings which have not yet been set.\n\nCurrently, this creates a single resource: a Google Cloud Storage bucket, to be used as the default bucket for this project. The bucket is created in the name of the user calling. Except in rare cases, calling this method in parallel from multiple clients will only create a single bucket. In order to avoid unnecessary storage charges, the bucket is configured to automatically delete objects older than 90 days.\n\nThe bucket is created with the project-private ACL: All project team members are given permissions to the bucket and objects created within it according to their roles. Project owners have owners rights, and so on. The default ACL on objects created in the bucket is project-private as well. See Google Cloud Storage documentation for more details.\n\nIf there is already a default bucket set and the project can access the bucket, this call does nothing. 
However, if the project doesn't have the permission to access the bucket or the bucket is deteleted, a new bucket will be created.\n\nMay return any canonical error codes, including the following:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - Any error code raised by Google Cloud Storage", + // "httpMethod": "POST", + // "id": "toolresults.projects.initializeSettings", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}:initializeSettings", + // "response": { + // "$ref": "ProjectSettings" + // } + // } + +} + +// method id "toolresults.projects.histories.create": + +type ProjectsHistoriesCreateCall struct { + s *Service + projectId string + history *History + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a History. +// +// The returned History will have the id set. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to write to +// project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND +// - if the containing project does not exist +func (r *ProjectsHistoriesService) Create(projectId string, history *History) *ProjectsHistoriesCreateCall { + c := &ProjectsHistoriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.history = history + return c +} + +// RequestId sets the optional parameter "requestId": A unique request +// ID for server to detect duplicated requests. For example, a +// UUID. +// +// Optional, but strongly recommended. +func (c *ProjectsHistoriesCreateCall) RequestId(requestId string) *ProjectsHistoriesCreateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesCreateCall) Fields(s ...googleapi.Field) *ProjectsHistoriesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesCreateCall) Context(ctx context.Context) *ProjectsHistoriesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.history) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.create" call. +// Exactly one of *History or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *History.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsHistoriesCreateCall) Do(opts ...googleapi.CallOption) (*History, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &History{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a History.\n\nThe returned History will have the id set.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing project does not exist", + // "httpMethod": "POST", + // "id": "toolresults.projects.histories.create", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{projectId}/histories", + // "request": { + // "$ref": "History" + // }, + // "response": { + // "$ref": "History" + // } + // } + +} + +// method id "toolresults.projects.histories.get": + +type ProjectsHistoriesGetCall struct { + s *Service + projectId string + historyId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a History. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to read project - +// INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the +// History does not exist +func (r *ProjectsHistoriesService) Get(projectId string, historyId string) *ProjectsHistoriesGetCall { + c := &ProjectsHistoriesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
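+//
+// Illustrative usage (editorial sketch, not part of the generated client):
+// creating a History with the Create call defined above. The package import
+// path (google.golang.org/api/toolresults/v1beta3), the New constructor and
+// the History field names are assumptions not shown in this hunk:
+//
+//   svc, err := toolresults.New(httpClient) // httpClient: an OAuth2-authorized *http.Client
+//   if err != nil {
+//       // handle client construction error
+//   }
+//   requestID := "..." // caller-generated UUID, recommended for de-duplication
+//   h, err := svc.Projects.Histories.Create("my-project", &toolresults.History{}).
+//       RequestId(requestID).
+//       Do()
+//   // h carries the server-assigned history id (field name assumed).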
+func (c *ProjectsHistoriesGetCall) Fields(s ...googleapi.Field) *ProjectsHistoriesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesGetCall) IfNoneMatch(entityTag string) *ProjectsHistoriesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesGetCall) Context(ctx context.Context) *ProjectsHistoriesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.get" call. +// Exactly one of *History or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *History.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsHistoriesGetCall) Do(opts ...googleapi.CallOption) (*History, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &History{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a History.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the History does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.get", + // "parameterOrder": [ + // "projectId", + // "historyId" + // ], + // "parameters": { + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}", + // "response": { + // "$ref": "History" + // } + // } + +} + +// method id "toolresults.projects.histories.list": + +type ProjectsHistoriesListCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists Histories for a given Project. +// +// The histories are sorted by modification time in descending order. +// The history_id key will be used to order the history with the same +// modification time. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to read project - +// INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the +// History does not exist +func (r *ProjectsHistoriesService) List(projectId string) *ProjectsHistoriesListCall { + c := &ProjectsHistoriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// FilterByName sets the optional parameter "filterByName": If set, only +// return histories with the given name. +func (c *ProjectsHistoriesListCall) FilterByName(filterByName string) *ProjectsHistoriesListCall { + c.urlParams_.Set("filterByName", filterByName) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of Histories to fetch. +// +// Default value: 20. The server will use this default if the field is +// not set or has a value of 0. Any value greater than 100 will be +// treated as 100. +func (c *ProjectsHistoriesListCall) PageSize(pageSize int64) *ProjectsHistoriesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A continuation +// token to resume the query at the next item. +func (c *ProjectsHistoriesListCall) PageToken(pageToken string) *ProjectsHistoriesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
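+//
+// Illustrative usage (editorial sketch, not part of the generated client):
+// listing Histories with the List call defined above; svc is assumed to be a
+// *toolresults.Service built elsewhere, and the response field names are
+// assumptions:
+//
+//   resp, err := svc.Projects.Histories.List("my-project").
+//       FilterByName("nightly-ci").
+//       PageSize(50).
+//       Do()
+//   if err != nil {
+//       // handle error
+//   }
+//   // resp.Histories holds at most PageSize entries; resp.NextPageToken,
+//   // when non-empty, resumes the listing via PageToken.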
+func (c *ProjectsHistoriesListCall) Fields(s ...googleapi.Field) *ProjectsHistoriesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesListCall) IfNoneMatch(entityTag string) *ProjectsHistoriesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesListCall) Context(ctx context.Context) *ProjectsHistoriesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.list" call. +// Exactly one of *ListHistoriesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListHistoriesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesListCall) Do(opts ...googleapi.CallOption) (*ListHistoriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListHistoriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists Histories for a given Project.\n\nThe histories are sorted by modification time in descending order. 
The history_id key will be used to order the history with the same modification time.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the History does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.list", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "filterByName": { + // "description": "If set, only return histories with the given name.\n\nOptional.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of Histories to fetch.\n\nDefault value: 20. The server will use this default if the field is not set or has a value of 0. Any value greater than 100 will be treated as 100.\n\nOptional.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A continuation token to resume the query at the next item.\n\nOptional.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories", + // "response": { + // "$ref": "ListHistoriesResponse" + // } + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsHistoriesListCall) Pages(ctx context.Context, f func(*ListHistoriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "toolresults.projects.histories.executions.create": + +type ProjectsHistoriesExecutionsCreateCall struct { + s *Service + projectId string + historyId string + execution *Execution + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an Execution. +// +// The returned Execution will have the id set. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to write to +// project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND +// - if the containing History does not exist +func (r *ProjectsHistoriesExecutionsService) Create(projectId string, historyId string, execution *Execution) *ProjectsHistoriesExecutionsCreateCall { + c := &ProjectsHistoriesExecutionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.execution = execution + return c +} + +// RequestId sets the optional parameter "requestId": A unique request +// ID for server to detect duplicated requests. For example, a +// UUID. +// +// Optional, but strongly recommended. +func (c *ProjectsHistoriesExecutionsCreateCall) RequestId(requestId string) *ProjectsHistoriesExecutionsCreateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
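+//
+// Illustrative usage (editorial sketch, not part of the generated client):
+// the Pages helper on the Histories list call defined above drives the
+// pageToken loop automatically; svc and ctx are assumed to exist:
+//
+//   err := svc.Projects.Histories.List("my-project").PageSize(100).
+//       Pages(ctx, func(page *toolresults.ListHistoriesResponse) error {
+//           // inspect the page here; NextPageToken handling is done by Pages.
+//           return nil // a non-nil error stops the iteration early
+//       })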
+func (c *ProjectsHistoriesExecutionsCreateCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsCreateCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.execution) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.create" call. +// Exactly one of *Execution or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Execution.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsCreateCall) Do(opts ...googleapi.CallOption) (*Execution, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Execution{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an Execution.\n\nThe returned Execution will have the id set.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing History does not exist", + // "httpMethod": "POST", + // "id": "toolresults.projects.histories.executions.create", + // "parameterOrder": [ + // "projectId", + // "historyId" + // ], + // "parameters": { + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions", + // "request": { + // "$ref": "Execution" + // }, + // "response": { + // "$ref": "Execution" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.get": + +type ProjectsHistoriesExecutionsGetCall struct { + s *Service + projectId string + historyId string + executionId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets an Execution. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to write to +// project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND +// - if the Execution does not exist +func (r *ProjectsHistoriesExecutionsService) Get(projectId string, historyId string, executionId string) *ProjectsHistoriesExecutionsGetCall { + c := &ProjectsHistoriesExecutionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsGetCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
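+//
+// Illustrative usage (editorial sketch, not part of the generated client):
+// a conditional read that only transfers the Execution when it has changed
+// since its ETag was captured; svc, historyID, executionID and previousETag
+// are assumed to exist:
+//
+//   exec, err := svc.Projects.Histories.Executions.
+//       Get("my-project", historyID, executionID).
+//       IfNoneMatch(previousETag).
+//       Do()
+//   if googleapi.IsNotModified(err) {
+//       // the server answered 304; keep using the cached Execution.
+//   }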
+func (c *ProjectsHistoriesExecutionsGetCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsGetCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.get" call. +// Exactly one of *Execution or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Execution.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsGetCall) Do(opts ...googleapi.CallOption) (*Execution, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Execution{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets an Execution.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the Execution does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.get", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId" + // ], + // "parameters": { + // "executionId": { + // "description": "An Execution id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}", + // "response": { + // "$ref": "Execution" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.list": + +type ProjectsHistoriesExecutionsListCall struct { + s *Service + projectId string + historyId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists Histories for a given Project. +// +// The executions are sorted by creation_time in descending order. The +// execution_id key will be used to order the executions with the same +// creation_time. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to read project - +// INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the +// containing History does not exist +func (r *ProjectsHistoriesExecutionsService) List(projectId string, historyId string) *ProjectsHistoriesExecutionsListCall { + c := &ProjectsHistoriesExecutionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of Executions to fetch. +// +// Default value: 25. The server will use this default if the field is +// not set or has a value of 0. +func (c *ProjectsHistoriesExecutionsListCall) PageSize(pageSize int64) *ProjectsHistoriesExecutionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A continuation +// token to resume the query at the next item. +func (c *ProjectsHistoriesExecutionsListCall) PageToken(pageToken string) *ProjectsHistoriesExecutionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsListCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesExecutionsListCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsListCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.list" call. +// Exactly one of *ListExecutionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListExecutionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsListCall) Do(opts ...googleapi.CallOption) (*ListExecutionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListExecutionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists Histories for a given Project.\n\nThe executions are sorted by creation_time in descending order. The execution_id key will be used to order the executions with the same creation_time.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing History does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.list", + // "parameterOrder": [ + // "projectId", + // "historyId" + // ], + // "parameters": { + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of Executions to fetch.\n\nDefault value: 25. The server will use this default if the field is not set or has a value of 0.\n\nOptional.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A continuation token to resume the query at the next item.\n\nOptional.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions", + // "response": { + // "$ref": "ListExecutionsResponse" + // } + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsHistoriesExecutionsListCall) Pages(ctx context.Context, f func(*ListExecutionsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "toolresults.projects.histories.executions.patch": + +type ProjectsHistoriesExecutionsPatchCall struct { + s *Service + projectId string + historyId string + executionId string + execution *Execution + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing Execution with the supplied partial +// entity. 
+// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to write to +// project - INVALID_ARGUMENT - if the request is malformed - +// FAILED_PRECONDITION - if the requested state transition is illegal - +// NOT_FOUND - if the containing History does not exist +func (r *ProjectsHistoriesExecutionsService) Patch(projectId string, historyId string, executionId string, execution *Execution) *ProjectsHistoriesExecutionsPatchCall { + c := &ProjectsHistoriesExecutionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.execution = execution + return c +} + +// RequestId sets the optional parameter "requestId": A unique request +// ID for server to detect duplicated requests. For example, a +// UUID. +// +// Optional, but strongly recommended. +func (c *ProjectsHistoriesExecutionsPatchCall) RequestId(requestId string) *ProjectsHistoriesExecutionsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsPatchCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsPatchCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.execution) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.patch" call. +// Exactly one of *Execution or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Execution.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsPatchCall) Do(opts ...googleapi.CallOption) (*Execution, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Execution{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing Execution with the supplied partial entity.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the requested state transition is illegal - NOT_FOUND - if the containing History does not exist", + // "httpMethod": "PATCH", + // "id": "toolresults.projects.histories.executions.patch", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId" + // ], + // "parameters": { + // "executionId": { + // "description": "Required.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "Required.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id. Required.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}", + // "request": { + // "$ref": "Execution" + // }, + // "response": { + // "$ref": "Execution" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.create": + +type ProjectsHistoriesExecutionsStepsCreateCall struct { + s *Service + projectId string + historyId string + executionId string + step *Step + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a Step. +// +// The returned Step will have the id set. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to write to +// project - INVALID_ARGUMENT - if the request is malformed - +// FAILED_PRECONDITION - if the step is too large (more than 10Mib) - +// NOT_FOUND - if the containing Execution does not exist +func (r *ProjectsHistoriesExecutionsStepsService) Create(projectId string, historyId string, executionId string, step *Step) *ProjectsHistoriesExecutionsStepsCreateCall { + c := &ProjectsHistoriesExecutionsStepsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.step = step + return c +} + +// RequestId sets the optional parameter "requestId": A unique request +// ID for server to detect duplicated requests. For example, a +// UUID. +// +// Optional, but strongly recommended. 
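+//
+// Illustrative usage (editorial sketch, not part of the generated client):
+// creating a Step idempotently by attaching a caller-generated UUID, so a
+// retried request is de-duplicated server-side; svc, historyID, executionID
+// and requestID are assumed to exist, and the Step literal is left empty:
+//
+//   step, err := svc.Projects.Histories.Executions.Steps.
+//       Create("my-project", historyID, executionID, &toolresults.Step{}).
+//       RequestId(requestID).
+//       Do()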
+func (c *ProjectsHistoriesExecutionsStepsCreateCall) RequestId(requestId string) *ProjectsHistoriesExecutionsStepsCreateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsCreateCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsCreateCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.step) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.create" call. +// Exactly one of *Step or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Step.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsHistoriesExecutionsStepsCreateCall) Do(opts ...googleapi.CallOption) (*Step, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Step{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a Step.\n\nThe returned Step will have the id set.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the step is too large (more than 10Mib) - NOT_FOUND - if the containing Execution does not exist", + // "httpMethod": "POST", + // "id": "toolresults.projects.histories.executions.steps.create", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId" + // ], + // "parameters": { + // "executionId": { + // "description": "A Execution id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps", + // "request": { + // "$ref": "Step" + // }, + // "response": { + // "$ref": "Step" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.get": + +type ProjectsHistoriesExecutionsStepsGetCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a Step. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to read project - +// INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the +// Step does not exist +func (r *ProjectsHistoriesExecutionsStepsService) Get(projectId string, historyId string, executionId string, stepId string) *ProjectsHistoriesExecutionsStepsGetCall { + c := &ProjectsHistoriesExecutionsStepsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsGetCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesExecutionsStepsGetCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsStepsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsGetCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.get" call. +// Exactly one of *Step or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Step.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsHistoriesExecutionsStepsGetCall) Do(opts ...googleapi.CallOption) (*Step, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Step{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a Step.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the Step does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.steps.get", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "A Execution id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A Step id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}", + // "response": { + // "$ref": "Step" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.getPerfMetricsSummary": + +type ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetPerfMetricsSummary: Retrieves a PerfMetricsSummary. +// +// May return any of the following error code(s): - NOT_FOUND - The +// specified PerfMetricsSummary does not exist +func (r *ProjectsHistoriesExecutionsStepsService) GetPerfMetricsSummary(projectId string, historyId string, executionId string, stepId string) *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall { + c := &ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
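+//
+// Example (illustrative sketch, not generator output; "svc" is assumed to be
+// a configured *Service, "ctx" a context.Context, and "etag" an ETag saved
+// from a previous response): a conditional fetch that re-downloads the
+// summary only when it has changed since the last request.
+//
+//	summary, err := svc.Projects.Histories.Executions.Steps.
+//		GetPerfMetricsSummary("my-project", "bh.1234", "5678", "step-1").
+//		IfNoneMatch(etag).
+//		Context(ctx).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the cached summary is still current
+//	}
+//	_ = summary
+//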
+func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.getPerfMetricsSummary" call. +// Exactly one of *PerfMetricsSummary or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PerfMetricsSummary.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsGetPerfMetricsSummaryCall) Do(opts ...googleapi.CallOption) (*PerfMetricsSummary, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PerfMetricsSummary{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a PerfMetricsSummary.\n\nMay return any of the following error code(s): - NOT_FOUND - The specified PerfMetricsSummary does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.steps.getPerfMetricsSummary", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "A tool results execution ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A tool results history ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The cloud project", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A tool results step ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary", + // "response": { + // "$ref": "PerfMetricsSummary" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.list": + +type ProjectsHistoriesExecutionsStepsListCall struct { + s *Service + projectId string + historyId string + executionId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists Steps for a given Execution. +// +// The steps are sorted by creation_time in descending order. The +// step_id key will be used to order the steps with the same +// creation_time. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to read project - +// INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION +// - if an argument in the request happens to be invalid; e.g. if an +// attempt is made to list the children of a nonexistent Step - +// NOT_FOUND - if the containing Execution does not exist +func (r *ProjectsHistoriesExecutionsStepsService) List(projectId string, historyId string, executionId string) *ProjectsHistoriesExecutionsStepsListCall { + c := &ProjectsHistoriesExecutionsStepsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of Steps to fetch. +// +// Default value: 25. The server will use this default if the field is +// not set or has a value of 0. 
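+//
+// Example (illustrative sketch, not generator output; "svc" is assumed to be
+// a configured *Service, "ctx" a context.Context, and the ids placeholders):
+// PageSize bounds each page, while the Pages helper defined later in this
+// file follows NextPageToken until all Steps of the Execution are visited.
+//
+//	err := svc.Projects.Histories.Executions.Steps.
+//		List("my-project", "bh.1234", "5678").
+//		PageSize(50).
+//		Pages(ctx, func(page *ListStepsResponse) error {
+//			for _, s := range page.Steps {
+//				fmt.Println(s.StepId)
+//			}
+//			return nil // returning a non-nil error stops the iteration
+//		})
+//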
+func (c *ProjectsHistoriesExecutionsStepsListCall) PageSize(pageSize int64) *ProjectsHistoriesExecutionsStepsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A continuation +// token to resume the query at the next item. +func (c *ProjectsHistoriesExecutionsStepsListCall) PageToken(pageToken string) *ProjectsHistoriesExecutionsStepsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsListCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesExecutionsStepsListCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsStepsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsListCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.list" call. +// Exactly one of *ListStepsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListStepsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsListCall) Do(opts ...googleapi.CallOption) (*ListStepsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListStepsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists Steps for a given Execution.\n\nThe steps are sorted by creation_time in descending order. The step_id key will be used to order the steps with the same creation_time.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to read project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if an argument in the request happens to be invalid; e.g. if an attempt is made to list the children of a nonexistent Step - NOT_FOUND - if the containing Execution does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.steps.list", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId" + // ], + // "parameters": { + // "executionId": { + // "description": "A Execution id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of Steps to fetch.\n\nDefault value: 25. The server will use this default if the field is not set or has a value of 0.\n\nOptional.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A continuation token to resume the query at the next item.\n\nOptional.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps", + // "response": { + // "$ref": "ListStepsResponse" + // } + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsHistoriesExecutionsStepsListCall) Pages(ctx context.Context, f func(*ListStepsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "toolresults.projects.histories.executions.steps.patch": + +type ProjectsHistoriesExecutionsStepsPatchCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + step *Step + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing Step with the supplied partial +// entity. 
+// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to write project +// - INVALID_ARGUMENT - if the request is malformed - +// FAILED_PRECONDITION - if the requested state transition is illegal +// (e.g try to upload a duplicate xml file), if the updated step is too +// large (more than 10Mib) - NOT_FOUND - if the containing Execution +// does not exist +func (r *ProjectsHistoriesExecutionsStepsService) Patch(projectId string, historyId string, executionId string, stepId string, step *Step) *ProjectsHistoriesExecutionsStepsPatchCall { + c := &ProjectsHistoriesExecutionsStepsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + c.step = step + return c +} + +// RequestId sets the optional parameter "requestId": A unique request +// ID for server to detect duplicated requests. For example, a +// UUID. +// +// Optional, but strongly recommended. +func (c *ProjectsHistoriesExecutionsStepsPatchCall) RequestId(requestId string) *ProjectsHistoriesExecutionsStepsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsPatchCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPatchCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.step) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.patch" call. +// Exactly one of *Step or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Step.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsHistoriesExecutionsStepsPatchCall) Do(opts ...googleapi.CallOption) (*Step, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Step{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing Step with the supplied partial entity.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the requested state transition is illegal (e.g try to upload a duplicate xml file), if the updated step is too large (more than 10Mib) - NOT_FOUND - if the containing Execution does not exist", + // "httpMethod": "PATCH", + // "id": "toolresults.projects.histories.executions.steps.patch", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "A Execution id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "A unique request ID for server to detect duplicated requests. For example, a UUID.\n\nOptional, but strongly recommended.", + // "location": "query", + // "type": "string" + // }, + // "stepId": { + // "description": "A Step id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}", + // "request": { + // "$ref": "Step" + // }, + // "response": { + // "$ref": "Step" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.publishXunitXmlFiles": + +type ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + publishxunitxmlfilesrequest *PublishXunitXmlFilesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// PublishXunitXmlFiles: Publish xml files to an existing Step. +// +// May return any of the following canonical error codes: +// +// - PERMISSION_DENIED - if the user is not authorized to write project +// - INVALID_ARGUMENT - if the request is malformed - +// FAILED_PRECONDITION - if the requested state transition is illegal, +// e.g try to upload a duplicate xml file or a file too large. 
- +// NOT_FOUND - if the containing Execution does not exist +func (r *ProjectsHistoriesExecutionsStepsService) PublishXunitXmlFiles(projectId string, historyId string, executionId string, stepId string, publishxunitxmlfilesrequest *PublishXunitXmlFilesRequest) *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall { + c := &ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + c.publishxunitxmlfilesrequest = publishxunitxmlfilesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishxunitxmlfilesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}:publishXunitXmlFiles") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.publishXunitXmlFiles" call. +// Exactly one of *Step or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Step.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsHistoriesExecutionsStepsPublishXunitXmlFilesCall) Do(opts ...googleapi.CallOption) (*Step, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Step{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Publish xml files to an existing Step.\n\nMay return any of the following canonical error codes:\n\n- PERMISSION_DENIED - if the user is not authorized to write project - INVALID_ARGUMENT - if the request is malformed - FAILED_PRECONDITION - if the requested state transition is illegal, e.g try to upload a duplicate xml file or a file too large. - NOT_FOUND - if the containing Execution does not exist", + // "httpMethod": "POST", + // "id": "toolresults.projects.histories.executions.steps.publishXunitXmlFiles", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "A Execution id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A Step id. Note: This step must include a TestExecutionStep.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}:publishXunitXmlFiles", + // "request": { + // "$ref": "PublishXunitXmlFilesRequest" + // }, + // "response": { + // "$ref": "Step" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.perfMetricsSummary.create": + +type ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + perfmetricssummary *PerfMetricsSummary + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a PerfMetricsSummary resource. +// +// May return any of the following error code(s): - ALREADY_EXISTS - A +// PerfMetricSummary already exists for the given Step - NOT_FOUND - The +// containing Step does not exist +func (r *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService) Create(projectId string, historyId string, executionId string, stepId string, perfmetricssummary *PerfMetricsSummary) *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall { + c := &ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + c.perfmetricssummary = perfmetricssummary + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
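+//
+// Example (illustrative sketch, not generator output; "svc" is assumed to be
+// a configured *Service, "ctx" a context.Context, and the ids and the field
+// selector are placeholders): creating the summary for a step and using
+// Fields to request a partial response containing only the step id.
+//
+//	summary := &PerfMetricsSummary{} // attach the collected metrics here
+//	created, err := svc.Projects.Histories.Executions.Steps.PerfMetricsSummary.
+//		Create("my-project", "bh.1234", "5678", "step-1", summary).
+//		Fields("stepId").
+//		Context(ctx).
+//		Do()
+//	if err != nil {
+//		// ALREADY_EXISTS indicates a summary was already recorded for the step
+//	}
+//	_ = created
+//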
+func (c *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.perfmetricssummary) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.perfMetricsSummary.create" call. +// Exactly one of *PerfMetricsSummary or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PerfMetricsSummary.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsPerfMetricsSummaryCreateCall) Do(opts ...googleapi.CallOption) (*PerfMetricsSummary, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PerfMetricsSummary{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a PerfMetricsSummary resource.\n\nMay return any of the following error code(s): - ALREADY_EXISTS - A PerfMetricSummary already exists for the given Step - NOT_FOUND - The containing Step does not exist", + // "httpMethod": "POST", + // "id": "toolresults.projects.histories.executions.steps.perfMetricsSummary.create", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "A tool results execution ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A tool results history ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The cloud project", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A tool results step ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary", + // "request": { + // "$ref": "PerfMetricsSummary" + // }, + // "response": { + // "$ref": "PerfMetricsSummary" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.perfSampleSeries.create": + +type ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + perfsampleseries *PerfSampleSeries + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a PerfSampleSeries. +// +// May return any of the following error code(s): - ALREADY_EXISTS - +// PerfMetricSummary already exists for the given Step - NOT_FOUND - The +// containing Step does not exist +func (r *ProjectsHistoriesExecutionsStepsPerfSampleSeriesService) Create(projectId string, historyId string, executionId string, stepId string, perfsampleseries *PerfSampleSeries) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall { + c := &ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + c.perfsampleseries = perfsampleseries + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.perfsampleseries) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.perfSampleSeries.create" call. +// Exactly one of *PerfSampleSeries or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PerfSampleSeries.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesCreateCall) Do(opts ...googleapi.CallOption) (*PerfSampleSeries, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PerfSampleSeries{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a PerfSampleSeries.\n\nMay return any of the following error code(s): - ALREADY_EXISTS - PerfMetricSummary already exists for the given Step - NOT_FOUND - The containing Step does not exist", + // "httpMethod": "POST", + // "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.create", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "A tool results execution ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A tool results history ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The cloud project", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A tool results step ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries", + // "request": { + // "$ref": "PerfSampleSeries" + // }, + // "response": { + // "$ref": "PerfSampleSeries" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.perfSampleSeries.get": + +type ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + sampleSeriesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a PerfSampleSeries. +// +// May return any of the following error code(s): - NOT_FOUND - The +// specified PerfSampleSeries does not exist +func (r *ProjectsHistoriesExecutionsStepsPerfSampleSeriesService) Get(projectId string, historyId string, executionId string, stepId string, sampleSeriesId string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall { + c := &ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + c.sampleSeriesId = sampleSeriesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + "sampleSeriesId": c.sampleSeriesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.perfSampleSeries.get" call. +// Exactly one of *PerfSampleSeries or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PerfSampleSeries.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesGetCall) Do(opts ...googleapi.CallOption) (*PerfSampleSeries, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PerfSampleSeries{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a PerfSampleSeries.\n\nMay return any of the following error code(s): - NOT_FOUND - The specified PerfSampleSeries does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.get", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId", + // "sampleSeriesId" + // ], + // "parameters": { + // "executionId": { + // "description": "A tool results execution ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A tool results history ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The cloud project", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sampleSeriesId": { + // "description": "A sample series id", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A tool results step ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}", + // "response": { + // "$ref": "PerfSampleSeries" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.perfSampleSeries.list": + +type ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists PerfSampleSeries for a given Step. +// +// The request provides an optional filter which specifies one or more +// PerfMetricsType to include in the result; if none returns all. The +// resulting PerfSampleSeries are sorted by ids. 
+// +// May return any of the following canonical error codes: - NOT_FOUND - +// The containing Step does not exist +func (r *ProjectsHistoriesExecutionsStepsPerfSampleSeriesService) List(projectId string, historyId string, executionId string, stepId string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall { + c := &ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + return c +} + +// Filter sets the optional parameter "filter": Specify one or more +// PerfMetricType values such as CPU to filter the result +// +// Possible values: +// "cpu" +// "memory" +// "network" +// "perfMetricTypeUnspecified" +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) Filter(filter ...string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall { + c.urlParams_.SetMulti("filter", append([]string{}, filter...)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.perfSampleSeries.list" call. +// Exactly one of *ListPerfSampleSeriesResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListPerfSampleSeriesResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesListCall) Do(opts ...googleapi.CallOption) (*ListPerfSampleSeriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListPerfSampleSeriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists PerfSampleSeries for a given Step.\n\nThe request provides an optional filter which specifies one or more PerfMetricsType to include in the result; if none returns all. 
The resulting PerfSampleSeries are sorted by ids.\n\nMay return any of the following canonical error codes: - NOT_FOUND - The containing Step does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.list", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "A tool results execution ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "filter": { + // "description": "Specify one or more PerfMetricType values such as CPU to filter the result", + // "enum": [ + // "cpu", + // "memory", + // "network", + // "perfMetricTypeUnspecified" + // ], + // "enumDescriptions": [ + // "", + // "", + // "", + // "" + // ], + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A tool results history ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The cloud project", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A tool results step ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries", + // "response": { + // "$ref": "ListPerfSampleSeriesResponse" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.batchCreate": + +type ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + sampleSeriesId string + batchcreateperfsamplesrequest *BatchCreatePerfSamplesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchCreate: Creates a batch of PerfSamples - a client can submit +// multiple batches of Perf Samples through repeated calls to this +// method in order to split up a large request payload - duplicates and +// existing timestamp entries will be ignored. - the batch operation may +// partially succeed - the set of elements successfully inserted is +// returned in the response (omits items which already existed in the +// database). +// +// May return any of the following canonical error codes: - NOT_FOUND - +// The containing PerfSampleSeries does not exist +func (r *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService) BatchCreate(projectId string, historyId string, executionId string, stepId string, sampleSeriesId string, batchcreateperfsamplesrequest *BatchCreatePerfSamplesRequest) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall { + c := &ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + c.sampleSeriesId = sampleSeriesId + c.batchcreateperfsamplesrequest = batchcreateperfsamplesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchcreateperfsamplesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples:batchCreate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + "sampleSeriesId": c.sampleSeriesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.batchCreate" call. +// Exactly one of *BatchCreatePerfSamplesResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BatchCreatePerfSamplesResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateCall) Do(opts ...googleapi.CallOption) (*BatchCreatePerfSamplesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
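The BatchCreate documentation above describes splitting a large payload into repeated calls; a minimal sketch of that pattern, assuming svc, ctx, the IDs and a samples slice already exist, and assuming the PerfSamples field name on BatchCreatePerfSamplesRequest plus the Samples accessor from the usual generated layout:

    // Hypothetical fragment: upload samples in chunks of 500; duplicates and
    // already-stored timestamps are ignored by the server per the docs above.
    const batchSize = 500
    for start := 0; start < len(samples); start += batchSize {
        end := start + batchSize
        if end > len(samples) {
            end = len(samples)
        }
        req := &toolresults.BatchCreatePerfSamplesRequest{PerfSamples: samples[start:end]}
        _, err := svc.Projects.Histories.Executions.Steps.PerfSampleSeries.Samples.
            BatchCreate(projectID, historyID, executionID, stepID, sampleSeriesID, req).
            Context(ctx).
            Do()
        if err != nil {
            return err
        }
    }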
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BatchCreatePerfSamplesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a batch of PerfSamples - a client can submit multiple batches of Perf Samples through repeated calls to this method in order to split up a large request payload - duplicates and existing timestamp entries will be ignored. - the batch operation may partially succeed - the set of elements successfully inserted is returned in the response (omits items which already existed in the database).\n\nMay return any of the following canonical error codes: - NOT_FOUND - The containing PerfSampleSeries does not exist", + // "httpMethod": "POST", + // "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.batchCreate", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId", + // "sampleSeriesId" + // ], + // "parameters": { + // "executionId": { + // "description": "A tool results execution ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A tool results history ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The cloud project", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sampleSeriesId": { + // "description": "A sample series id", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A tool results step ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples:batchCreate", + // "request": { + // "$ref": "BatchCreatePerfSamplesRequest" + // }, + // "response": { + // "$ref": "BatchCreatePerfSamplesResponse" + // } + // } + +} + +// method id "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.list": + +type ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + sampleSeriesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the Performance Samples of a given Sample Series - The +// list results are sorted by timestamps ascending - The default page +// size is 500 samples; and maximum size allowed 5000 - The response +// token indicates the last returned PerfSample timestamp - When the +// results size exceeds the page size, submit a subsequent request +// including the page token to return the rest of the samples up to the +// page limit +// +// May return any of the following canonical error codes: - OUT_OF_RANGE +// - The specified request page_token is out of valid range - NOT_FOUND +// - The containing PerfSampleSeries does not exist +func (r 
*ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService) List(projectId string, historyId string, executionId string, stepId string, sampleSeriesId string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall { + c := &ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + c.sampleSeriesId = sampleSeriesId + return c +} + +// PageSize sets the optional parameter "pageSize": The default page +// size is 500 samples, and the maximum size is 5000. If the page_size +// is greater than 5000, the effective page size will be 5000 +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) PageSize(pageSize int64) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional, the +// next_page_token returned in the previous response +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) PageToken(pageToken string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + "sampleSeriesId": c.sampleSeriesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.list" call. +// Exactly one of *ListPerfSamplesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListPerfSamplesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) Do(opts ...googleapi.CallOption) (*ListPerfSamplesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListPerfSamplesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the Performance Samples of a given Sample Series - The list results are sorted by timestamps ascending - The default page size is 500 samples; and maximum size allowed 5000 - The response token indicates the last returned PerfSample timestamp - When the results size exceeds the page size, submit a subsequent request including the page token to return the rest of the samples up to the page limit\n\nMay return any of the following canonical error codes: - OUT_OF_RANGE - The specified request page_token is out of valid range - NOT_FOUND - The containing PerfSampleSeries does not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.steps.perfSampleSeries.samples.list", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId", + // "sampleSeriesId" + // ], + // "parameters": { + // "executionId": { + // "description": "A tool results execution ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A tool results history ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The default page size is 500 samples, and the maximum size is 5000. 
If the page_size is greater than 5000, the effective page size will be 5000", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional, the next_page_token returned in the previous response", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "The cloud project", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sampleSeriesId": { + // "description": "A sample series id", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A tool results step ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples", + // "response": { + // "$ref": "ListPerfSamplesResponse" + // } + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListCall) Pages(ctx context.Context, f func(*ListPerfSamplesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "toolresults.projects.histories.executions.steps.thumbnails.list": + +type ProjectsHistoriesExecutionsStepsThumbnailsListCall struct { + s *Service + projectId string + historyId string + executionId string + stepId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists thumbnails of images attached to a step. +// +// May return any of the following canonical error codes: - +// PERMISSION_DENIED - if the user is not authorized to read from the +// project, or from any of the images - INVALID_ARGUMENT - if the +// request is malformed - NOT_FOUND - if the step does not exist, or if +// any of the images do not exist +func (r *ProjectsHistoriesExecutionsStepsThumbnailsService) List(projectId string, historyId string, executionId string, stepId string) *ProjectsHistoriesExecutionsStepsThumbnailsListCall { + c := &ProjectsHistoriesExecutionsStepsThumbnailsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.historyId = historyId + c.executionId = executionId + c.stepId = stepId + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of thumbnails to fetch. +// +// Default value: 50. The server will use this default if the field is +// not set or has a value of 0. +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) PageSize(pageSize int64) *ProjectsHistoriesExecutionsStepsThumbnailsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A continuation +// token to resume the query at the next item. +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) PageToken(pageToken string) *ProjectsHistoriesExecutionsStepsThumbnailsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) Fields(s ...googleapi.Field) *ProjectsHistoriesExecutionsStepsThumbnailsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) IfNoneMatch(entityTag string) *ProjectsHistoriesExecutionsStepsThumbnailsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) Context(ctx context.Context) *ProjectsHistoriesExecutionsStepsThumbnailsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/thumbnails") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "historyId": c.historyId, + "executionId": c.executionId, + "stepId": c.stepId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "toolresults.projects.histories.executions.steps.thumbnails.list" call. +// Exactly one of *ListStepThumbnailsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ListStepThumbnailsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) Do(opts ...googleapi.CallOption) (*ListStepThumbnailsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
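The IfNoneMatch and Do documentation above point at googleapi.IsNotModified for handling 304 responses; a minimal conditional-fetch sketch, assuming a toolresults import alias and that the ETag of a previous response was saved by the caller:

    // Hypothetical helper: re-run a thumbnails List call only if the data changed.
    func listIfChanged(call *toolresults.ProjectsHistoriesExecutionsStepsThumbnailsListCall, etag string) (*toolresults.ListStepThumbnailsResponse, error) {
        resp, err := call.IfNoneMatch(etag).Do()
        if googleapi.IsNotModified(err) {
            return nil, nil // 304 Not Modified: the cached copy is still current
        }
        if err != nil {
            return nil, err
        }
        return resp, nil
    }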
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListStepThumbnailsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists thumbnails of images attached to a step.\n\nMay return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to read from the project, or from any of the images - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the step does not exist, or if any of the images do not exist", + // "httpMethod": "GET", + // "id": "toolresults.projects.histories.executions.steps.thumbnails.list", + // "parameterOrder": [ + // "projectId", + // "historyId", + // "executionId", + // "stepId" + // ], + // "parameters": { + // "executionId": { + // "description": "An Execution id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "historyId": { + // "description": "A History id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of thumbnails to fetch.\n\nDefault value: 50. The server will use this default if the field is not set or has a value of 0.\n\nOptional.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A continuation token to resume the query at the next item.\n\nOptional.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "A Project id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stepId": { + // "description": "A Step id.\n\nRequired.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/thumbnails", + // "response": { + // "$ref": "ListStepThumbnailsResponse" + // } + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
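The Pages helper documented above drives the pageToken/NextPageToken loop automatically; a minimal sketch, assuming svc, ctx, the IDs, and the Thumbnails accessor chain from the usual generated layout:

    // Hypothetical fragment: visit every page of step thumbnails.
    err := svc.Projects.Histories.Executions.Steps.Thumbnails.
        List(projectID, historyID, executionID, stepID).
        PageSize(50).
        Pages(ctx, func(page *toolresults.ListStepThumbnailsResponse) error {
            log.Printf("page received, next token %q", page.NextPageToken)
            return nil // returning a non-nil error stops the iteration
        })
    if err != nil {
        return err
    }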
+func (c *ProjectsHistoriesExecutionsStepsThumbnailsListCall) Pages(ctx context.Context, f func(*ListStepThumbnailsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/vendor/google.golang.org/api/tracing/v1/tracing-api.json b/vendor/google.golang.org/api/tracing/v1/tracing-api.json new file mode 100644 index 000000000..91a78f258 --- /dev/null +++ b/vendor/google.golang.org/api/tracing/v1/tracing-api.json @@ -0,0 +1,648 @@ +{ + "canonicalName": "Tracing", + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/trace.append": { + "description": "Write Trace data for a project or application" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/trace.readonly": { + "description": "Read Trace data for a project or application" + } + } + } + }, + "rootUrl": "https://tracing.googleapis.com/", + "ownerDomain": "google.com", + "name": "tracing", + "batchPath": "batch", + "title": "Google Tracing API", + "ownerName": "Google", + "resources": { + "projects": { + "resources": { + "traces": { + "methods": { + "batchUpdate": { + "request": { + "$ref": "BatchUpdateSpansRequest" + }, + "description": "Sends new spans to Stackdriver Trace or updates existing spans. If the\nname of a trace that you send matches that of an existing trace, any fields\nin the existing trace and its spans are overwritten by the provided values,\nand any new fields provided are merged with the existing trace data. If the\nname does not match, a new trace is created with given set of spans.", + "httpMethod": "PATCH", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "parent": { + "description": "ID of the Cloud project where the trace data is stored.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append" + ], + "flatPath": "v1/projects/{projectsId}/traces:batchUpdate", + "id": "tracing.projects.traces.batchUpdate", + "path": "v1/{+parent}/traces:batchUpdate" + }, + "listSpans": { + "description": "Returns a list of spans within a trace.", + "response": { + "$ref": "ListSpansResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.readonly" + ], + "parameters": { + "name": { + "location": "path", + "description": "ID of the span set where is \"projects/\u003cproject_id\u003e/traces/\u003ctrace_id\u003e\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/traces/[^/]+$" + }, + "pageToken": { + "type": "string", + "location": "query", + "description": "Token identifying the page of results to return. If provided, use the\nvalue of the `page_token` field from a previous request. Optional." 
+ } + }, + "flatPath": "v1/projects/{projectsId}/traces/{tracesId}:listSpans", + "path": "v1/{+name}:listSpans", + "id": "tracing.projects.traces.listSpans" + }, + "list": { + "description": "Returns of a list of traces that match the specified filter conditions.", + "httpMethod": "GET", + "response": { + "$ref": "ListTracesResponse" + }, + "parameterOrder": [ + "parent" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.readonly" + ], + "parameters": { + "filter": { + "location": "query", + "description": "An optional filter for the request.\nExample:\n\"version_label_key:a some_label:some_label_key\"\nreturns traces from version a and has some_label with some_label_key.", + "type": "string" + }, + "endTime": { + "location": "query", + "description": "End of the time interval (inclusive) during which the trace data was\ncollected from the application.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "location": "query", + "description": "Start of the time interval (inclusive) during which the trace data was\ncollected from the application.", + "format": "google-datetime", + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "Token identifying the page of results to return. If provided, use the\nvalue of the `next_page_token` field from a previous request. Optional.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Maximum number of traces to return. If not specified or \u003c= 0, the\nimplementation selects a reasonable value. The implementation may\nreturn fewer traces than the requested page size. Optional.", + "format": "int32", + "type": "integer" + }, + "parent": { + "location": "path", + "description": "ID of the Cloud project where the trace data is stored.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + }, + "orderBy": { + "description": "Field used to sort the returned traces. Optional.\nCan be one of the following:\n\n* `trace_id`\n* `name` (`name` field of root span in the trace)\n* `duration` (difference between `end_time` and `start_time` fields of\n the root span)\n* `start` (`start_time` field of the root span)\n\nDescending order can be specified by appending `desc` to the sort field\n(for example, `name desc`).\n\nOnly one sort field is permitted.", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1/projects/{projectsId}/traces", + "id": "tracing.projects.traces.list", + "path": "v1/{+parent}/traces" + }, + "get": { + "response": { + "$ref": "Trace" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "description": "ID of the trace which is \"projects/\u003cproject_id\u003e/traces/\u003ctrace_id\u003e\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/traces/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.readonly" + ], + "flatPath": "v1/projects/{projectsId}/traces/{tracesId}", + "path": "v1/{+name}", + "id": "tracing.projects.traces.get", + "description": "Returns a specific trace." 
+ } + } + } + } + } + }, + "parameters": { + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Returns response with indentations and line breaks." + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" + }, + "$.xgafv": { + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format.", + "type": "string" + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + } + }, + "version": "v1", + "baseUrl": "https://tracing.googleapis.com/", + "servicePath": "", + "description": "Send and retrieve trace data from Google Stackdriver Trace.\n", + "kind": "discovery#restDescription", + "basePath": "", + "documentationLink": "https://cloud.google.com/trace", + "revision": "20170208", + "id": "tracing:v1", + "discoveryVersion": "v1", + "version_module": "True", + "schemas": { + "Module": { + "type": "object", + "properties": { + "buildId": { + "type": "string", + "description": "Build_id is a unique identifier for the module,\nusually a hash of its contents" + }, + "module": { + "description": "Binary module.\nE.g. main binary, kernel modules, and dynamic libraries\nsuch as libc.so, sharedlib.so", + "type": "string" + } + }, + "id": "Module" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. 
The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "type": "object", + "properties": { + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", + "type": "array", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + } + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "id": "Status" + }, + "Span": { + "id": "Span", + "description": "A span represents a single operation within a trace. Spans can be nested\nand form a trace tree. Often, a trace contains a root span that describes the\nend-to-end latency and, optionally, one or more subspans for\nits sub-operations. Spans do not need to be contiguous. 
There may be gaps\nbetween spans in a trace.", + "type": "object", + "properties": { + "localEndTime": { + "description": "Local machine clock time from the UNIX epoch,\nat which span execution ended.\nOn the server side these are the times when the server application\nhandler finishes running.", + "format": "google-datetime", + "type": "string" + }, + "parentId": { + "description": "ID of parent span. 0 or missing if this is a root span.", + "format": "uint64", + "type": "string" + }, + "timeEvents": { + "description": "A collection of time-stamped events.", + "type": "array", + "items": { + "$ref": "TimeEvent" + } + }, + "status": { + "$ref": "Status", + "description": "The final status of the Span. This is optional." + }, + "name": { + "description": "Name of the span. The span name is sanitized and displayed in the\nStackdriver Trace tool in the {% dynamic print site_values.console_name %}.\nThe name may be a method name or some other per-call site name.\nFor the same executable and the same call point, a best practice is\nto use a consistent name, which makes it easier to correlate\ncross-trace spans.", + "type": "string" + }, + "stackTrace": { + "description": "Stack trace captured at the start of the span. This is optional.", + "$ref": "StackTrace" + }, + "links": { + "description": "A collection of links.", + "type": "array", + "items": { + "$ref": "Link" + } + }, + "attributes": { + "additionalProperties": { + "$ref": "AttributeValue" + }, + "description": "Properties of a span. Attributes at the span level.\nE.g.\n\"/instance_id\": \"my-instance\"\n\"/zone\": \"us-central1-a\"\n\"/grpc/peer_address\": \"ip:port\" (dns, etc.)\n\"/grpc/deadline\": \"Duration\"\n\"/http/user_agent\"\n\"/http/request_bytes\": 300\n\"/http/response_bytes\": 1200\n\"/http/url\": google.com/apis\n\"/pid\"\n\"abc.com/myattribute\": \"my attribute value\"\n\nMaximum length for attribute key is 128 characters, for string attribute\nvalue is 2K characters.", + "type": "object" + }, + "id": { + "type": "string", + "description": "Identifier for the span. Must be a 64-bit integer other than 0 and\nunique within a trace.", + "format": "uint64" + }, + "localStartTime": { + "description": "Local machine clock time from the UNIX epoch,\nat which span execution started.\nOn the server side these are the times when the server application\nhandler starts running.", + "format": "google-datetime", + "type": "string" + }, + "hasRemoteParent": { + "description": "True if this Span has a remote parent (is an RPC server Span).", + "type": "boolean" + } + } + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "ListTracesResponse": { + "description": "The response message for the `ListTraces` method.", + "type": "object", + "properties": { + "traces": { + "description": "List of trace records returned.", + "type": "array", + "items": { + "$ref": "Trace" + } + }, + "nextPageToken": { + "description": "If defined, indicates that there are more traces that match the request\nand that this value should be passed to the next request to continue\nretrieving additional traces.", + "type": "string" + } + }, + "id": "ListTracesResponse" + }, + "AttributeValue": { + "description": "Allowed attribute values.", + "type": "object", + "properties": { + "stringValue": { + "description": "A string value.", + "type": "string" + }, + "boolValue": { + "type": "boolean", + "description": "A boolean value." + }, + "intValue": { + "description": "An integer value.", + "format": "int64", + "type": "string" + } + }, + "id": "AttributeValue" + }, + "BatchUpdateSpansRequest": { + "description": "The request message for the `BatchUpdateSpans` method.", + "type": "object", + "properties": { + "spanUpdates": { + "description": "A map from trace name to spans to be stored or updated.", + "type": "object", + "additionalProperties": { + "$ref": "SpanUpdates" + } + } + }, + "id": "BatchUpdateSpansRequest" + }, + "StackTrace": { + "type": "object", + "properties": { + "stackFrame": { + "description": "Stack frames of this stack trace.", + "type": "array", + "items": { + "$ref": "StackFrame" + } + }, + "stackTraceHashId": { + "description": "User can choose to use their own hash function to hash large attributes to\nsave network bandwidth and storage.\nTypical usage is to pass both stack_frame and stack_trace_hash_id initially\nto inform the storage of the mapping. And in subsequent calls, pass in\nstack_trace_hash_id only. User shall verify the hash value is\nsuccessfully stored.", + "format": "uint64", + "type": "string" + } + }, + "id": "StackTrace" + }, + "TimeEvent": { + "id": "TimeEvent", + "description": "A time-stamped annotation in the Span.", + "type": "object", + "properties": { + "networkEvent": { + "$ref": "NetworkEvent", + "description": "Optional field that can be used only for network events." 
+ }, + "annotation": { + "description": "Optional field for user supplied \u003cstring, AttributeValue\u003e map", + "$ref": "Annotation" + }, + "localTime": { + "description": "The local machine absolute timestamp when this event happened.", + "format": "google-datetime", + "type": "string" + } + } + }, + "NetworkEvent": { + "description": "An event describing an RPC message sent/received on the network.", + "type": "object", + "properties": { + "kernelTime": { + "description": "If available, this is the kernel time:\nFor sent messages, this is the time at which the first bit was sent.\nFor received messages, this is the time at which the last bit was\nreceived.", + "format": "google-datetime", + "type": "string" + }, + "type": { + "enum": [ + "TYPE_UNSPECIFIED", + "SENT", + "RECV" + ], + "type": "string", + "enumDescriptions": [ + "", + "", + "" + ] + }, + "messageId": { + "description": "Every message has an identifier, which must be different from all the\nnetwork messages in this span.\nThis is especially important when the request/response are streamed.", + "format": "uint64", + "type": "string" + }, + "messageSize": { + "description": "Number of bytes send/receive.", + "format": "uint64", + "type": "string" + } + }, + "id": "NetworkEvent" + }, + "ListSpansResponse": { + "description": "The response message for the 'ListSpans' method.", + "type": "object", + "properties": { + "spans": { + "description": "The requested spans if they are any in the specified trace.", + "type": "array", + "items": { + "$ref": "Span" + } + }, + "nextPageToken": { + "description": "If defined, indicates that there are more spans that match the request\nand that this value should be passed to the next request to continue\nretrieving additional spans.", + "type": "string" + } + }, + "id": "ListSpansResponse" + }, + "SpanUpdates": { + "description": "Collection of spans.", + "type": "object", + "properties": { + "spans": { + "type": "array", + "items": { + "$ref": "Span" + } + } + }, + "id": "SpanUpdates" + }, + "StackFrame": { + "description": "Presents a single stack frame in a stack trace.", + "type": "object", + "properties": { + "lineNumber": { + "description": "Line number of the frame.", + "format": "int64", + "type": "string" + }, + "loadModule": { + "$ref": "Module", + "description": "Binary module the code is loaded from." + }, + "columnNumber": { + "description": "Column number is important in JavaScript(anonymous functions),\nMight not be available in some languages.", + "format": "int64", + "type": "string" + }, + "fileName": { + "description": "File name of the frame.", + "type": "string" + }, + "sourceVersion": { + "description": "source_version is deployment specific. It might be\nbetter to be stored in deployment metadata.", + "type": "string" + }, + "originalFunctionName": { + "description": "Used when function name is ‘mangled’. Not guaranteed to be fully\nqualified but usually it is.", + "type": "string" + }, + "functionName": { + "type": "string", + "description": "Fully qualified names which uniquely identify function/method/etc." + } + }, + "id": "StackFrame" + }, + "Link": { + "description": "Link one span with another which may be in a different Trace. 
Used (for\nexample) in batching operations, where a single batch handler processes\nmultiple requests from different traces.", + "type": "object", + "properties": { + "type": { + "enum": [ + "TYPE_UNSPECIFIED", + "CHILD", + "PARENT" + ], + "description": "The type of the link.", + "type": "string", + "enumDescriptions": [ + "", + "", + "" + ] + }, + "traceId": { + "type": "string", + "description": "The trace identifier of the linked span." + }, + "spanId": { + "description": "The span identifier of the linked span.", + "format": "uint64", + "type": "string" + } + }, + "id": "Link" + }, + "Annotation": { + "description": "Text annotation with a set of attributes.", + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "A user-supplied message describing the event." + }, + "attributes": { + "description": "A set of attributes on the annotation.", + "type": "object", + "additionalProperties": { + "$ref": "AttributeValue" + } + } + }, + "id": "Annotation" + }, + "Trace": { + "properties": { + "name": { + "description": "ID of the trace which is \"projects/\u003cproject_id\u003e/traces/\u003ctrace_id\u003e\".\ntrace_id is globally unique identifier for the trace. Common to all the\nspans. It is conceptually a 128-bit hex-encoded value.", + "type": "string" + } + }, + "id": "Trace", + "description": "A trace describes how long it takes for an application to perform some\noperations. It consists of a set of spans, each of which contains details\nabout an operation with time information and operation details.", + "type": "object" + } + }, + "icons": { + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" + }, + "protocol": "rest" +} diff --git a/vendor/google.golang.org/api/tracing/v1/tracing-gen.go b/vendor/google.golang.org/api/tracing/v1/tracing-gen.go new file mode 100644 index 000000000..d75a7fdb0 --- /dev/null +++ b/vendor/google.golang.org/api/tracing/v1/tracing-gen.go @@ -0,0 +1,1563 @@ +// Package tracing provides access to the Google Tracing API. +// +// See https://cloud.google.com/trace +// +// Usage example: +// +// import "google.golang.org/api/tracing/v1" +// ... +// tracingService, err := tracing.New(oauthHttpClient) +package tracing // import "google.golang.org/api/tracing/v1" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "tracing:v1" +const apiName = "tracing" +const apiVersion = "v1" +const basePath = "https://tracing.googleapis.com/" + +// OAuth2 scopes used by this API. 
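One way to obtain the oauthHttpClient shown in the package usage example above is Application Default Credentials scoped with one of the constants declared just below; a minimal sketch, where google.DefaultClient comes from golang.org/x/oauth2/google and the Projects.Traces method set follows from the discovery document above:

    // Hypothetical bootstrap for the generated tracing client.
    import (
        "log"

        "golang.org/x/net/context"
        "golang.org/x/oauth2/google"
        tracing "google.golang.org/api/tracing/v1"
    )

    func newTracingService() *tracing.Service {
        ctx := context.Background()
        httpClient, err := google.DefaultClient(ctx, tracing.TraceReadonlyScope)
        if err != nil {
            log.Fatal(err)
        }
        svc, err := tracing.New(httpClient)
        if err != nil {
            log.Fatal(err)
        }
        return svc // svc.Projects.Traces exposes the get/list/listSpans/batchUpdate calls
    }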
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // Write Trace data for a project or application + TraceAppendScope = "https://www.googleapis.com/auth/trace.append" + + // Read Trace data for a project or application + TraceReadonlyScope = "https://www.googleapis.com/auth/trace.readonly" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Traces = NewProjectsTracesService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Traces *ProjectsTracesService +} + +func NewProjectsTracesService(s *Service) *ProjectsTracesService { + rs := &ProjectsTracesService{s: s} + return rs +} + +type ProjectsTracesService struct { + s *Service +} + +// Annotation: Text annotation with a set of attributes. +type Annotation struct { + // Attributes: A set of attributes on the annotation. + Attributes map[string]AttributeValue `json:"attributes,omitempty"` + + // Description: A user-supplied message describing the event. + Description string `json:"description,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Attributes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Attributes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Annotation) MarshalJSON() ([]byte, error) { + type noMethod Annotation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AttributeValue: Allowed attribute values. +type AttributeValue struct { + // BoolValue: A boolean value. + BoolValue bool `json:"boolValue,omitempty"` + + // IntValue: An integer value. + IntValue int64 `json:"intValue,omitempty,string"` + + // StringValue: A string value. + StringValue string `json:"stringValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BoolValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BoolValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AttributeValue) MarshalJSON() ([]byte, error) { + type noMethod AttributeValue + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchUpdateSpansRequest: The request message for the +// `BatchUpdateSpans` method. +type BatchUpdateSpansRequest struct { + // SpanUpdates: A map from trace name to spans to be stored or updated. + SpanUpdates map[string]SpanUpdates `json:"spanUpdates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SpanUpdates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SpanUpdates") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BatchUpdateSpansRequest) MarshalJSON() ([]byte, error) { + type noMethod BatchUpdateSpansRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated +// empty messages in your APIs. A typical example is to use it as the +// request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + +// Link: Link one span with another which may be in a different Trace. +// Used (for +// example) in batching operations, where a single batch handler +// processes +// multiple requests from different traces. +type Link struct { + // SpanId: The span identifier of the linked span. + SpanId uint64 `json:"spanId,omitempty,string"` + + // TraceId: The trace identifier of the linked span. + TraceId string `json:"traceId,omitempty"` + + // Type: The type of the link. + // + // Possible values: + // "TYPE_UNSPECIFIED" + // "CHILD" + // "PARENT" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"SpanId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SpanId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Link) MarshalJSON() ([]byte, error) { + type noMethod Link + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListSpansResponse: The response message for the 'ListSpans' method. +type ListSpansResponse struct { + // NextPageToken: If defined, indicates that there are more spans that + // match the request + // and that this value should be passed to the next request to + // continue + // retrieving additional spans. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Spans: The requested spans if they are any in the specified trace. + Spans []*Span `json:"spans,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListSpansResponse) MarshalJSON() ([]byte, error) { + type noMethod ListSpansResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListTracesResponse: The response message for the `ListTraces` method. +type ListTracesResponse struct { + // NextPageToken: If defined, indicates that there are more traces that + // match the request + // and that this value should be passed to the next request to + // continue + // retrieving additional traces. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Traces: List of trace records returned. + Traces []*Trace `json:"traces,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListTracesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListTracesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Module struct { + // BuildId: Build_id is a unique identifier for the module, + // usually a hash of its contents + BuildId string `json:"buildId,omitempty"` + + // Module: Binary module. + // E.g. main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so + Module string `json:"module,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BuildId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BuildId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Module) MarshalJSON() ([]byte, error) { + type noMethod Module + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEvent: An event describing an RPC message sent/received on the +// network. +type NetworkEvent struct { + // KernelTime: If available, this is the kernel time: + // For sent messages, this is the time at which the first bit was + // sent. + // For received messages, this is the time at which the last bit + // was + // received. + KernelTime string `json:"kernelTime,omitempty"` + + // MessageId: Every message has an identifier, which must be different + // from all the + // network messages in this span. + // This is especially important when the request/response are streamed. + MessageId uint64 `json:"messageId,omitempty,string"` + + // MessageSize: Number of bytes send/receive. + MessageSize uint64 `json:"messageSize,omitempty,string"` + + // Possible values: + // "TYPE_UNSPECIFIED" + // "SENT" + // "RECV" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "KernelTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KernelTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEvent) MarshalJSON() ([]byte, error) { + type noMethod NetworkEvent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Span: A span represents a single operation within a trace. Spans can +// be nested +// and form a trace tree. Often, a trace contains a root span that +// describes the +// end-to-end latency and, optionally, one or more subspans for +// its sub-operations. Spans do not need to be contiguous. There may be +// gaps +// between spans in a trace. +type Span struct { + // Attributes: Properties of a span. Attributes at the span + // level. + // E.g. + // "/instance_id": "my-instance" + // "/zone": "us-central1-a" + // "/grpc/peer_address": "ip:port" (dns, etc.) + // "/grpc/deadline": + // "Duration" + // "/http/user_agent" + // "/http/request_bytes": 300 + // "/http/response_bytes": 1200 + // "/http/url": google.com/apis + // "/pid" + // "abc.com/myattribute": "my attribute value" + // + // Maximum length for attribute key is 128 characters, for string + // attribute + // value is 2K characters. + Attributes map[string]AttributeValue `json:"attributes,omitempty"` + + // HasRemoteParent: True if this Span has a remote parent (is an RPC + // server Span). + HasRemoteParent bool `json:"hasRemoteParent,omitempty"` + + // Id: Identifier for the span. Must be a 64-bit integer other than 0 + // and + // unique within a trace. + Id uint64 `json:"id,omitempty,string"` + + // Links: A collection of links. + Links []*Link `json:"links,omitempty"` + + // LocalEndTime: Local machine clock time from the UNIX epoch, + // at which span execution ended. + // On the server side these are the times when the server + // application + // handler finishes running. + LocalEndTime string `json:"localEndTime,omitempty"` + + // LocalStartTime: Local machine clock time from the UNIX epoch, + // at which span execution started. + // On the server side these are the times when the server + // application + // handler starts running. + LocalStartTime string `json:"localStartTime,omitempty"` + + // Name: Name of the span. The span name is sanitized and displayed in + // the + // Stackdriver Trace tool in the {% dynamic print + // site_values.console_name %}. + // The name may be a method name or some other per-call site name. + // For the same executable and the same call point, a best practice + // is + // to use a consistent name, which makes it easier to + // correlate + // cross-trace spans. + Name string `json:"name,omitempty"` + + // ParentId: ID of parent span. 0 or missing if this is a root span. + ParentId uint64 `json:"parentId,omitempty,string"` + + // StackTrace: Stack trace captured at the start of the span. This is + // optional. 
+ StackTrace *StackTrace `json:"stackTrace,omitempty"` + + // Status: The final status of the Span. This is optional. + Status *Status `json:"status,omitempty"` + + // TimeEvents: A collection of time-stamped events. + TimeEvents []*TimeEvent `json:"timeEvents,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Attributes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Attributes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Span) MarshalJSON() ([]byte, error) { + type noMethod Span + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SpanUpdates: Collection of spans. +type SpanUpdates struct { + Spans []*Span `json:"spans,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Spans") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Spans") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SpanUpdates) MarshalJSON() ([]byte, error) { + type noMethod SpanUpdates + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StackFrame: Presents a single stack frame in a stack trace. +type StackFrame struct { + // ColumnNumber: Column number is important in JavaScript(anonymous + // functions), + // Might not be available in some languages. + ColumnNumber int64 `json:"columnNumber,omitempty,string"` + + // FileName: File name of the frame. + FileName string `json:"fileName,omitempty"` + + // FunctionName: Fully qualified names which uniquely identify + // function/method/etc. + FunctionName string `json:"functionName,omitempty"` + + // LineNumber: Line number of the frame. + LineNumber int64 `json:"lineNumber,omitempty,string"` + + // LoadModule: Binary module the code is loaded from. + LoadModule *Module `json:"loadModule,omitempty"` + + // OriginalFunctionName: Used when function name is ‘mangled’. Not + // guaranteed to be fully + // qualified but usually it is. + OriginalFunctionName string `json:"originalFunctionName,omitempty"` + + // SourceVersion: source_version is deployment specific. 
It might + // be + // better to be stored in deployment metadata. + SourceVersion string `json:"sourceVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ColumnNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ColumnNumber") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StackFrame) MarshalJSON() ([]byte, error) { + type noMethod StackFrame + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StackTrace struct { + // StackFrame: Stack frames of this stack trace. + StackFrame []*StackFrame `json:"stackFrame,omitempty"` + + // StackTraceHashId: User can choose to use their own hash function to + // hash large attributes to + // save network bandwidth and storage. + // Typical usage is to pass both stack_frame and stack_trace_hash_id + // initially + // to inform the storage of the mapping. And in subsequent calls, pass + // in + // stack_trace_hash_id only. User shall verify the hash value + // is + // successfully stored. + StackTraceHashId uint64 `json:"stackTraceHashId,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "StackFrame") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StackFrame") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StackTrace) MarshalJSON() ([]byte, error) { + type noMethod StackTrace + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is +// suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of +// google.rpc.Code, but it may accept additional error codes if needed. 
+// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error +// details or +// localize it in the client. The optional error details may contain +// arbitrary +// information about the error. There is a predefined set of error +// detail types +// in the package `google.rpc` which can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it +// is not necessarily the actual wire format. When the `Status` message +// is +// exposed in different client libraries and different wire protocols, +// it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety +// of +// environments, either with or without APIs, to provide a +// consistent developer experience across different +// environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the +// client, +// it may embed the `Status` in the normal response to indicate the +// partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may +// have a `Status` message for error reporting purpose. +// +// - Batch operations. If a client uses batch request and batch +// response, the +// `Status` message should be used directly inside batch response, +// one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation +// results in its response, the status of those operations should +// be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could +// be used directly after any stripping needed for security/privacy +// reasons. +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There will + // be a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any + // user-facing error message should be localized and sent in + // the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type noMethod Status + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TimeEvent: A time-stamped annotation in the Span. +type TimeEvent struct { + // Annotation: Optional field for user supplied + // map + Annotation *Annotation `json:"annotation,omitempty"` + + // LocalTime: The local machine absolute timestamp when this event + // happened. + LocalTime string `json:"localTime,omitempty"` + + // NetworkEvent: Optional field that can be used only for network + // events. + NetworkEvent *NetworkEvent `json:"networkEvent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Annotation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Annotation") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TimeEvent) MarshalJSON() ([]byte, error) { + type noMethod TimeEvent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Trace: A trace describes how long it takes for an application to +// perform some +// operations. It consists of a set of spans, each of which contains +// details +// about an operation with time information and operation details. +type Trace struct { + // Name: ID of the trace which is + // "projects//traces/". + // trace_id is globally unique identifier for the trace. Common to all + // the + // spans. It is conceptually a 128-bit hex-encoded value. + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *Trace) MarshalJSON() ([]byte, error) { + type noMethod Trace + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "tracing.projects.traces.batchUpdate": + +type ProjectsTracesBatchUpdateCall struct { + s *Service + parent string + batchupdatespansrequest *BatchUpdateSpansRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchUpdate: Sends new spans to Stackdriver Trace or updates existing +// spans. If the +// name of a trace that you send matches that of an existing trace, any +// fields +// in the existing trace and its spans are overwritten by the provided +// values, +// and any new fields provided are merged with the existing trace data. +// If the +// name does not match, a new trace is created with given set of spans. +func (r *ProjectsTracesService) BatchUpdate(parent string, batchupdatespansrequest *BatchUpdateSpansRequest) *ProjectsTracesBatchUpdateCall { + c := &ProjectsTracesBatchUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.batchupdatespansrequest = batchupdatespansrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTracesBatchUpdateCall) Fields(s ...googleapi.Field) *ProjectsTracesBatchUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTracesBatchUpdateCall) Context(ctx context.Context) *ProjectsTracesBatchUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsTracesBatchUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTracesBatchUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchupdatespansrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/traces:batchUpdate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "tracing.projects.traces.batchUpdate" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsTracesBatchUpdateCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sends new spans to Stackdriver Trace or updates existing spans. If the\nname of a trace that you send matches that of an existing trace, any fields\nin the existing trace and its spans are overwritten by the provided values,\nand any new fields provided are merged with the existing trace data. If the\nname does not match, a new trace is created with given set of spans.", + // "flatPath": "v1/projects/{projectsId}/traces:batchUpdate", + // "httpMethod": "PATCH", + // "id": "tracing.projects.traces.batchUpdate", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "ID of the Cloud project where the trace data is stored.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/traces:batchUpdate", + // "request": { + // "$ref": "BatchUpdateSpansRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/trace.append" + // ] + // } + +} + +// method id "tracing.projects.traces.get": + +type ProjectsTracesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns a specific trace. +func (r *ProjectsTracesService) Get(name string) *ProjectsTracesGetCall { + c := &ProjectsTracesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTracesGetCall) Fields(s ...googleapi.Field) *ProjectsTracesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsTracesGetCall) IfNoneMatch(entityTag string) *ProjectsTracesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTracesGetCall) Context(ctx context.Context) *ProjectsTracesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
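For orientation, a minimal end-to-end sketch of the BatchUpdate surface above. This is illustrative only, not part of the vendored patch: the import path (assumed here to be google.golang.org/api/tracing/v1), the project ID, the trace ID, and the span values are placeholders, and Application Default Credentials are assumed to be available.

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	tracing "google.golang.org/api/tracing/v1" // assumed import path for the generated client
)

func main() {
	ctx := context.Background()

	// Placeholder identifiers; substitute real values.
	const parent = "projects/my-project"
	const traceName = parent + "/traces/0123456789abcdef0123456789abcdef"

	// google.DefaultClient obtains an *http.Client via Application Default Credentials.
	hc, err := google.DefaultClient(ctx, tracing.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := tracing.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// One span, stored (or merged) under the given trace name.
	req := &tracing.BatchUpdateSpansRequest{
		SpanUpdates: map[string]tracing.SpanUpdates{
			traceName: {
				Spans: []*tracing.Span{{
					Id:             1,
					Name:           "example-span",
					LocalStartTime: "2017-03-01T00:00:00Z",
					LocalEndTime:   "2017-03-01T00:00:01Z",
				}},
			},
		},
	}
	if _, err := svc.Projects.Traces.BatchUpdate(parent, req).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}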
+func (c *ProjectsTracesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTracesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "tracing.projects.traces.get" call. +// Exactly one of *Trace or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Trace.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsTracesGetCall) Do(opts ...googleapi.CallOption) (*Trace, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Trace{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a specific trace.", + // "flatPath": "v1/projects/{projectsId}/traces/{tracesId}", + // "httpMethod": "GET", + // "id": "tracing.projects.traces.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "ID of the trace which is \"projects/\u003cproject_id\u003e/traces/\u003ctrace_id\u003e\".", + // "location": "path", + // "pattern": "^projects/[^/]+/traces/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Trace" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/trace.readonly" + // ] + // } + +} + +// method id "tracing.projects.traces.list": + +type ProjectsTracesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns of a list of traces that match the specified filter +// conditions. +func (r *ProjectsTracesService) List(parent string) *ProjectsTracesListCall { + c := &ProjectsTracesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// EndTime sets the optional parameter "endTime": End of the time +// interval (inclusive) during which the trace data was +// collected from the application. 
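A correspondingly small sketch for Get, assuming the same imports as the previous snippet, a *tracing.Service (svc) constructed the same way, and a full trace resource name of the form "projects/<project_id>/traces/<trace_id>".

// getTrace fetches a single trace, requesting only its name via a partial response.
func getTrace(ctx context.Context, svc *tracing.Service, name string) (*tracing.Trace, error) {
	return svc.Projects.Traces.Get(name).Fields("name").Context(ctx).Do()
}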
+func (c *ProjectsTracesListCall) EndTime(endTime string) *ProjectsTracesListCall { + c.urlParams_.Set("endTime", endTime) + return c +} + +// Filter sets the optional parameter "filter": An optional filter for +// the request. +// Example: +// "version_label_key:a some_label:some_label_key" +// returns traces from version a and has some_label with some_label_key. +func (c *ProjectsTracesListCall) Filter(filter string) *ProjectsTracesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": Field used to sort the +// returned traces. +// Can be one of the following: +// +// * `trace_id` +// * `name` (`name` field of root span in the trace) +// * `duration` (difference between `end_time` and `start_time` fields +// of +// the root span) +// * `start` (`start_time` field of the root span) +// +// Descending order can be specified by appending `desc` to the sort +// field +// (for example, `name desc`). +// +// Only one sort field is permitted. +func (c *ProjectsTracesListCall) OrderBy(orderBy string) *ProjectsTracesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// traces to return. If not specified or <= 0, the +// implementation selects a reasonable value. The implementation +// may +// return fewer traces than the requested page size. +func (c *ProjectsTracesListCall) PageSize(pageSize int64) *ProjectsTracesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token identifying +// the page of results to return. If provided, use the +// value of the `next_page_token` field from a previous request. +func (c *ProjectsTracesListCall) PageToken(pageToken string) *ProjectsTracesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// StartTime sets the optional parameter "startTime": Start of the time +// interval (inclusive) during which the trace data was +// collected from the application. +func (c *ProjectsTracesListCall) StartTime(startTime string) *ProjectsTracesListCall { + c.urlParams_.Set("startTime", startTime) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTracesListCall) Fields(s ...googleapi.Field) *ProjectsTracesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsTracesListCall) IfNoneMatch(entityTag string) *ProjectsTracesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTracesListCall) Context(ctx context.Context) *ProjectsTracesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsTracesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTracesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/traces") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "tracing.projects.traces.list" call. +// Exactly one of *ListTracesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListTracesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTracesListCall) Do(opts ...googleapi.CallOption) (*ListTracesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListTracesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns of a list of traces that match the specified filter conditions.", + // "flatPath": "v1/projects/{projectsId}/traces", + // "httpMethod": "GET", + // "id": "tracing.projects.traces.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "endTime": { + // "description": "End of the time interval (inclusive) during which the trace data was\ncollected from the application.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, + // "filter": { + // "description": "An optional filter for the request.\nExample:\n\"version_label_key:a some_label:some_label_key\"\nreturns traces from version a and has some_label with some_label_key.", + // "location": "query", + // "type": "string" + // }, + // "orderBy": { + // "description": "Field used to sort the returned traces. Optional.\nCan be one of the following:\n\n* `trace_id`\n* `name` (`name` field of root span in the trace)\n* `duration` (difference between `end_time` and `start_time` fields of\n the root span)\n* `start` (`start_time` field of the root span)\n\nDescending order can be specified by appending `desc` to the sort field\n(for example, `name desc`).\n\nOnly one sort field is permitted.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum number of traces to return. 
If not specified or \u003c= 0, the\nimplementation selects a reasonable value. The implementation may\nreturn fewer traces than the requested page size. Optional.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token identifying the page of results to return. If provided, use the\nvalue of the `next_page_token` field from a previous request. Optional.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "ID of the Cloud project where the trace data is stored.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "startTime": { + // "description": "Start of the time interval (inclusive) during which the trace data was\ncollected from the application.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/traces", + // "response": { + // "$ref": "ListTracesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/trace.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsTracesListCall) Pages(ctx context.Context, f func(*ListTracesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "tracing.projects.traces.listSpans": + +type ProjectsTracesListSpansCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListSpans: Returns a list of spans within a trace. +func (r *ProjectsTracesService) ListSpans(name string) *ProjectsTracesListSpansCall { + c := &ProjectsTracesListSpansCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// PageToken sets the optional parameter "pageToken": Token identifying +// the page of results to return. If provided, use the +// value of the `page_token` field from a previous request. +func (c *ProjectsTracesListSpansCall) PageToken(pageToken string) *ProjectsTracesListSpansCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTracesListSpansCall) Fields(s ...googleapi.Field) *ProjectsTracesListSpansCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsTracesListSpansCall) IfNoneMatch(entityTag string) *ProjectsTracesListSpansCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
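And a sketch of paging through traces with List and Pages, again assuming an already-constructed *tracing.Service (svc) and the earlier imports plus "fmt"; the parent project below is a placeholder.

// printRecentTraces walks all result pages, newest traces first.
func printRecentTraces(ctx context.Context, svc *tracing.Service) error {
	call := svc.Projects.Traces.List("projects/my-project").
		OrderBy("start desc").
		PageSize(100)
	return call.Pages(ctx, func(page *tracing.ListTracesResponse) error {
		for _, t := range page.Traces {
			fmt.Println(t.Name)
		}
		return nil
	})
}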
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTracesListSpansCall) Context(ctx context.Context) *ProjectsTracesListSpansCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsTracesListSpansCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTracesListSpansCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:listSpans") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "tracing.projects.traces.listSpans" call. +// Exactly one of *ListSpansResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListSpansResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTracesListSpansCall) Do(opts ...googleapi.CallOption) (*ListSpansResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListSpansResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a list of spans within a trace.", + // "flatPath": "v1/projects/{projectsId}/traces/{tracesId}:listSpans", + // "httpMethod": "GET", + // "id": "tracing.projects.traces.listSpans", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "ID of the span set where is \"projects/\u003cproject_id\u003e/traces/\u003ctrace_id\u003e\".", + // "location": "path", + // "pattern": "^projects/[^/]+/traces/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "pageToken": { + // "description": "Token identifying the page of results to return. If provided, use the\nvalue of the `page_token` field from a previous request. Optional.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}:listSpans", + // "response": { + // "$ref": "ListSpansResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/trace.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. 
+// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsTracesListSpansCall) Pages(ctx context.Context, f func(*ListSpansResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/vendor/google.golang.org/api/translate/v2/translate-gen.go b/vendor/google.golang.org/api/translate/v2/translate-gen.go index a3705f912..86e2587c6 100644 --- a/vendor/google.golang.org/api/translate/v2/translate-gen.go +++ b/vendor/google.golang.org/api/translate/v2/translate-gen.go @@ -57,9 +57,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Detections *DetectionsService @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewDetectionsService(s *Service) *DetectionsService { rs := &DetectionsService{s: s} return rs @@ -370,6 +375,7 @@ func (c *DetectionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -509,6 +515,7 @@ func (c *LanguagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -662,6 +669,7 @@ func (c *TranslationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go index 398b77983..e1434bacd 100644 --- a/vendor/google.golang.org/api/transport/dial.go +++ b/vendor/google.golang.org/api/transport/dial.go @@ -53,8 +53,11 @@ func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Clie if o.APIKey != "" { hc := &http.Client{ Transport: >ransport.APIKey{ - Key: o.APIKey, - Transport: userAgentTransport{userAgent: o.UserAgent}, + Key: o.APIKey, + Transport: userAgentTransport{ + base: baseTransport(ctx), + userAgent: o.UserAgent, + }, }, } return hc, o.Endpoint, nil @@ -76,7 +79,10 @@ func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Clie hc := &http.Client{ Transport: &oauth2.Transport{ Source: o.TokenSource, - Base: userAgentTransport{userAgent: o.UserAgent}, + Base: userAgentTransport{ + base: baseTransport(ctx), + userAgent: o.UserAgent, + }, }, } return hc, o.Endpoint, nil @@ -90,10 +96,7 @@ type userAgentTransport struct { 
func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base if rt == nil { - rt = http.DefaultTransport - if rt == nil { - return nil, errors.New("transport: no Transport specified or available") - } + return nil, errors.New("transport: no Transport specified") } if t.userAgent == "" { return rt.RoundTrip(req) @@ -110,6 +113,16 @@ func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) // Set at init time by dial_appengine.go. If nil, we're not on App Engine. var appengineDialerHook func(context.Context) grpc.DialOption +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// baseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. +func baseTransport(ctx context.Context) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + return http.DefaultTransport +} // DialGRPC returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. @@ -164,3 +177,25 @@ func serviceAcctTokenSource(ctx context.Context, filename string, scope ...strin } return cfg.TokenSource(ctx), nil } + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP client specified") + } + if o.GRPCConn != nil { + return o.GRPCConn, nil + } + grpcOpts := []grpc.DialOption{grpc.WithInsecure()} + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) 
+} diff --git a/vendor/google.golang.org/api/transport/dial_appengine.go b/vendor/google.golang.org/api/transport/dial_appengine.go index 201244d24..fdac1f34d 100644 --- a/vendor/google.golang.org/api/transport/dial_appengine.go +++ b/vendor/google.golang.org/api/transport/dial_appengine.go @@ -18,10 +18,12 @@ package transport import ( "net" + "net/http" "time" "golang.org/x/net/context" "google.golang.org/appengine/socket" + "google.golang.org/appengine/urlfetch" "google.golang.org/grpc" ) @@ -31,4 +33,8 @@ func init() { return socket.DialTimeout(ctx, "tcp", addr, timeout) }) } + + appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { + return &urlfetch.Transport{Context: ctx} + } } diff --git a/vendor/google.golang.org/api/urlshortener/v1/urlshortener-gen.go b/vendor/google.golang.org/api/urlshortener/v1/urlshortener-gen.go index a88511327..13ce6c64b 100644 --- a/vendor/google.golang.org/api/urlshortener/v1/urlshortener-gen.go +++ b/vendor/google.golang.org/api/urlshortener/v1/urlshortener-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Url *UrlService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewUrlService(s *Service) *UrlService { rs := &UrlService{s: s} return rs @@ -371,6 +376,7 @@ func (c *UrlGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -509,6 +515,7 @@ func (c *UrlInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.url) if err != nil { @@ -653,6 +660,7 @@ func (c *UrlListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/vision/v1/vision-api.json b/vendor/google.golang.org/api/vision/v1/vision-api.json index f702396a0..5d8e2d00c 100644 --- a/vendor/google.golang.org/api/vision/v1/vision-api.json +++ b/vendor/google.golang.org/api/vision/v1/vision-api.json @@ -1,85 +1,186 @@ { - "id": "vision:v1", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", - "protocol": "rest", + "rootUrl": "https://vision.googleapis.com/", + "ownerDomain": "google.com", + "name": "vision", + "batchPath": 
"batch", "title": "Google Cloud Vision API", + "ownerName": "Google", "resources": { "images": { "methods": { "annotate": { - "id": "vision.images.annotate", - "response": { - "$ref": "BatchAnnotateImagesResponse" - }, - "parameterOrder": [], "description": "Run image detection and annotation for a batch of images.", "request": { "$ref": "BatchAnnotateImagesRequest" }, - "flatPath": "v1/images:annotate", + "response": { + "$ref": "BatchAnnotateImagesResponse" + }, + "parameterOrder": [], "httpMethod": "POST", - "parameters": {}, - "path": "v1/images:annotate", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] + ], + "parameters": {}, + "flatPath": "v1/images:annotate", + "path": "v1/images:annotate", + "id": "vision.images.annotate" } } } }, + "parameters": { + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Returns response with indentations and line breaks." + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "$.xgafv": { + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ], + "description": "V1 error format." + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "alt": { + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ] + }, + "access_token": { + "type": "string", + "location": "query", + "description": "OAuth access token." + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Pretty-print response." 
+ }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "type": "string", + "location": "query" + } + }, + "version": "v1", + "baseUrl": "https://vision.googleapis.com/", + "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", + "kind": "discovery#restDescription", + "servicePath": "", + "basePath": "", + "revision": "20170214", + "documentationLink": "https://cloud.google.com/vision/", + "id": "vision:v1", + "discoveryVersion": "v1", + "version_module": "True", "schemas": { - "ImageSource": { - "description": "External image source (Google Cloud Storage image location).", + "DominantColorsAnnotation": { + "id": "DominantColorsAnnotation", + "description": "Set of dominant colors and their corresponding scores.", "type": "object", "properties": { - "gcsImageUri": { - "description": "Google Cloud Storage image URI, which must be in the following form:\n`gs://bucket_name/object_name` (for details, see\n[Google Cloud Storage Request URIs](https://cloud.google.com/storage/docs/reference-uris)).\nNOTE: Cloud Storage object versioning is not supported.", - "type": "string" + "colors": { + "description": "RGB color values with their score and pixel fraction.", + "type": "array", + "items": { + "$ref": "ColorInfo" + } } - }, - "id": "ImageSource" + } }, - "AnnotateImageRequest": { - "description": "Request for performing Google Cloud Vision API tasks over a user-provided\nimage, with user-requested features.", + "Vertex": { + "description": "A vertex represents a 2D point in the image.\nNOTE: the vertex coordinates are in the same scale as the original image.", "type": "object", "properties": { - "image": { - "description": "The image to be processed.", - "$ref": "Image" - }, - "imageContext": { - "description": "Additional context that may accompany the image.", - "$ref": "ImageContext" + "x": { + "description": "X coordinate.", + "format": "int32", + "type": "integer" }, - "features": { - "description": "Requested features.", + "y": { + "description": "Y coordinate.", + "format": "int32", + "type": "integer" + } + }, + "id": "Vertex" + }, + "BoundingPoly": { + "description": "A bounding polygon for the detected image annotation.", + "type": "object", + "properties": { + "vertices": { "type": "array", "items": { - "$ref": "Feature" - } + "$ref": "Vertex" + }, + "description": "The bounding polygon vertices." } }, - "id": "AnnotateImageRequest" + "id": "BoundingPoly" }, "AnnotateImageResponse": { "description": "Response to an image annotation request.", "type": "object", "properties": { - "labelAnnotations": { - "description": "If present, label detection has completed successfully.", - "type": "array", - "items": { - "$ref": "EntityAnnotation" - } + "error": { + "$ref": "Status", + "description": "If set, represents the error message for the operation.\nNote that filled-in image annotations are guaranteed to be\ncorrect, even when `error` is set." 
}, "landmarkAnnotations": { "description": "If present, landmark detection has completed successfully.", @@ -88,14 +189,6 @@ "$ref": "EntityAnnotation" } }, - "safeSearchAnnotation": { - "description": "If present, safe-search annotation has completed successfully.", - "$ref": "SafeSearchAnnotation" - }, - "imagePropertiesAnnotation": { - "description": "If present, image properties were extracted successfully.", - "$ref": "ImageProperties" - }, "textAnnotations": { "description": "If present, text (OCR) detection has completed successfully.", "type": "array", @@ -103,6 +196,17 @@ "$ref": "EntityAnnotation" } }, + "faceAnnotations": { + "type": "array", + "items": { + "$ref": "FaceAnnotation" + }, + "description": "If present, face detection has completed successfully." + }, + "imagePropertiesAnnotation": { + "description": "If present, image properties were extracted successfully.", + "$ref": "ImageProperties" + }, "logoAnnotations": { "description": "If present, logo detection has completed successfully.", "type": "array", @@ -110,311 +214,213 @@ "$ref": "EntityAnnotation" } }, - "faceAnnotations": { - "description": "If present, face detection has completed successfully.", + "safeSearchAnnotation": { + "$ref": "SafeSearchAnnotation", + "description": "If present, safe-search annotation has completed successfully." + }, + "labelAnnotations": { + "description": "If present, label detection has completed successfully.", "type": "array", "items": { - "$ref": "FaceAnnotation" + "$ref": "EntityAnnotation" } - }, - "error": { - "description": "If set, represents the error message for the operation.\nNote that filled-in image annotations are guaranteed to be\ncorrect, even when `error` is set.", - "$ref": "Status" } }, "id": "AnnotateImageResponse" }, - "LatLongRect": { - "description": "Rectangle determined by min and max `LatLng` pairs.", + "BatchAnnotateImagesResponse": { + "description": "Response to a batch image annotation request.", "type": "object", "properties": { - "maxLatLng": { - "description": "Max lat/long pair.", - "$ref": "LatLng" - }, - "minLatLng": { - "description": "Min lat/long pair.", - "$ref": "LatLng" + "responses": { + "description": "Individual responses to image annotation requests within the batch.", + "type": "array", + "items": { + "$ref": "AnnotateImageResponse" + } } }, - "id": "LatLongRect" + "id": "BatchAnnotateImagesResponse" }, - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. 
There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "ImageSource": { + "description": "External image source (Google Cloud Storage image location).", "type": "object", "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer", - "format": "int32" - }, - "details": { - "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", - "type": "array", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - } - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "gcsImageUri": { + "description": "NOTE: For new code `image_uri` below is preferred.\nGoogle Cloud Storage image URI, which must be in the following form:\n`gs://bucket_name/object_name` (for details, see\n[Google Cloud Storage Request\nURIs](https://cloud.google.com/storage/docs/reference-uris)).\nNOTE: Cloud Storage object versioning is not supported.", "type": "string" } }, - "id": "Status" + "id": "ImageSource" }, - "FaceAnnotation": { - "description": "A face annotation object contains the results of face detection.", + "LocationInfo": { + "description": "Detected entity location information.", "type": "object", "properties": { - "tiltAngle": { - "description": "Pitch angle, which indicates the upwards/downwards angle that the face is\npointing relative to the image's horizontal plane. 
Range [-180,180].", - "type": "number", - "format": "float" - }, - "underExposedLikelihood": { - "description": "Under-exposed likelihood.", - "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" - ], - "enumDescriptions": [ - "Unknown likelihood.", - "It is very unlikely that the image belongs to the specified vertical.", - "It is unlikely that the image belongs to the specified vertical.", - "It is possible that the image belongs to the specified vertical.", - "It is likely that the image belongs to the specified vertical.", - "It is very likely that the image belongs to the specified vertical." - ], - "type": "string" - }, - "fdBoundingPoly": { - "description": "The `fd_bounding_poly` bounding polygon is tighter than the\n`boundingPoly`, and encloses only the skin part of the face. Typically, it\nis used to eliminate the face from any image analysis that detects the\n\"amount of skin\" visible in an image. It is not based on the\nlandmarker results, only on the initial face detection, hence\nthe \u003ccode\u003efd\u003c/code\u003e (face detection) prefix.", - "$ref": "BoundingPoly" - }, - "landmarkingConfidence": { - "description": "Face landmarking confidence. Range [0, 1].", - "type": "number", - "format": "float" - }, - "joyLikelihood": { - "description": "Joy likelihood.", - "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" - ], - "enumDescriptions": [ - "Unknown likelihood.", - "It is very unlikely that the image belongs to the specified vertical.", - "It is unlikely that the image belongs to the specified vertical.", - "It is possible that the image belongs to the specified vertical.", - "It is likely that the image belongs to the specified vertical.", - "It is very likely that the image belongs to the specified vertical." - ], - "type": "string" - }, - "detectionConfidence": { - "description": "Detection confidence. Range [0, 1].", - "type": "number", - "format": "float" - }, - "surpriseLikelihood": { - "description": "Surprise likelihood.", - "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" - ], - "enumDescriptions": [ - "Unknown likelihood.", - "It is very unlikely that the image belongs to the specified vertical.", - "It is unlikely that the image belongs to the specified vertical.", - "It is possible that the image belongs to the specified vertical.", - "It is likely that the image belongs to the specified vertical.", - "It is very likely that the image belongs to the specified vertical." - ], - "type": "string" - }, - "angerLikelihood": { - "description": "Anger likelihood.", - "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" - ], - "enumDescriptions": [ - "Unknown likelihood.", - "It is very unlikely that the image belongs to the specified vertical.", - "It is unlikely that the image belongs to the specified vertical.", - "It is possible that the image belongs to the specified vertical.", - "It is likely that the image belongs to the specified vertical.", - "It is very likely that the image belongs to the specified vertical." 
- ], - "type": "string" - }, - "headwearLikelihood": { - "description": "Headwear likelihood.", - "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" - ], - "enumDescriptions": [ - "Unknown likelihood.", - "It is very unlikely that the image belongs to the specified vertical.", - "It is unlikely that the image belongs to the specified vertical.", - "It is possible that the image belongs to the specified vertical.", - "It is likely that the image belongs to the specified vertical.", - "It is very likely that the image belongs to the specified vertical." - ], + "latLng": { + "description": "lat/long location coordinates.", + "$ref": "LatLng" + } + }, + "id": "LocationInfo" + }, + "Property": { + "description": "A `Property` consists of a user-supplied name/value pair.", + "type": "object", + "properties": { + "uint64Value": { + "description": "Value of numeric properties.", + "format": "uint64", "type": "string" }, - "panAngle": { - "description": "Yaw angle, which indicates the leftward/rightward angle that the face is\npointing relative to the vertical plane perpendicular to the image. Range\n[-180,180].", - "type": "number", - "format": "float" - }, - "boundingPoly": { - "description": "The bounding polygon around the face. The coordinates of the bounding box\nare in the original image's scale, as returned in `ImageParams`.\nThe bounding box is computed to \"frame\" the face in accordance with human\nexpectations. It is based on the landmarker results.\nNote that one or more x and/or y coordinates may not be generated in the\n`BoundingPoly` (the polygon will be unbounded) if only a partial face\nappears in the image to be annotated.", - "$ref": "BoundingPoly" - }, - "landmarks": { - "description": "Detected face landmarks.", - "type": "array", - "items": { - "$ref": "Landmark" - } - }, - "blurredLikelihood": { - "description": "Blurred likelihood.", - "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" - ], - "enumDescriptions": [ - "Unknown likelihood.", - "It is very unlikely that the image belongs to the specified vertical.", - "It is unlikely that the image belongs to the specified vertical.", - "It is possible that the image belongs to the specified vertical.", - "It is likely that the image belongs to the specified vertical.", - "It is very likely that the image belongs to the specified vertical." - ], + "name": { + "description": "Name of the property.", "type": "string" }, - "rollAngle": { - "description": "Roll angle, which indicates the amount of clockwise/anti-clockwise rotation\nof the face relative to the image vertical about the axis perpendicular to\nthe face. Range [-180,180].", - "type": "number", - "format": "float" - }, - "sorrowLikelihood": { - "description": "Sorrow likelihood.", - "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" - ], - "enumDescriptions": [ - "Unknown likelihood.", - "It is very unlikely that the image belongs to the specified vertical.", - "It is unlikely that the image belongs to the specified vertical.", - "It is possible that the image belongs to the specified vertical.", - "It is likely that the image belongs to the specified vertical.", - "It is very likely that the image belongs to the specified vertical." 
- ], + "value": { + "description": "Value of the property.", "type": "string" } }, - "id": "FaceAnnotation" + "id": "Property" }, - "Vertex": { - "description": "A vertex represents a 2D point in the image.\nNOTE: the vertex coordinates are in the same scale as the original image.", + "Position": { + "description": "A 3D position in the image, used primarily for Face detection landmarks.\nA valid Position must have both x and y coordinates.\nThe position coordinates are in the same scale as the original image.", "type": "object", "properties": { "y": { "description": "Y coordinate.", - "type": "integer", - "format": "int32" + "format": "float", + "type": "number" }, "x": { "description": "X coordinate.", - "type": "integer", - "format": "int32" + "format": "float", + "type": "number" + }, + "z": { + "type": "number", + "description": "Z coordinate (or depth).", + "format": "float" } }, - "id": "Vertex" + "id": "Position" }, "ColorInfo": { "description": "Color information consists of RGB channels, score, and the fraction of\nthe image that the color occupies in the image.", "type": "object", "properties": { + "score": { + "description": "Image-specific score for this color. Value in range [0, 1].", + "format": "float", + "type": "number" + }, "pixelFraction": { - "description": "The fraction of pixels the color occupies in the image.\nValue in range [0, 1].", "type": "number", + "description": "The fraction of pixels the color occupies in the image.\nValue in range [0, 1].", "format": "float" }, "color": { - "description": "RGB components of the color.", - "$ref": "Color" - }, - "score": { - "description": "Image-specific score for this color. Value in range [0, 1].", - "type": "number", - "format": "float" + "$ref": "Color", + "description": "RGB components of the color." } }, "id": "ColorInfo" }, - "BoundingPoly": { - "description": "A bounding polygon for the detected image annotation.", + "EntityAnnotation": { + "description": "Set of detected entity features.", "type": "object", "properties": { - "vertices": { - "description": "The bounding polygon vertices.", + "properties": { + "description": "Some entities may have optional user-supplied `Property` (name/value)\nfields, such a score or string that qualifies the entity.", "type": "array", "items": { - "$ref": "Vertex" + "$ref": "Property" } + }, + "score": { + "description": "Overall score of the result. Range [0, 1].", + "format": "float", + "type": "number" + }, + "locations": { + "description": "The location information for the detected entity. Multiple\n`LocationInfo` elements can be present because one location may\nindicate the location of the scene in the image, and another location\nmay indicate the location of the place where the image was taken.\nLocation information is usually present for landmarks.", + "type": "array", + "items": { + "$ref": "LocationInfo" + } + }, + "mid": { + "description": "Opaque entity ID. Some IDs may be available in\n[Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).", + "type": "string" + }, + "confidence": { + "type": "number", + "description": "The accuracy of the entity detection in an image.\nFor example, for an image in which the \"Eiffel Tower\" entity is detected,\nthis field represents the confidence that there is a tower in the query\nimage. Range [0, 1].", + "format": "float" + }, + "boundingPoly": { + "$ref": "BoundingPoly", + "description": "Image region to which this entity belongs. Currently not produced\nfor `LABEL_DETECTION` features. 
For `TEXT_DETECTION` (OCR), `boundingPoly`s\nare produced for the entire text detected in an image region, followed by\n`boundingPoly`s for each word within the detected text." + }, + "locale": { + "description": "The language code for the locale in which the entity textual\n`description` is expressed.", + "type": "string" + }, + "topicality": { + "description": "The relevancy of the ICA (Image Content Annotation) label to the\nimage. For example, the relevancy of \"tower\" is likely higher to an image\ncontaining the detected \"Eiffel Tower\" than to an image containing a\ndetected distant towering building, even though the confidence that\nthere is a tower in each image may be the same. Range [0, 1].", + "format": "float", + "type": "number" + }, + "description": { + "description": "Entity textual description, expressed in its `locale` language.", + "type": "string" } }, - "id": "BoundingPoly" + "id": "EntityAnnotation" }, "Landmark": { "description": "A face-specific landmark (for example, a face feature).\nLandmark positions may fall outside the bounds of the image\nif the face is near one or more edges of the image.\nTherefore it is NOT guaranteed that `0 \u003c= x \u003c width` or\n`0 \u003c= y \u003c height`.", "type": "object", "properties": { - "position": { - "description": "Face landmark position.", - "$ref": "Position" - }, "type": { - "description": "Face landmark type.", + "enumDescriptions": [ + "Unknown face landmark detected. Should not be filled.", + "Left eye.", + "Right eye.", + "Left of left eyebrow.", + "Right of left eyebrow.", + "Left of right eyebrow.", + "Right of right eyebrow.", + "Midpoint between eyes.", + "Nose tip.", + "Upper lip.", + "Lower lip.", + "Mouth left.", + "Mouth right.", + "Mouth center.", + "Nose, bottom right.", + "Nose, bottom left.", + "Nose, bottom center.", + "Left eye, top boundary.", + "Left eye, right corner.", + "Left eye, bottom boundary.", + "Left eye, left corner.", + "Right eye, top boundary.", + "Right eye, right corner.", + "Right eye, bottom boundary.", + "Right eye, left corner.", + "Left eyebrow, upper midpoint.", + "Right eyebrow, upper midpoint.", + "Left ear tragion.", + "Right ear tragion.", + "Left eye pupil.", + "Right eye pupil.", + "Forehead glabella.", + "Chin gnathion.", + "Chin left gonion.", + "Chin right gonion." + ], "enum": [ "UNKNOWN_LANDMARK", "LEFT_EYE", @@ -452,198 +458,411 @@ "CHIN_LEFT_GONION", "CHIN_RIGHT_GONION" ], + "description": "Face landmark type.", + "type": "string" + }, + "position": { + "$ref": "Position", + "description": "Face landmark position." + } + }, + "id": "Landmark" + }, + "Image": { + "description": "Client image to perform Google Cloud Vision API tasks over.", + "type": "object", + "properties": { + "content": { + "description": "Image content, represented as a stream of bytes.\nNote: as with all `bytes` fields, protobuffers use a pure binary\nrepresentation, whereas JSON representations use base64.", + "format": "byte", + "type": "string" + }, + "source": { + "description": "Google Cloud Storage image location. If both `content` and `source`\nare provided for an image, `content` takes precedence and is\nused to perform the image annotation request.", + "$ref": "ImageSource" + } + }, + "id": "Image" + }, + "FaceAnnotation": { + "description": "A face annotation object contains the results of face detection.", + "type": "object", + "properties": { + "angerLikelihood": { "enumDescriptions": [ - "Unknown face landmark detected. 
Should not be filled.", - "Left eye.", - "Right eye.", - "Left of left eyebrow.", - "Right of left eyebrow.", - "Left of right eyebrow.", - "Right of right eyebrow.", - "Midpoint between eyes.", - "Nose tip.", - "Upper lip.", - "Lower lip.", - "Mouth left.", - "Mouth right.", - "Mouth center.", - "Nose, bottom right.", - "Nose, bottom left.", - "Nose, bottom center.", - "Left eye, top boundary.", - "Left eye, right corner.", - "Left eye, bottom boundary.", - "Left eye, left corner.", - "Right eye, top boundary.", - "Right eye, right corner.", - "Right eye, bottom boundary.", - "Right eye, left corner.", - "Left eyebrow, upper midpoint.", - "Right eyebrow, upper midpoint.", - "Left ear tragion.", - "Right ear tragion.", - "Left eye pupil.", - "Right eye pupil.", - "Forehead glabella.", - "Chin gnathion.", - "Chin left gonion.", - "Chin right gonion." + "Unknown likelihood.", + "It is very unlikely that the image belongs to the specified vertical.", + "It is unlikely that the image belongs to the specified vertical.", + "It is possible that the image belongs to the specified vertical.", + "It is likely that the image belongs to the specified vertical.", + "It is very likely that the image belongs to the specified vertical." + ], + "enum": [ + "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "description": "Anger likelihood.", + "type": "string" + }, + "landmarks": { + "description": "Detected face landmarks.", + "type": "array", + "items": { + "$ref": "Landmark" + } + }, + "surpriseLikelihood": { + "enum": [ + "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "description": "Surprise likelihood.", + "type": "string", + "enumDescriptions": [ + "Unknown likelihood.", + "It is very unlikely that the image belongs to the specified vertical.", + "It is unlikely that the image belongs to the specified vertical.", + "It is possible that the image belongs to the specified vertical.", + "It is likely that the image belongs to the specified vertical.", + "It is very likely that the image belongs to the specified vertical." + ] + }, + "joyLikelihood": { + "enum": [ + "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "description": "Joy likelihood.", + "type": "string", + "enumDescriptions": [ + "Unknown likelihood.", + "It is very unlikely that the image belongs to the specified vertical.", + "It is unlikely that the image belongs to the specified vertical.", + "It is possible that the image belongs to the specified vertical.", + "It is likely that the image belongs to the specified vertical.", + "It is very likely that the image belongs to the specified vertical." + ] + }, + "landmarkingConfidence": { + "description": "Face landmarking confidence. Range [0, 1].", + "format": "float", + "type": "number" + }, + "detectionConfidence": { + "description": "Detection confidence. Range [0, 1].", + "format": "float", + "type": "number" + }, + "panAngle": { + "description": "Yaw angle, which indicates the leftward/rightward angle that the face is\npointing relative to the vertical plane perpendicular to the image. 
Range\n[-180,180].", + "format": "float", + "type": "number" + }, + "underExposedLikelihood": { + "description": "Under-exposed likelihood.", + "type": "string", + "enumDescriptions": [ + "Unknown likelihood.", + "It is very unlikely that the image belongs to the specified vertical.", + "It is unlikely that the image belongs to the specified vertical.", + "It is possible that the image belongs to the specified vertical.", + "It is likely that the image belongs to the specified vertical.", + "It is very likely that the image belongs to the specified vertical." + ], + "enum": [ + "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ] + }, + "blurredLikelihood": { + "enum": [ + "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "description": "Blurred likelihood.", + "type": "string", + "enumDescriptions": [ + "Unknown likelihood.", + "It is very unlikely that the image belongs to the specified vertical.", + "It is unlikely that the image belongs to the specified vertical.", + "It is possible that the image belongs to the specified vertical.", + "It is likely that the image belongs to the specified vertical.", + "It is very likely that the image belongs to the specified vertical." + ] + }, + "headwearLikelihood": { + "enumDescriptions": [ + "Unknown likelihood.", + "It is very unlikely that the image belongs to the specified vertical.", + "It is unlikely that the image belongs to the specified vertical.", + "It is possible that the image belongs to the specified vertical.", + "It is likely that the image belongs to the specified vertical.", + "It is very likely that the image belongs to the specified vertical." + ], + "enum": [ + "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" ], + "description": "Headwear likelihood.", "type": "string" + }, + "boundingPoly": { + "description": "The bounding polygon around the face. The coordinates of the bounding box\nare in the original image's scale, as returned in `ImageParams`.\nThe bounding box is computed to \"frame\" the face in accordance with human\nexpectations. It is based on the landmarker results.\nNote that one or more x and/or y coordinates may not be generated in the\n`BoundingPoly` (the polygon will be unbounded) if only a partial face\nappears in the image to be annotated.", + "$ref": "BoundingPoly" + }, + "rollAngle": { + "type": "number", + "description": "Roll angle, which indicates the amount of clockwise/anti-clockwise rotation\nof the face relative to the image vertical about the axis perpendicular to\nthe face. Range [-180,180].", + "format": "float" + }, + "sorrowLikelihood": { + "description": "Sorrow likelihood.", + "type": "string", + "enumDescriptions": [ + "Unknown likelihood.", + "It is very unlikely that the image belongs to the specified vertical.", + "It is unlikely that the image belongs to the specified vertical.", + "It is possible that the image belongs to the specified vertical.", + "It is likely that the image belongs to the specified vertical.", + "It is very likely that the image belongs to the specified vertical." + ], + "enum": [ + "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ] + }, + "tiltAngle": { + "description": "Pitch angle, which indicates the upwards/downwards angle that the face is\npointing relative to the image's horizontal plane. 
Range [-180,180].", + "format": "float", + "type": "number" + }, + "fdBoundingPoly": { + "$ref": "BoundingPoly", + "description": "The `fd_bounding_poly` bounding polygon is tighter than the\n`boundingPoly`, and encloses only the skin part of the face. Typically, it\nis used to eliminate the face from any image analysis that detects the\n\"amount of skin\" visible in an image. It is not based on the\nlandmarker results, only on the initial face detection, hence\nthe \u003ccode\u003efd\u003c/code\u003e (face detection) prefix." } }, - "id": "Landmark" + "id": "FaceAnnotation" + }, + "BatchAnnotateImagesRequest": { + "description": "Multiple image annotation requests are batched into a single service call.", + "type": "object", + "properties": { + "requests": { + "description": "Individual image annotation requests for this batch.", + "type": "array", + "items": { + "$ref": "AnnotateImageRequest" + } + } + }, + "id": "BatchAnnotateImagesRequest" }, "ImageContext": { "description": "Image context and/or feature-specific parameters.", "type": "object", "properties": { "latLongRect": { - "description": "lat/long rectangle that specifies the location of the image.", - "$ref": "LatLongRect" + "$ref": "LatLongRect", + "description": "lat/long rectangle that specifies the location of the image." }, "languageHints": { - "description": "List of languages to use for TEXT_DETECTION. In most cases, an empty value\nyields the best results since it enables automatic language detection. For\nlanguages based on the Latin alphabet, setting `language_hints` is not\nneeded. In rare cases, when the language of the text in the image is known,\nsetting a hint will help get better results (although it will be a\nsignificant hindrance if the hint is wrong). Text detection returns an\nerror if one or more of the specified languages is not one of the\n[supported languages](/vision/docs/languages).", "type": "array", "items": { "type": "string" - } + }, + "description": "List of languages to use for TEXT_DETECTION. In most cases, an empty value\nyields the best results since it enables automatic language detection. For\nlanguages based on the Latin alphabet, setting `language_hints` is not\nneeded. In rare cases, when the language of the text in the image is known,\nsetting a hint will help get better results (although it will be a\nsignificant hindrance if the hint is wrong). Text detection returns an\nerror if one or more of the specified languages is not one of the\n[supported languages](/vision/docs/languages)." } }, "id": "ImageContext" }, - "BatchAnnotateImagesRequest": { - "description": "Multiple image annotation requests are batched into a single service call.", + "AnnotateImageRequest": { + "description": "Request for performing Google Cloud Vision API tasks over a user-provided\nimage, with user-requested features.", "type": "object", "properties": { - "requests": { - "description": "Individual image annotation requests for this batch.", + "image": { + "$ref": "Image", + "description": "The image to be processed." + }, + "features": { + "description": "Requested features.", "type": "array", "items": { - "$ref": "AnnotateImageRequest" + "$ref": "Feature" } + }, + "imageContext": { + "$ref": "ImageContext", + "description": "Additional context that may accompany the image." 
} }, - "id": "BatchAnnotateImagesRequest" + "id": "AnnotateImageRequest" }, - "EntityAnnotation": { - "description": "Set of detected entity features.", + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", "type": "object", "properties": { - "mid": { - "description": "Opaque entity ID. Some IDs may be available in\n[Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).", - "type": "string" - }, - "description": { - "description": "Entity textual description, expressed in its `locale` language.", - "type": "string" - }, - "topicality": { - "description": "The relevancy of the ICA (Image Content Annotation) label to the\nimage. For example, the relevancy of \"tower\" is likely higher to an image\ncontaining the detected \"Eiffel Tower\" than to an image containing a\ndetected distant towering building, even though the confidence that\nthere is a tower in each image may be the same. 
Range [0, 1].", - "type": "number", - "format": "float" + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" }, - "locale": { - "description": "The language code for the locale in which the entity textual\n`description` is expressed.", + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", "type": "string" }, - "properties": { - "description": "Some entities may have optional user-supplied `Property` (name/value)\nfields, such a score or string that qualifies the entity.", - "type": "array", - "items": { - "$ref": "Property" - } - }, - "score": { - "description": "Overall score of the result. Range [0, 1].", - "type": "number", - "format": "float" - }, - "boundingPoly": { - "description": "Image region to which this entity belongs. Currently not produced\nfor `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s\nare produced for the entire text detected in an image region, followed by\n`boundingPoly`s for each word within the detected text.", - "$ref": "BoundingPoly" - }, - "locations": { - "description": "The location information for the detected entity. Multiple\n`LocationInfo` elements can be present because one location may\nindicate the location of the scene in the image, and another location\nmay indicate the location of the place where the image was taken.\nLocation information is usually present for landmarks.", + "details": { + "description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.", "type": "array", "items": { - "$ref": "LocationInfo" + "type": "object", + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + } } - }, - "confidence": { - "description": "The accuracy of the entity detection in an image.\nFor example, for an image in which the \"Eiffel Tower\" entity is detected,\nthis field represents the confidence that there is a tower in the query\nimage. Range [0, 1].", - "type": "number", - "format": "float" } }, - "id": "EntityAnnotation" + "id": "Status" }, - "Property": { - "description": "A `Property` consists of a user-supplied name/value pair.", + "LatLongRect": { + "id": "LatLongRect", + "description": "Rectangle determined by min and max `LatLng` pairs.", "type": "object", "properties": { - "value": { - "description": "Value of the property.", - "type": "string" + "minLatLng": { + "description": "Min lat/long pair.", + "$ref": "LatLng" }, - "name": { - "description": "Name of the property.", - "type": "string" + "maxLatLng": { + "description": "Max lat/long pair.", + "$ref": "LatLng" + } + } + }, + "LatLng": { + "description": "An object representing a latitude/longitude pair. This is expressed as a pair\nof doubles representing degrees latitude and degrees longitude. Unless\nspecified otherwise, this must conform to the\n\u003ca href=\"http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf\"\u003eWGS84\nstandard\u003c/a\u003e. 
Values must be within normalized ranges.\n\nExample of normalization code in Python:\n\n def NormalizeLongitude(longitude):\n \"\"\"Wraps decimal degrees longitude to [-180.0, 180.0].\"\"\"\n q, r = divmod(longitude, 360.0)\n if r \u003e 180.0 or (r == 180.0 and q \u003c= -1.0):\n return r - 360.0\n return r\n\n def NormalizeLatLng(latitude, longitude):\n \"\"\"Wraps decimal degrees latitude and longitude to\n [-90.0, 90.0] and [-180.0, 180.0], respectively.\"\"\"\n r = latitude % 360.0\n if r \u003c= 90.0:\n return r, NormalizeLongitude(longitude)\n elif r \u003e= 270.0:\n return r - 360, NormalizeLongitude(longitude)\n else:\n return 180 - r, NormalizeLongitude(longitude + 180.0)\n\n assert 180.0 == NormalizeLongitude(180.0)\n assert -180.0 == NormalizeLongitude(-180.0)\n assert -179.0 == NormalizeLongitude(181.0)\n assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)\n assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)\n assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)\n assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)\n assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)\n assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)\n assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)\n assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)\n assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)\n assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)\n\nThe code in logs/storage/validator/logs_validator_traits.cc treats this type\nas if it were annotated as ST_LOCATION.", + "type": "object", + "properties": { + "latitude": { + "description": "The latitude in degrees. It must be in the range [-90.0, +90.0].", + "format": "double", + "type": "number" + }, + "longitude": { + "description": "The longitude in degrees. It must be in the range [-180.0, +180.0].", + "format": "double", + "type": "number" } }, - "id": "Property" + "id": "LatLng" }, "Color": { "description": "Represents a color in the RGBA color space. This representation is designed\nfor simplicity of conversion to/from color representations in various\nlanguages over compactness; for example, the fields of this representation\ncan be trivially provided to the constructor of \"java.awt.Color\" in Java; it\ncan also be trivially provided to UIColor's \"+colorWithRed:green:blue:alpha\"\nmethod in iOS; and, with just a little work, it can be easily formatted into\na CSS \"rgba()\" string in JavaScript, as well. Here are some examples:\n\nExample (Java):\n\n import com.google.type.Color;\n\n // ...\n public static java.awt.Color fromProto(Color protocolor) {\n float alpha = protocolor.hasAlpha()\n ? 
protocolor.getAlpha().getValue()\n : 1.0;\n\n return new java.awt.Color(\n protocolor.getRed(),\n protocolor.getGreen(),\n protocolor.getBlue(),\n alpha);\n }\n\n public static Color toProto(java.awt.Color color) {\n float red = (float) color.getRed();\n float green = (float) color.getGreen();\n float blue = (float) color.getBlue();\n float denominator = 255.0;\n Color.Builder resultBuilder =\n Color\n .newBuilder()\n .setRed(red / denominator)\n .setGreen(green / denominator)\n .setBlue(blue / denominator);\n int alpha = color.getAlpha();\n if (alpha != 255) {\n result.setAlpha(\n FloatValue\n .newBuilder()\n .setValue(((float) alpha) / denominator)\n .build());\n }\n return resultBuilder.build();\n }\n // ...\n\nExample (iOS / Obj-C):\n\n // ...\n static UIColor* fromProto(Color* protocolor) {\n float red = [protocolor red];\n float green = [protocolor green];\n float blue = [protocolor blue];\n FloatValue* alpha_wrapper = [protocolor alpha];\n float alpha = 1.0;\n if (alpha_wrapper != nil) {\n alpha = [alpha_wrapper value];\n }\n return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];\n }\n\n static Color* toProto(UIColor* color) {\n CGFloat red, green, blue, alpha;\n if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {\n return nil;\n }\n Color* result = [Color alloc] init];\n [result setRed:red];\n [result setGreen:green];\n [result setBlue:blue];\n if (alpha \u003c= 0.9999) {\n [result setAlpha:floatWrapperWithValue(alpha)];\n }\n [result autorelease];\n return result;\n }\n // ...\n\n Example (JavaScript):\n\n // ...\n\n var protoToCssColor = function(rgb_color) {\n var redFrac = rgb_color.red || 0.0;\n var greenFrac = rgb_color.green || 0.0;\n var blueFrac = rgb_color.blue || 0.0;\n var red = Math.floor(redFrac * 255);\n var green = Math.floor(greenFrac * 255);\n var blue = Math.floor(blueFrac * 255);\n\n if (!('alpha' in rgb_color)) {\n return rgbToCssColor_(red, green, blue);\n }\n\n var alphaFrac = rgb_color.alpha.value || 0.0;\n var rgbParams = [red, green, blue].join(',');\n return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');\n };\n\n var rgbToCssColor_ = function(red, green, blue) {\n var rgbNumber = new Number((red \u003c\u003c 16) | (green \u003c\u003c 8) | blue);\n var hexString = rgbNumber.toString(16);\n var missingZeros = 6 - hexString.length;\n var resultBuilder = ['#'];\n for (var i = 0; i \u003c missingZeros; i++) {\n resultBuilder.push('0');\n }\n resultBuilder.push(hexString);\n return resultBuilder.join('');\n };\n\n // ...", "type": "object", "properties": { + "red": { + "description": "The amount of red in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" + }, "green": { "description": "The amount of green in the color as a value in the interval [0, 1].", - "type": "number", - "format": "float" + "format": "float", + "type": "number" }, "blue": { - "description": "The amount of blue in the color as a value in the interval [0, 1].", - "type": "number", - "format": "float" - }, - "red": { - "description": "The amount of red in the color as a value in the interval [0, 1].", "type": "number", + "description": "The amount of blue in the color as a value in the interval [0, 1].", "format": "float" }, "alpha": { "description": "The fraction of this color that should be applied to the pixel. 
That is,\nthe final pixel color is defined by the equation:\n\n pixel color = alpha * (this color) + (1.0 - alpha) * (background color)\n\nThis means that a value of 1.0 corresponds to a solid color, whereas\na value of 0.0 corresponds to a completely transparent color. This\nuses a wrapper message rather than a simple float scalar so that it is\npossible to distinguish between a default value and the value being unset.\nIf omitted, this color object is to be rendered as a solid color\n(as if the alpha value had been explicitly given with a value of 1.0).", - "type": "number", - "format": "float" + "format": "float", + "type": "number" } }, "id": "Color" }, - "LocationInfo": { - "description": "Detected entity location information.", + "ImageProperties": { + "description": "Stores image properties, such as dominant colors.", "type": "object", "properties": { - "latLng": { - "description": "lat/long location coordinates.", - "$ref": "LatLng" + "dominantColors": { + "description": "If present, dominant colors completed successfully.", + "$ref": "DominantColorsAnnotation" } }, - "id": "LocationInfo" + "id": "ImageProperties" }, - "SafeSearchAnnotation": { + "Feature": { + "description": "Users describe the type of Google Cloud Vision API tasks to perform over\nimages by using *Feature*s. Each Feature indicates a type of image\ndetection task to perform. Features encode the Cloud Vision API\nvertical to operate on and the number of top-scoring results to return.", "type": "object", "properties": { - "medical": { - "description": "Likelihood that this is a medical image.", + "type": { "enum": [ - "UNKNOWN", - "VERY_UNLIKELY", - "UNLIKELY", - "POSSIBLE", - "LIKELY", - "VERY_LIKELY" + "TYPE_UNSPECIFIED", + "FACE_DETECTION", + "LANDMARK_DETECTION", + "LOGO_DETECTION", + "LABEL_DETECTION", + "TEXT_DETECTION", + "SAFE_SEARCH_DETECTION", + "IMAGE_PROPERTIES" ], + "description": "The feature type.", + "type": "string", + "enumDescriptions": [ + "Unspecified feature type.", + "Run face detection.", + "Run landmark detection.", + "Run logo detection.", + "Run label detection.", + "Run OCR.", + "Run computer vision models to compute image safe-search properties.", + "Compute a set of image properties, such as the image's dominant colors." + ] + }, + "maxResults": { + "description": "Maximum number of results of this type.", + "format": "int32", + "type": "integer" + } + }, + "id": "Feature" + }, + "SafeSearchAnnotation": { + "description": "Set of features pertaining to the image, computed by computer vision\nmethods over safe-search verticals (for example, adult, spoof, medical,\nviolence).", + "type": "object", + "properties": { + "adult": { "enumDescriptions": [ "Unknown likelihood.", "It is very unlikely that the image belongs to the specified vertical.", @@ -651,11 +870,7 @@ "It is possible that the image belongs to the specified vertical.", "It is likely that the image belongs to the specified vertical.", "It is very likely that the image belongs to the specified vertical." - ], - "type": "string" - }, - "spoof": { - "description": "Spoof likelihood. The likelihood that an modification\nwas made to the image's canonical version to make it appear\nfunny or offensive.", + ], "enum": [ "UNKNOWN", "VERY_UNLIKELY", @@ -664,6 +879,12 @@ "LIKELY", "VERY_LIKELY" ], + "description": "Represents the adult content likelihood for the image.", + "type": "string" + }, + "spoof": { + "description": "Spoof likelihood. 
The likelihood that an modification\nwas made to the image's canonical version to make it appear\nfunny or offensive.", + "type": "string", "enumDescriptions": [ "Unknown likelihood.", "It is very unlikely that the image belongs to the specified vertical.", @@ -672,10 +893,6 @@ "It is likely that the image belongs to the specified vertical.", "It is very likely that the image belongs to the specified vertical." ], - "type": "string" - }, - "violence": { - "description": "Violence likelihood.", "enum": [ "UNKNOWN", "VERY_UNLIKELY", @@ -683,7 +900,10 @@ "POSSIBLE", "LIKELY", "VERY_LIKELY" - ], + ] + }, + "medical": { + "type": "string", "enumDescriptions": [ "Unknown likelihood.", "It is very unlikely that the image belongs to the specified vertical.", @@ -692,10 +912,6 @@ "It is likely that the image belongs to the specified vertical.", "It is very likely that the image belongs to the specified vertical." ], - "type": "string" - }, - "adult": { - "description": "Represents the adult content likelihood for the image.", "enum": [ "UNKNOWN", "VERY_UNLIKELY", @@ -704,6 +920,10 @@ "LIKELY", "VERY_LIKELY" ], + "description": "Likelihood that this is a medical image." + }, + "violence": { + "type": "string", "enumDescriptions": [ "Unknown likelihood.", "It is very unlikely that the image belongs to the specified vertical.", @@ -712,247 +932,33 @@ "It is likely that the image belongs to the specified vertical.", "It is very likely that the image belongs to the specified vertical." ], - "type": "string" - } - }, - "id": "SafeSearchAnnotation" - }, - "Image": { - "description": "Client image to perform Google Cloud Vision API tasks over.", - "type": "object", - "properties": { - "source": { - "description": "Google Cloud Storage image location. If both `content` and `source`\nare provided for an image, `content` takes precedence and is\nused to perform the image annotation request.", - "$ref": "ImageSource" - }, - "content": { - "description": "Image content, represented as a stream of bytes.\nNote: as with all `bytes` fields, protobuffers use a pure binary\nrepresentation, whereas JSON representations use base64.", - "type": "string", - "format": "byte" - } - }, - "id": "Image" - }, - "DominantColorsAnnotation": { - "description": "Set of dominant colors and their corresponding scores.", - "type": "object", - "properties": { - "colors": { - "description": "RGB color values with their score and pixel fraction.", - "type": "array", - "items": { - "$ref": "ColorInfo" - } - } - }, - "id": "DominantColorsAnnotation" - }, - "Feature": { - "description": "Users describe the type of Google Cloud Vision API tasks to perform over\nimages by using *Feature*s. Each Feature indicates a type of image\ndetection task to perform. Features encode the Cloud Vision API\nvertical to operate on and the number of top-scoring results to return.", - "type": "object", - "properties": { - "type": { - "description": "The feature type.", "enum": [ - "TYPE_UNSPECIFIED", - "FACE_DETECTION", - "LANDMARK_DETECTION", - "LOGO_DETECTION", - "LABEL_DETECTION", - "TEXT_DETECTION", - "SAFE_SEARCH_DETECTION", - "IMAGE_PROPERTIES" - ], - "enumDescriptions": [ - "Unspecified feature type.", - "Run face detection.", - "Run landmark detection.", - "Run logo detection.", - "Run label detection.", - "Run OCR.", - "Run computer vision models to compute image safe-search properties.", - "Compute a set of image properties, such as the image's dominant colors." 
+ "UNKNOWN", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" ], - "type": "string" - }, - "maxResults": { - "description": "Maximum number of results of this type.", - "type": "integer", - "format": "int32" - } - }, - "id": "Feature" - }, - "BatchAnnotateImagesResponse": { - "description": "Response to a batch image annotation request.", - "type": "object", - "properties": { - "responses": { - "description": "Individual responses to image annotation requests within the batch.", - "type": "array", - "items": { - "$ref": "AnnotateImageResponse" - } - } - }, - "id": "BatchAnnotateImagesResponse" - }, - "ImageProperties": { - "description": "Stores image properties, such as dominant colors.", - "type": "object", - "properties": { - "dominantColors": { - "description": "If present, dominant colors completed successfully.", - "$ref": "DominantColorsAnnotation" - } - }, - "id": "ImageProperties" - }, - "LatLng": { - "description": "An object representing a latitude/longitude pair. This is expressed as a pair\nof doubles representing degrees latitude and degrees longitude. Unless\nspecified otherwise, this must conform to the\n\u003ca href=\"http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf\"\u003eWGS84\nstandard\u003c/a\u003e. Values must be within normalized ranges.\n\nExample of normalization code in Python:\n\n def NormalizeLongitude(longitude):\n \"\"\"Wraps decimal degrees longitude to [-180.0, 180.0].\"\"\"\n q, r = divmod(longitude, 360.0)\n if r \u003e 180.0 or (r == 180.0 and q \u003c= -1.0):\n return r - 360.0\n return r\n\n def NormalizeLatLng(latitude, longitude):\n \"\"\"Wraps decimal degrees latitude and longitude to\n [-90.0, 90.0] and [-180.0, 180.0], respectively.\"\"\"\n r = latitude % 360.0\n if r \u003c= 90.0:\n return r, NormalizeLongitude(longitude)\n elif r \u003e= 270.0:\n return r - 360, NormalizeLongitude(longitude)\n else:\n return 180 - r, NormalizeLongitude(longitude + 180.0)\n\n assert 180.0 == NormalizeLongitude(180.0)\n assert -180.0 == NormalizeLongitude(-180.0)\n assert -179.0 == NormalizeLongitude(181.0)\n assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)\n assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)\n assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)\n assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)\n assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)\n assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)\n assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)\n assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)\n assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)\n assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)\n\nThe code in logs/storage/validator/logs_validator_traits.cc treats this type\nas if it were annotated as ST_LOCATION.", - "type": "object", - "properties": { - "latitude": { - "description": "The latitude in degrees. It must be in the range [-90.0, +90.0].", - "type": "number", - "format": "double" - }, - "longitude": { - "description": "The longitude in degrees. 
It must be in the range [-180.0, +180.0].", - "type": "number", - "format": "double" - } - }, - "id": "LatLng" - }, - "Position": { - "description": "A 3D position in the image, used primarily for Face detection landmarks.\nA valid Position must have both x and y coordinates.\nThe position coordinates are in the same scale as the original image.", - "type": "object", - "properties": { - "y": { - "description": "Y coordinate.", - "type": "number", - "format": "float" - }, - "x": { - "description": "X coordinate.", - "type": "number", - "format": "float" - }, - "z": { - "description": "Z coordinate (or depth).", - "type": "number", - "format": "float" + "description": "Violence likelihood." } }, - "id": "Position" + "id": "SafeSearchAnnotation" } }, - "revision": "20170112", - "basePath": "", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version_module": "True", + "protocol": "rest", "canonicalName": "Vision", - "discoveryVersion": "v1", - "baseUrl": "https://vision.googleapis.com/", - "name": "vision", - "parameters": { - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "alt": { - "description": "Data format for response.", - "location": "query", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", - "type": "string", - "location": "query" + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } } - }, - "documentationLink": "https://cloud.google.com/vision/", - "ownerDomain": "google.com", - "batchPath": "batch", - "servicePath": "", - "ownerName": "Google", - "version": "v1", - "rootUrl": "https://vision.googleapis.com/", - "kind": "discovery#restDescription" + } } diff --git a/vendor/google.golang.org/api/vision/v1/vision-gen.go b/vendor/google.golang.org/api/vision/v1/vision-gen.go index 593f2aa93..9db2333b9 100644 --- a/vendor/google.golang.org/api/vision/v1/vision-gen.go +++ b/vendor/google.golang.org/api/vision/v1/vision-gen.go @@ -61,9 +61,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Images *ImagesService } @@ -75,6 +76,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewImagesService(s *Service) *ImagesService { rs := &ImagesService{s: s} return rs @@ -1034,11 +1039,15 @@ func (s *ImageProperties) MarshalJSON() ([]byte, error) { // ImageSource: External image source (Google Cloud Storage image // location). type ImageSource struct { - // GcsImageUri: Google Cloud Storage image URI, which must be in the - // following form: + // GcsImageUri: NOTE: For new code `image_uri` below is + // preferred. + // Google Cloud Storage image URI, which must be in the following + // form: // `gs://bucket_name/object_name` (for details, see - // [Google Cloud Storage Request + // [Google Cloud Storage + // Request // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // // NOTE: Cloud Storage object versioning is not supported. GcsImageUri string `json:"gcsImageUri,omitempty"` @@ -1353,6 +1362,9 @@ type Property struct { // Name: Name of the property. Name string `json:"name,omitempty"` + // Uint64Value: Value of numeric properties. + Uint64Value uint64 `json:"uint64Value,omitempty,string"` + // Value: Value of the property. Value string `json:"value,omitempty"` @@ -1379,6 +1391,11 @@ func (s *Property) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SafeSearchAnnotation: Set of features pertaining to the image, +// computed by computer vision +// methods over safe-search verticals (for example, adult, spoof, +// medical, +// violence). type SafeSearchAnnotation struct { // Adult: Represents the adult content likelihood for the image. 
// @@ -1669,6 +1686,7 @@ func (c *ImagesAnnotateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchannotateimagesrequest) if err != nil { diff --git a/vendor/google.golang.org/api/webfonts/v1/webfonts-gen.go b/vendor/google.golang.org/api/webfonts/v1/webfonts-gen.go index 8da3833c6..97aa21557 100644 --- a/vendor/google.golang.org/api/webfonts/v1/webfonts-gen.go +++ b/vendor/google.golang.org/api/webfonts/v1/webfonts-gen.go @@ -55,9 +55,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Webfonts *WebfontsService } @@ -69,6 +70,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewWebfontsService(s *Service) *WebfontsService { rs := &WebfontsService{s: s} return rs @@ -234,6 +239,7 @@ func (c *WebfontsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/api/webmasters/v3/webmasters-gen.go b/vendor/google.golang.org/api/webmasters/v3/webmasters-gen.go index e5f5e6e4d..364d982e5 100644 --- a/vendor/google.golang.org/api/webmasters/v3/webmasters-gen.go +++ b/vendor/google.golang.org/api/webmasters/v3/webmasters-gen.go @@ -68,9 +68,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Searchanalytics *SearchanalyticsService @@ -90,6 +91,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewSearchanalyticsService(s *Service) *SearchanalyticsService { rs := &SearchanalyticsService{s: s} return rs @@ -841,6 +846,7 @@ func (c *SearchanalyticsQueryCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchanalyticsqueryrequest) if err != nil { @@ -975,6 +981,7 @@ func (c *SitemapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, 
"sites/{siteUrl}/sitemaps/{feedpath}") @@ -1091,6 +1098,7 @@ func (c *SitemapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1247,6 +1255,7 @@ func (c *SitemapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1381,6 +1390,7 @@ func (c *SitemapsSubmitCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "sites/{siteUrl}/sitemaps/{feedpath}") @@ -1484,6 +1494,7 @@ func (c *SitesAddCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "sites/{siteUrl}") @@ -1580,6 +1591,7 @@ func (c *SitesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "sites/{siteUrl}") @@ -1686,6 +1698,7 @@ func (c *SitesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1822,6 +1835,7 @@ func (c *SitesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1986,6 +2000,7 @@ func (c *UrlcrawlerrorscountsQueryCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2177,6 +2192,7 @@ func (c *UrlcrawlerrorssamplesGetCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2372,6 +2388,7 @@ func (c *UrlcrawlerrorssamplesListCall) doRequest(alt string) (*http.Response, e reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -2551,6 +2568,7 @@ func (c *UrlcrawlerrorssamplesMarkAsFixedCall) doRequest(alt string) (*http.Resp reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "sites/{siteUrl}/urlCrawlErrorsSamples/{url}") diff 
--git a/vendor/google.golang.org/api/youtube/v3/youtube-api.json b/vendor/google.golang.org/api/youtube/v3/youtube-api.json index 3f14a1f33..140fbff7f 100644 --- a/vendor/google.golang.org/api/youtube/v3/youtube-api.json +++ b/vendor/google.golang.org/api/youtube/v3/youtube-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/4auVy5GLotTS3CICB9zLFhVaxUM\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/f81k8b4sv9uLeywfoj2KpL2xcPg\"", "discoveryVersion": "v1", "id": "youtube:v3", "name": "youtube", "canonicalName": "YouTube", "version": "v3", - "revision": "20161202", + "revision": "20170130", "title": "YouTube Data API", "description": "Supports core YouTube features, such as uploading videos, creating and managing playlists, searching for content, and much more.", "ownerDomain": "google.com", @@ -1480,6 +1480,13 @@ "type": "object", "description": "Freebase topic information related to the channel.", "properties": { + "topicCategories": { + "type": "array", + "description": "A list of Wikipedia URLs that describe the channel's content.", + "items": { + "type": "string" + } + }, "topicIds": { "type": "array", "description": "A list of Freebase topic IDs associated with the channel. You can retrieve information about each topic using the Freebase Topic API.", @@ -4454,7 +4461,7 @@ "properties": { "authorChannelId": { "type": "string", - "description": "The ID of the user that authored this message, this field is not always filled. textMessageEvent - the user that wrote the message fanFundingEvent - the user that funded the broadcast newSponsorEvent - the user that just became a sponsor messageDeletedEvent - the moderator that took the action messageRetractedEvent - the author that retracted their message userBannedEvent - the moderator that took the action" + "description": "The ID of the user that authored this message, this field is not always filled. textMessageEvent - the user that wrote the message fanFundingEvent - the user that funded the broadcast newSponsorEvent - the user that just became a sponsor messageDeletedEvent - the moderator that took the action messageRetractedEvent - the author that retracted their message userBannedEvent - the moderator that took the action superChatEvent - the user that made the purchase" }, "displayMessage": { "type": "string", @@ -4494,6 +4501,10 @@ "description": "The date and time when the message was orignally published. The value is specified in ISO 8601 (YYYY-MM-DDThh:mm:ss.sZ) format.", "format": "date-time" }, + "superChatDetails": { + "$ref": "LiveChatSuperChatDetails", + "description": "Details about the Super Chat event, this is only set if the type is 'superChatEvent'." + }, "textMessageDetails": { "$ref": "LiveChatTextMessageDetails", "description": "Details about the text message, this is only set if the type is 'textMessageEvent'." @@ -4513,6 +4524,7 @@ "pollVotedEvent", "sponsorOnlyModeEndedEvent", "sponsorOnlyModeStartedEvent", + "superChatEvent", "textMessageEvent", "tombstone", "userBannedEvent" @@ -4531,6 +4543,7 @@ "", "", "", + "", "" ] }, @@ -4694,6 +4707,34 @@ } } }, + "LiveChatSuperChatDetails": { + "id": "LiveChatSuperChatDetails", + "type": "object", + "properties": { + "amountDisplayString": { + "type": "string", + "description": "A rendered string that displays the fund amount and currency to the user." 
+ }, + "amountMicros": { + "type": "string", + "description": "The amount purchased by the user, in micros (1,750,000 micros = 1.75).", + "format": "uint64" + }, + "currency": { + "type": "string", + "description": "The currency in which the purchase was made." + }, + "tier": { + "type": "integer", + "description": "The tier in which the amount belongs to. Lower amounts belong to lower tiers. Starts at 1.", + "format": "uint32" + }, + "userComment": { + "type": "string", + "description": "The comment added by the user to this Super Chat event." + } + } + }, "LiveChatTextMessageDetails": { "id": "LiveChatTextMessageDetails", "type": "object", @@ -5948,6 +5989,111 @@ } } }, + "SuperChatEvent": { + "id": "SuperChatEvent", + "type": "object", + "description": "A superChatEvent resource represents a Super Chat purchase on a YouTube channel.", + "properties": { + "etag": { + "type": "string", + "description": "Etag of this resource." + }, + "id": { + "type": "string", + "description": "The ID that YouTube assigns to uniquely identify the Super Chat event." + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"youtube#superChatEvent\".", + "default": "youtube#superChatEvent" + }, + "snippet": { + "$ref": "SuperChatEventSnippet", + "description": "The snippet object contains basic details about the Super Chat event." + } + } + }, + "SuperChatEventListResponse": { + "id": "SuperChatEventListResponse", + "type": "object", + "properties": { + "etag": { + "type": "string", + "description": "Etag of this resource." + }, + "eventId": { + "type": "string", + "description": "Serialized EventId of the request which produced this response." + }, + "items": { + "type": "array", + "description": "A list of Super Chat purchases that match the request criteria.", + "items": { + "$ref": "SuperChatEvent" + } + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"youtube#superChatEventListResponse\".", + "default": "youtube#superChatEventListResponse" + }, + "nextPageToken": { + "type": "string", + "description": "The token that can be used as the value of the pageToken parameter to retrieve the next page in the result set." + }, + "pageInfo": { + "$ref": "PageInfo" + }, + "tokenPagination": { + "$ref": "TokenPagination" + }, + "visitorId": { + "type": "string", + "description": "The visitorId identifies the visitor." + } + } + }, + "SuperChatEventSnippet": { + "id": "SuperChatEventSnippet", + "type": "object", + "properties": { + "amountMicros": { + "type": "string", + "description": "The purchase amount, in micros of the purchase currency. e.g., 1 is represented as 1000000.", + "format": "uint64" + }, + "channelId": { + "type": "string", + "description": "Channel id where the event occurred." + }, + "commentText": { + "type": "string", + "description": "The text contents of the comment left by the user." + }, + "createdAt": { + "type": "string", + "description": "The date and time when the event occurred. The value is specified in ISO 8601 (YYYY-MM-DDThh:mm:ss.sZ) format.", + "format": "date-time" + }, + "currency": { + "type": "string", + "description": "The currency in which the purchase was made. ISO 4217." + }, + "displayString": { + "type": "string", + "description": "A rendered string that displays the purchase amount and currency (e.g., \"$1.00\"). The string is rendered for the given language." 
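Both LiveChatSuperChatDetails.amountMicros and SuperChatEventSnippet.amountMicros above express the purchase amount in micros of the purchase currency, serialized as a string because the value is a uint64: 1,000,000 micros is one currency unit, so 1,750,000 micros of USD is 1.75 USD. A tiny illustrative conversion (not taken from the broker code):

package main

import "fmt"

// microsToUnits converts an amountMicros value (micros of the purchase
// currency, 1,000,000 micros = 1 unit) to whole currency units.
func microsToUnits(amountMicros uint64) float64 {
	return float64(amountMicros) / 1e6
}

func main() {
	// 1,750,000 micros is 1.75, matching the example in the amountMicros
	// description above.
	fmt.Printf("%.2f\n", microsToUnits(1750000))
}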
+ }, + "messageType": { + "type": "integer", + "description": "The tier for the paid message, which is based on the amount of money spent to purchase the message.", + "format": "uint32" + }, + "supporterDetails": { + "$ref": "ChannelProfileDetails", + "description": "Details about the supporter." + } + } + }, "Thumbnail": { "id": "Thumbnail", "type": "object", @@ -7141,7 +7287,8 @@ "docFile", "imageFile", "notAVideoFile", - "projectFile" + "projectFile", + "unsupportedSpatialAudioLayout" ], "enumDescriptions": [ "", @@ -7149,6 +7296,7 @@ "", "", "", + "", "" ] } @@ -7160,14 +7308,16 @@ "type": "string", "enum": [ "nonStreamableMov", - "procsesingHintSpatialAudio", - "procsesingHintSphericalVideo", - "sendBestQualityVideo" + "sendBestQualityVideo", + "spatialAudio", + "sphericalVideo", + "vrVideo" ], "enumDescriptions": [ "", "", "", + "", "" ] } @@ -7184,7 +7334,9 @@ "problematicVideoCodec", "unknownAudioCodec", "unknownContainer", - "unknownVideoCodec" + "unknownVideoCodec", + "unsupportedSphericalProjectionType", + "unsupportedVrStereoMode" ], "enumDescriptions": [ "", @@ -7193,6 +7345,8 @@ "", "", "", + "", + "", "" ] } @@ -7236,6 +7390,13 @@ "type": "string" } }, + "topicCategories": { + "type": "array", + "description": "A list of Wikipedia URLs that provide a high-level description of the video's content.", + "items": { + "type": "string" + } + }, "topicIds": { "type": "array", "description": "A list of Freebase topic IDs that are centrally associated with the video. These are topics that are centrally featured in the video, and it can be said that the video is mainly about each of these. You can retrieve information about each topic using the Freebase Topic API.", @@ -10112,6 +10273,54 @@ } } }, + "superChatEvents": { + "methods": { + "list": { + "id": "youtube.superChatEvents.list", + "path": "superChatEvents", + "httpMethod": "GET", + "description": "Lists Super Chat events for a channel.", + "parameters": { + "hl": { + "type": "string", + "description": "The hl parameter instructs the API to retrieve localized resource metadata for a specific application language that the YouTube website supports. The parameter value must be a language code included in the list returned by the i18nLanguages.list method.\n\nIf localized resource details are available in that language, the resource's snippet.localized object will contain the localized values. However, if localized details are not available, the snippet.localized object will contain resource details in the resource's default language.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maxResults parameter specifies the maximum number of items that should be returned in the result set.", + "default": "5", + "format": "uint32", + "minimum": "0", + "maximum": "50", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The pageToken parameter identifies a specific page in the result set that should be returned. In an API response, the nextPageToken and prevPageToken properties identify other pages that could be retrieved.", + "location": "query" + }, + "part": { + "type": "string", + "description": "The part parameter specifies the superChatEvent resource parts that the API response will include. 
Supported values are id and snippet.", + "required": true, + "location": "query" + } + }, + "parameterOrder": [ + "part" + ], + "response": { + "$ref": "SuperChatEventListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/youtube", + "https://www.googleapis.com/auth/youtube.force-ssl", + "https://www.googleapis.com/auth/youtube.readonly" + ] + } + } + }, "thumbnails": { "methods": { "set": { diff --git a/vendor/google.golang.org/api/youtube/v3/youtube-gen.go b/vendor/google.golang.org/api/youtube/v3/youtube-gen.go index 947b801b2..41b27b948 100644 --- a/vendor/google.golang.org/api/youtube/v3/youtube-gen.go +++ b/vendor/google.golang.org/api/youtube/v3/youtube-gen.go @@ -93,6 +93,7 @@ func New(client *http.Client) (*Service, error) { s.Search = NewSearchService(s) s.Sponsors = NewSponsorsService(s) s.Subscriptions = NewSubscriptionsService(s) + s.SuperChatEvents = NewSuperChatEventsService(s) s.Thumbnails = NewThumbnailsService(s) s.VideoAbuseReportReasons = NewVideoAbuseReportReasonsService(s) s.VideoCategories = NewVideoCategoriesService(s) @@ -102,9 +103,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Activities *ActivitiesService @@ -148,6 +150,8 @@ type Service struct { Subscriptions *SubscriptionsService + SuperChatEvents *SuperChatEventsService + Thumbnails *ThumbnailsService VideoAbuseReportReasons *VideoAbuseReportReasonsService @@ -166,6 +170,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewActivitiesService(s *Service) *ActivitiesService { rs := &ActivitiesService{s: s} return rs @@ -355,6 +363,15 @@ type SubscriptionsService struct { s *Service } +func NewSuperChatEventsService(s *Service) *SuperChatEventsService { + rs := &SuperChatEventsService{s: s} + return rs +} + +type SuperChatEventsService struct { + s *Service +} + func NewThumbnailsService(s *Service) *ThumbnailsService { rs := &ThumbnailsService{s: s} return rs @@ -2379,12 +2396,16 @@ func (s *ChannelStatus) MarshalJSON() ([]byte, error) { // ChannelTopicDetails: Freebase topic information related to the // channel. type ChannelTopicDetails struct { + // TopicCategories: A list of Wikipedia URLs that describe the channel's + // content. + TopicCategories []string `json:"topicCategories,omitempty"` + // TopicIds: A list of Freebase topic IDs associated with the channel. // You can retrieve information about each topic using the Freebase // Topic API. TopicIds []string `json:"topicIds,omitempty"` - // ForceSendFields is a list of field names (e.g. "TopicIds") to + // ForceSendFields is a list of field names (e.g. "TopicCategories") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2392,12 +2413,13 @@ type ChannelTopicDetails struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"TopicIds") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "TopicCategories") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -5464,7 +5486,8 @@ type LiveChatMessageSnippet struct { // newSponsorEvent - the user that just became a sponsor // messageDeletedEvent - the moderator that took the action // messageRetractedEvent - the author that retracted their message - // userBannedEvent - the moderator that took the action + // userBannedEvent - the moderator that took the action superChatEvent - + // the user that made the purchase AuthorChannelId string `json:"authorChannelId,omitempty"` // DisplayMessage: Contains a string that can be displayed to the user. @@ -5499,6 +5522,10 @@ type LiveChatMessageSnippet struct { // (YYYY-MM-DDThh:mm:ss.sZ) format. PublishedAt string `json:"publishedAt,omitempty"` + // SuperChatDetails: Details about the Super Chat event, this is only + // set if the type is 'superChatEvent'. + SuperChatDetails *LiveChatSuperChatDetails `json:"superChatDetails,omitempty"` + // TextMessageDetails: Details about the text message, this is only set // if the type is 'textMessageEvent'. TextMessageDetails *LiveChatTextMessageDetails `json:"textMessageDetails,omitempty"` @@ -5518,6 +5545,7 @@ type LiveChatMessageSnippet struct { // "pollVotedEvent" // "sponsorOnlyModeEndedEvent" // "sponsorOnlyModeStartedEvent" + // "superChatEvent" // "textMessageEvent" // "tombstone" // "userBannedEvent" @@ -5827,6 +5855,49 @@ func (s *LiveChatPollVotedDetails) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type LiveChatSuperChatDetails struct { + // AmountDisplayString: A rendered string that displays the fund amount + // and currency to the user. + AmountDisplayString string `json:"amountDisplayString,omitempty"` + + // AmountMicros: The amount purchased by the user, in micros (1,750,000 + // micros = 1.75). + AmountMicros uint64 `json:"amountMicros,omitempty,string"` + + // Currency: The currency in which the purchase was made. + Currency string `json:"currency,omitempty"` + + // Tier: The tier in which the amount belongs to. Lower amounts belong + // to lower tiers. Starts at 1. + Tier int64 `json:"tier,omitempty"` + + // UserComment: The comment added by the user to this Super Chat event. + UserComment string `json:"userComment,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AmountDisplayString") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AmountDisplayString") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LiveChatSuperChatDetails) MarshalJSON() ([]byte, error) { + type noMethod LiveChatSuperChatDetails + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type LiveChatTextMessageDetails struct { // MessageText: The user's message. MessageText string `json:"messageText,omitempty"` @@ -7701,6 +7772,154 @@ func (s *SubscriptionSubscriberSnippet) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SuperChatEvent: A superChatEvent resource represents a Super Chat +// purchase on a YouTube channel. +type SuperChatEvent struct { + // Etag: Etag of this resource. + Etag string `json:"etag,omitempty"` + + // Id: The ID that YouTube assigns to uniquely identify the Super Chat + // event. + Id string `json:"id,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "youtube#superChatEvent". + Kind string `json:"kind,omitempty"` + + // Snippet: The snippet object contains basic details about the Super + // Chat event. + Snippet *SuperChatEventSnippet `json:"snippet,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SuperChatEvent) MarshalJSON() ([]byte, error) { + type noMethod SuperChatEvent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SuperChatEventListResponse struct { + // Etag: Etag of this resource. + Etag string `json:"etag,omitempty"` + + // EventId: Serialized EventId of the request which produced this + // response. + EventId string `json:"eventId,omitempty"` + + // Items: A list of Super Chat purchases that match the request + // criteria. + Items []*SuperChatEvent `json:"items,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "youtube#superChatEventListResponse". + Kind string `json:"kind,omitempty"` + + // NextPageToken: The token that can be used as the value of the + // pageToken parameter to retrieve the next page in the result set. 
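The ForceSendFields/NullFields comments repeated through these generated structs describe the usual behavior of the gensupport-based MarshalJSON helpers: empty fields are omitted from request bodies, fields named in ForceSendFields are sent even when empty, and fields named in NullFields are sent as JSON null. A brief illustration with the new LiveChatSuperChatDetails type; the exact output formatting is indicative only.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	youtube "google.golang.org/api/youtube/v3"
)

func main() {
	d := &youtube.LiveChatSuperChatDetails{
		AmountMicros: 1750000,
		Currency:     "USD",
		// Tier is zero-valued and would normally be dropped from the marshaled
		// body; naming it in ForceSendFields keeps it in the output.
		ForceSendFields: []string{"Tier"},
	}
	b, err := json.Marshal(d) // dispatches to the generated MarshalJSON above
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}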
+ NextPageToken string `json:"nextPageToken,omitempty"` + + PageInfo *PageInfo `json:"pageInfo,omitempty"` + + TokenPagination *TokenPagination `json:"tokenPagination,omitempty"` + + // VisitorId: The visitorId identifies the visitor. + VisitorId string `json:"visitorId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SuperChatEventListResponse) MarshalJSON() ([]byte, error) { + type noMethod SuperChatEventListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SuperChatEventSnippet struct { + // AmountMicros: The purchase amount, in micros of the purchase + // currency. e.g., 1 is represented as 1000000. + AmountMicros uint64 `json:"amountMicros,omitempty,string"` + + // ChannelId: Channel id where the event occurred. + ChannelId string `json:"channelId,omitempty"` + + // CommentText: The text contents of the comment left by the user. + CommentText string `json:"commentText,omitempty"` + + // CreatedAt: The date and time when the event occurred. The value is + // specified in ISO 8601 (YYYY-MM-DDThh:mm:ss.sZ) format. + CreatedAt string `json:"createdAt,omitempty"` + + // Currency: The currency in which the purchase was made. ISO 4217. + Currency string `json:"currency,omitempty"` + + // DisplayString: A rendered string that displays the purchase amount + // and currency (e.g., "$1.00"). The string is rendered for the given + // language. + DisplayString string `json:"displayString,omitempty"` + + // MessageType: The tier for the paid message, which is based on the + // amount of money spent to purchase the message. + MessageType int64 `json:"messageType,omitempty"` + + // SupporterDetails: Details about the supporter. + SupporterDetails *ChannelProfileDetails `json:"supporterDetails,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AmountMicros") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AmountMicros") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SuperChatEventSnippet) MarshalJSON() ([]byte, error) { + type noMethod SuperChatEventSnippet + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Thumbnail: A thumbnail is an image representing a YouTube resource. type Thumbnail struct { // Height: (Optional) Height of the thumbnail image. @@ -9373,6 +9592,7 @@ type VideoSuggestions struct { // "imageFile" // "notAVideoFile" // "projectFile" + // "unsupportedSpatialAudioLayout" ProcessingErrors []string `json:"processingErrors,omitempty"` // ProcessingHints: A list of suggestions that may improve YouTube's @@ -9380,9 +9600,10 @@ type VideoSuggestions struct { // // Possible values: // "nonStreamableMov" - // "procsesingHintSpatialAudio" - // "procsesingHintSphericalVideo" // "sendBestQualityVideo" + // "spatialAudio" + // "sphericalVideo" + // "vrVideo" ProcessingHints []string `json:"processingHints,omitempty"` // ProcessingWarnings: A list of reasons why YouTube may have difficulty @@ -9401,6 +9622,8 @@ type VideoSuggestions struct { // "unknownAudioCodec" // "unknownContainer" // "unknownVideoCodec" + // "unsupportedSphericalProjectionType" + // "unsupportedVrStereoMode" ProcessingWarnings []string `json:"processingWarnings,omitempty"` // TagSuggestions: A list of keyword tags that could be added to the @@ -9477,6 +9700,10 @@ type VideoTopicDetails struct { // topic using Freebase Topic API. RelevantTopicIds []string `json:"relevantTopicIds,omitempty"` + // TopicCategories: A list of Wikipedia URLs that provide a high-level + // description of the video's content. + TopicCategories []string `json:"topicCategories,omitempty"` + // TopicIds: A list of Freebase topic IDs that are centrally associated // with the video. 
These are topics that are centrally featured in the // video, and it can be said that the video is mainly about each of @@ -9604,6 +9831,7 @@ func (c *ActivitiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.activity) if err != nil { @@ -9822,6 +10050,7 @@ func (c *ActivitiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10039,6 +10268,7 @@ func (c *CaptionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "captions") @@ -10209,6 +10439,7 @@ func (c *CaptionsDownloadCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10453,6 +10684,7 @@ func (c *CaptionsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.caption) if err != nil { @@ -10713,6 +10945,7 @@ func (c *CaptionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10954,6 +11187,7 @@ func (c *CaptionsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.caption) if err != nil { @@ -11247,6 +11481,7 @@ func (c *ChannelBannersInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channelbannerresource) if err != nil { @@ -11457,6 +11692,7 @@ func (c *ChannelSectionsDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "channelSections") @@ -11601,6 +11837,7 @@ func (c *ChannelSectionsInsertCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channelsection) if err != nil { @@ -11808,6 +12045,7 @@ func (c *ChannelSectionsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11978,6 +12216,7 @@ func (c *ChannelSectionsUpdateCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channelsection) if err != nil { @@ -12221,6 +12460,7 @@ func (c *ChannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -12441,6 +12681,7 @@ func (c *ChannelsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { @@ -12578,6 +12819,7 @@ func (c *CommentThreadsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commentthread) if err != nil { @@ -12840,6 +13082,7 @@ func (c *CommentThreadsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13068,6 +13311,7 @@ func (c *CommentThreadsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.commentthread) if err != nil { @@ -13195,6 +13439,7 @@ func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "comments") @@ -13289,6 +13534,7 @@ func (c *CommentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -13484,6 +13730,7 @@ func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13666,6 +13913,7 @@ func (c *CommentsMarkAsSpamCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "comments/markAsSpam") @@ -13772,6 +14020,7 @@ func (c *CommentsSetModerationStatusCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } 
reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "comments/setModerationStatus") @@ -13888,6 +14137,7 @@ func (c *CommentsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { @@ -14059,6 +14309,7 @@ func (c *FanFundingEventsListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14260,6 +14511,7 @@ func (c *GuideCategoriesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14420,6 +14672,7 @@ func (c *I18nLanguagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14570,6 +14823,7 @@ func (c *I18nRegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14758,6 +15012,7 @@ func (c *LiveBroadcastsBindCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveBroadcasts/bind") @@ -14983,6 +15238,7 @@ func (c *LiveBroadcastsControlCall) doRequest(alt string) (*http.Response, error reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveBroadcasts/control") @@ -15181,6 +15437,7 @@ func (c *LiveBroadcastsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveBroadcasts") @@ -15329,6 +15586,7 @@ func (c *LiveBroadcastsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.livebroadcast) if err != nil { @@ -15586,6 +15844,7 @@ func (c *LiveBroadcastsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -15850,6 +16109,7 @@ func (c *LiveBroadcastsTransitionCall) 
doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveBroadcasts/transition") @@ -16052,6 +16312,7 @@ func (c *LiveBroadcastsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.livebroadcast) if err != nil { @@ -16190,6 +16451,7 @@ func (c *LiveChatBansDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveChat/bans") @@ -16284,6 +16546,7 @@ func (c *LiveChatBansInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.livechatban) if err != nil { @@ -16412,6 +16675,7 @@ func (c *LiveChatMessagesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveChat/messages") @@ -16506,6 +16770,7 @@ func (c *LiveChatMessagesInsertCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.livechatmessage) if err != nil { @@ -16687,6 +16952,7 @@ func (c *LiveChatMessagesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16866,6 +17132,7 @@ func (c *LiveChatModeratorsDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveChat/moderators") @@ -16960,6 +17227,7 @@ func (c *LiveChatModeratorsInsertCall) doRequest(alt string) (*http.Response, er reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.livechatmoderator) if err != nil { @@ -17117,6 +17385,7 @@ func (c *LiveChatModeratorsListCall) doRequest(alt string) (*http.Response, erro reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17327,6 +17596,7 @@ func (c *LiveStreamsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) 
var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "liveStreams") @@ -17477,6 +17747,7 @@ func (c *LiveStreamsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.livestream) if err != nil { @@ -17706,6 +17977,7 @@ func (c *LiveStreamsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17933,6 +18205,7 @@ func (c *LiveStreamsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.livestream) if err != nil { @@ -18089,6 +18362,7 @@ func (c *PlaylistItemsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "playlistItems") @@ -18207,6 +18481,7 @@ func (c *PlaylistItemsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.playlistitem) if err != nil { @@ -18415,6 +18690,7 @@ func (c *PlaylistItemsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18617,6 +18893,7 @@ func (c *PlaylistItemsUpdateCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.playlistitem) if err != nil { @@ -18769,6 +19046,7 @@ func (c *PlaylistsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "playlists") @@ -18913,6 +19191,7 @@ func (c *PlaylistsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.playlist) if err != nil { @@ -19160,6 +19439,7 @@ func (c *PlaylistsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -19371,6 +19651,7 @@ func (c *PlaylistsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", 
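Every doRequest in these regenerated packages now sets an x-goog-api-client header built by Service.clientHeader from the library's generation date and the (Google-internal) GoogleClientHeaderElement field. A rough way to observe the header, sketched here with the webfonts client and a local test server standing in for the real endpoint; the canned JSON body is an assumption for illustration.

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"

	webfonts "google.golang.org/api/webfonts/v1"
)

func main() {
	// Stand-in server that echoes the header set by the generated doRequest.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("x-goog-api-client:", r.Header.Get("x-goog-api-client"))
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"kind":"webfonts#webfontList","items":[]}`)
	}))
	defer ts.Close()

	svc, err := webfonts.New(http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	svc.BasePath = ts.URL + "/" // point the generated client at the test server

	if _, err := svc.Webfonts.List().Do(); err != nil {
		log.Fatal(err)
	}
}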
c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.playlist) if err != nil { @@ -19919,6 +20200,7 @@ func (c *SearchListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20384,6 +20666,7 @@ func (c *SponsorsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -20557,6 +20840,7 @@ func (c *SubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "subscriptions") @@ -20652,6 +20936,7 @@ func (c *SubscriptionsInsertCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { @@ -20917,6 +21202,7 @@ func (c *SubscriptionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21086,6 +21372,215 @@ func (c *SubscriptionsListCall) Pages(ctx context.Context, f func(*SubscriptionL } } +// method id "youtube.superChatEvents.list": + +type SuperChatEventsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists Super Chat events for a channel. +func (r *SuperChatEventsService) List(part string) *SuperChatEventsListCall { + c := &SuperChatEventsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("part", part) + return c +} + +// Hl sets the optional parameter "hl": The hl parameter instructs the +// API to retrieve localized resource metadata for a specific +// application language that the YouTube website supports. The parameter +// value must be a language code included in the list returned by the +// i18nLanguages.list method. +// +// If localized resource details are available in that language, the +// resource's snippet.localized object will contain the localized +// values. However, if localized details are not available, the +// snippet.localized object will contain resource details in the +// resource's default language. +func (c *SuperChatEventsListCall) Hl(hl string) *SuperChatEventsListCall { + c.urlParams_.Set("hl", hl) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maxResults +// parameter specifies the maximum number of items that should be +// returned in the result set. 
+func (c *SuperChatEventsListCall) MaxResults(maxResults int64) *SuperChatEventsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": The pageToken +// parameter identifies a specific page in the result set that should be +// returned. In an API response, the nextPageToken and prevPageToken +// properties identify other pages that could be retrieved. +func (c *SuperChatEventsListCall) PageToken(pageToken string) *SuperChatEventsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SuperChatEventsListCall) Fields(s ...googleapi.Field) *SuperChatEventsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SuperChatEventsListCall) IfNoneMatch(entityTag string) *SuperChatEventsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SuperChatEventsListCall) Context(ctx context.Context) *SuperChatEventsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SuperChatEventsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SuperChatEventsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "superChatEvents") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "youtube.superChatEvents.list" call. +// Exactly one of *SuperChatEventListResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SuperChatEventListResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SuperChatEventsListCall) Do(opts ...googleapi.CallOption) (*SuperChatEventListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SuperChatEventListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists Super Chat events for a channel.", + // "httpMethod": "GET", + // "id": "youtube.superChatEvents.list", + // "parameterOrder": [ + // "part" + // ], + // "parameters": { + // "hl": { + // "description": "The hl parameter instructs the API to retrieve localized resource metadata for a specific application language that the YouTube website supports. The parameter value must be a language code included in the list returned by the i18nLanguages.list method.\n\nIf localized resource details are available in that language, the resource's snippet.localized object will contain the localized values. However, if localized details are not available, the snippet.localized object will contain resource details in the resource's default language.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "5", + // "description": "The maxResults parameter specifies the maximum number of items that should be returned in the result set.", + // "format": "uint32", + // "location": "query", + // "maximum": "50", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The pageToken parameter identifies a specific page in the result set that should be returned. In an API response, the nextPageToken and prevPageToken properties identify other pages that could be retrieved.", + // "location": "query", + // "type": "string" + // }, + // "part": { + // "description": "The part parameter specifies the superChatEvent resource parts that the API response will include. Supported values are id and snippet.", + // "location": "query", + // "required": true, + // "type": "string" + // } + // }, + // "path": "superChatEvents", + // "response": { + // "$ref": "SuperChatEventListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/youtube", + // "https://www.googleapis.com/auth/youtube.force-ssl", + // "https://www.googleapis.com/auth/youtube.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
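For orientation, here is a caller-side sketch of the new superChatEvents.list call and its Pages helper. This note is not part of the vendored diff; it assumes the usual generated-service shape (a SuperChatEvents field on *youtube.Service, an Items slice and Id field on the response), and uses only methods shown in the hunks above (List, MaxResults, Pages). Imports assumed: fmt, net/http, golang.org/x/net/context, google.golang.org/api/youtube/v3.

func listSuperChatEvents(ctx context.Context, client *http.Client) error {
	// client must already carry OAuth2 credentials for a youtube scope.
	svc, err := youtube.New(client)
	if err != nil {
		return err
	}
	// "id" and "snippet" are the parts the discovery document above declares as supported;
	// 50 is the documented maximum for maxResults.
	call := svc.SuperChatEvents.List("id,snippet").MaxResults(50)
	return call.Pages(ctx, func(page *youtube.SuperChatEventListResponse) error {
		for _, item := range page.Items {
			fmt.Println(item.Id)
		}
		return nil // a non-nil error here halts the iteration, as documented above
	})
}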
+func (c *SuperChatEventsListCall) Pages(ctx context.Context, f func(*SuperChatEventListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "youtube.thumbnails.set": type ThumbnailsSetCall struct { @@ -21206,6 +21701,7 @@ func (c *ThumbnailsSetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "thumbnails/set") @@ -21419,6 +21915,7 @@ func (c *VideoAbuseReportReasonsListCall) doRequest(alt string) (*http.Response, reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21586,6 +22083,7 @@ func (c *VideoCategoriesListCall) doRequest(alt string) (*http.Response, error) reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -21746,6 +22244,7 @@ func (c *VideosDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "videos") @@ -21874,6 +22373,7 @@ func (c *VideosGetRatingCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22132,6 +22632,7 @@ func (c *VideosInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.video) if err != nil { @@ -22500,6 +23001,7 @@ func (c *VideosListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -22733,6 +23235,7 @@ func (c *VideosRateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "videos/rate") @@ -22862,6 +23365,7 @@ func (c *VideosReportAbuseCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.videoabusereport) if err != nil { @@ -22980,6 +23484,7 @@ func (c *VideosUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + 
reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.video) if err != nil { @@ -23187,6 +23692,7 @@ func (c *WatermarksSetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.invideobranding) if err != nil { @@ -23379,6 +23885,7 @@ func (c *WatermarksUnsetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "watermarks/unset") diff --git a/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-api.json b/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-api.json index 6a5448658..7b73fa2eb 100644 --- a/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-api.json +++ b/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/gIgP-6lJrzwCZJRZSauwDcuE0Pc\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/GzZNLgIZSILKjBelfyeP-92h54Q\"", "discoveryVersion": "v1", "id": "youtubeAnalytics:v1", "name": "youtubeAnalytics", "canonicalName": "YouTube Analytics", "version": "v1", - "revision": "20170111", + "revision": "20170222", "title": "YouTube Analytics API", "description": "Retrieves your YouTube Analytics data.", "ownerDomain": "google.com", @@ -485,6 +485,11 @@ "pattern": "[a-zA-Z]+==[a-zA-Z0-9_+-]+", "location": "query" }, + "include-historical-channel-data": { + "type": "boolean", + "description": "If set to true historical data (i.e. 
channel data from before the linking of the channel to the content owner) will be retrieved.", + "location": "query" + }, "max-results": { "type": "integer", "description": "The maximum number of rows to include in the response.", diff --git a/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-gen.go b/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-gen.go index b67a7367a..5e3683828 100644 --- a/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-gen.go +++ b/vendor/google.golang.org/api/youtubeanalytics/v1/youtubeanalytics-gen.go @@ -76,9 +76,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only GroupItems *GroupItemsService @@ -94,6 +95,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGroupItemsService(s *Service) *GroupItemsService { rs := &GroupItemsService{s: s} return rs @@ -514,6 +519,7 @@ func (c *GroupItemsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "groupItems") @@ -630,6 +636,7 @@ func (c *GroupItemsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupitem) if err != nil { @@ -784,6 +791,7 @@ func (c *GroupItemsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -932,6 +940,7 @@ func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "groups") @@ -1048,6 +1057,7 @@ func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -1229,6 +1239,7 @@ func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1406,6 +1417,7 @@ func (c *GroupsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -1538,6 +1550,15 @@ func (c *ReportsQueryCall) Filters(filters string) *ReportsQueryCall { return c } +// IncludeHistoricalChannelData sets the optional parameter +// "include-historical-channel-data": If set to true historical data +// (i.e. channel data from before the linking of the channel to the +// content owner) will be retrieved. +func (c *ReportsQueryCall) IncludeHistoricalChannelData(includeHistoricalChannelData bool) *ReportsQueryCall { + c.urlParams_.Set("include-historical-channel-data", fmt.Sprint(includeHistoricalChannelData)) + return c +} + // MaxResults sets the optional parameter "max-results": The maximum // number of rows to include in the response. func (c *ReportsQueryCall) MaxResults(maxResults int64) *ReportsQueryCall { @@ -1604,6 +1625,7 @@ func (c *ReportsQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1695,6 +1717,11 @@ func (c *ReportsQueryCall) Do(opts ...googleapi.CallOption) (*ResultTable, error // "required": true, // "type": "string" // }, + // "include-historical-channel-data": { + // "description": "If set to true historical data (i.e. channel data from before the linking of the channel to the content owner) will be retrieved.", + // "location": "query", + // "type": "boolean" + // }, // "max-results": { // "description": "The maximum number of rows to include in the response.", // "format": "int32", diff --git a/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-api.json b/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-api.json index afd09e8db..7f16deb8f 100644 --- a/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-api.json +++ b/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-api.json @@ -1,12 +1,12 @@ { "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/GbWyz3kSqUOq8ROslYTuFkmV1gg\"", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/I72z8uNvupkTiHWAAEuguyeFdRU\"", "discoveryVersion": "v1", "id": "youtubeAnalytics:v1beta1", "name": "youtubeAnalytics", "canonicalName": "YouTube Analytics", "version": "v1beta1", - "revision": "20170111", + "revision": "20170222", "title": "YouTube Analytics API", "description": "Retrieves your YouTube Analytics data.", "ownerDomain": "google.com", @@ -488,6 +488,11 @@ "pattern": "[a-zA-Z]+==[a-zA-Z0-9_+-]+", "location": "query" }, + "include-historical-channel-data": { + "type": "boolean", + "description": "If set to true historical data (i.e. 
channel data from before the linking of the channel to the content owner) will be retrieved.", + "location": "query" + }, "max-results": { "type": "integer", "description": "The maximum number of rows to include in the response.", diff --git a/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-gen.go b/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-gen.go index da23e6a4c..32b4e8562 100644 --- a/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-gen.go +++ b/vendor/google.golang.org/api/youtubeanalytics/v1beta1/youtubeanalytics-gen.go @@ -76,9 +76,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only GroupItems *GroupItemsService @@ -94,6 +95,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewGroupItemsService(s *Service) *GroupItemsService { rs := &GroupItemsService{s: s} return rs @@ -514,6 +519,7 @@ func (c *GroupItemsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "groupItems") @@ -630,6 +636,7 @@ func (c *GroupItemsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.groupitem) if err != nil { @@ -784,6 +791,7 @@ func (c *GroupItemsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -932,6 +940,7 @@ func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "groups") @@ -1048,6 +1057,7 @@ func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -1229,6 +1239,7 @@ func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1406,6 +1417,7 @@ func (c *GroupsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader 
= nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { @@ -1538,6 +1550,15 @@ func (c *ReportsQueryCall) Filters(filters string) *ReportsQueryCall { return c } +// IncludeHistoricalChannelData sets the optional parameter +// "include-historical-channel-data": If set to true historical data +// (i.e. channel data from before the linking of the channel to the +// content owner) will be retrieved. +func (c *ReportsQueryCall) IncludeHistoricalChannelData(includeHistoricalChannelData bool) *ReportsQueryCall { + c.urlParams_.Set("include-historical-channel-data", fmt.Sprint(includeHistoricalChannelData)) + return c +} + // MaxResults sets the optional parameter "max-results": The maximum // number of rows to include in the response. func (c *ReportsQueryCall) MaxResults(maxResults int64) *ReportsQueryCall { @@ -1604,6 +1625,7 @@ func (c *ReportsQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1695,6 +1717,11 @@ func (c *ReportsQueryCall) Do(opts ...googleapi.CallOption) (*ResultTable, error // "required": true, // "type": "string" // }, + // "include-historical-channel-data": { + // "description": "If set to true historical data (i.e. channel data from before the linking of the channel to the content owner) will be retrieved.", + // "location": "query", + // "type": "boolean" + // }, // "max-results": { // "description": "The maximum number of rows to include in the response.", // "format": "int32", diff --git a/vendor/google.golang.org/api/youtubereporting/v1/youtubereporting-gen.go b/vendor/google.golang.org/api/youtubereporting/v1/youtubereporting-gen.go index f86568021..f70b7cbe8 100644 --- a/vendor/google.golang.org/api/youtubereporting/v1/youtubereporting-gen.go +++ b/vendor/google.golang.org/api/youtubereporting/v1/youtubereporting-gen.go @@ -67,9 +67,10 @@ func New(client *http.Client) (*Service, error) { } type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only Jobs *JobsService @@ -85,6 +86,10 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + func NewJobsService(s *Service) *JobsService { rs := &JobsService{s: s} rs.Reports = NewJobsReportsService(s) @@ -506,6 +511,7 @@ func (c *JobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) if err != nil { @@ -642,6 +648,7 @@ func (c *JobsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/jobs/{jobId}") @@ -792,6 +799,7 @@ func (c *JobsGetCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -973,6 +981,7 @@ func (c *JobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1154,6 +1163,7 @@ func (c *JobsReportsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1361,6 +1371,7 @@ func (c *JobsReportsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1556,6 +1567,7 @@ func (c *MediaDownloadCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -1751,6 +1763,7 @@ func (c *ReportTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go index a8f99d822..77323f751 100644 --- a/vendor/google.golang.org/appengine/aetest/instance.go +++ b/vendor/google.golang.org/appengine/aetest/instance.go @@ -3,6 +3,7 @@ package aetest import ( "io" "net/http" + "time" "golang.org/x/net/context" "google.golang.org/appengine" @@ -24,6 +25,9 @@ type Options struct { // StronglyConsistentDatastore is whether the local datastore should be // strongly consistent. This will diverge from production behaviour. StronglyConsistentDatastore bool + // StartupTimeout is a duration to wait for instance startup. + // By default, 15 seconds. + StartupTimeout time.Duration } // NewContext starts an instance of the development API server, and returns diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go index 9d7899928..70d4eec1f 100644 --- a/vendor/google.golang.org/appengine/aetest/instance_vm.go +++ b/vendor/google.golang.org/appengine/aetest/instance_vm.go @@ -27,11 +27,17 @@ import ( // If opts is nil the default values are used. func NewInstance(opts *Options) (Instance, error) { i := &instance{ - opts: opts, - appID: "testapp", + opts: opts, + appID: "testapp", + startupTimeout: 15 * time.Second, } - if opts != nil && opts.AppID != "" { - i.appID = opts.AppID + if opts != nil { + if opts.AppID != "" { + i.appID = opts.AppID + } + if opts.StartupTimeout > 0 { + i.startupTimeout = opts.StartupTimeout + } } if err := i.startChild(); err != nil { return nil, err @@ -47,13 +53,14 @@ func newSessionID() string { // instance implements the Instance interface. 
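As an aside, a minimal test-side sketch of the new StartupTimeout field added to aetest.Options above (not part of the vendored change; the 30-second value is arbitrary). Imports assumed: testing, time, google.golang.org/appengine/aetest.

func TestWithSlowDevServer(t *testing.T) {
	inst, err := aetest.NewInstance(&aetest.Options{
		AppID:          "testapp",
		StartupTimeout: 30 * time.Second, // overrides the 15-second default shown above
	})
	if err != nil {
		t.Fatal(err)
	}
	defer inst.Close()

	req, err := inst.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatal(err)
	}
	_ = req // hand the request to the handler under test
}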
type instance struct { - opts *Options - child *exec.Cmd - apiURL *url.URL // base URL of API HTTP server - adminURL string // base URL of admin HTTP server - appDir string - appID string - relFuncs []func() // funcs to release any associated contexts + opts *Options + child *exec.Cmd + apiURL *url.URL // base URL of API HTTP server + adminURL string // base URL of admin HTTP server + appDir string + appID string + startupTimeout time.Duration + relFuncs []func() // funcs to release any associated contexts } // NewRequest returns an *http.Request associated with this instance. @@ -104,7 +111,6 @@ func (i *instance) Close() (err error) { return fmt.Errorf("unable to call /quit handler: %v", err) } res.Body.Close() - select { case <-time.After(15 * time.Second): p.Kill() @@ -235,7 +241,7 @@ func (i *instance) startChild() (err error) { }() select { - case <-time.After(15 * time.Second): + case <-time.After(i.startupTimeout): if p := i.child.Process; p != nil { p.Kill() } diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go index e317cdde3..c66849e83 100644 --- a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go +++ b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go @@ -8,7 +8,7 @@ // A main func is synthesized if one does not exist. // // A sample Dockerfile to be used with this bundler could look like this: -// FROM gcr.io/google_appengine/go-compat +// FROM gcr.io/google-appengine/go-compat // ADD . /app // RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe package main diff --git a/vendor/google.golang.org/appengine/datastore/datastore_test.go b/vendor/google.golang.org/appengine/datastore/datastore_test.go index 847fe3d01..b3888e9d1 100644 --- a/vendor/google.golang.org/appengine/datastore/datastore_test.go +++ b/vendor/google.golang.org/appengine/datastore/datastore_test.go @@ -71,6 +71,8 @@ var ( testGeoPt0 = appengine.GeoPoint{Lat: 1.2, Lng: 3.4} testGeoPt1 = appengine.GeoPoint{Lat: 5, Lng: 10} testBadGeoPt = appengine.GeoPoint{Lat: 1000, Lng: 34} + + now = time.Unix(1e9, 0).UTC() ) type B0 struct { @@ -134,6 +136,37 @@ type K1 struct { K []*Key } +type S struct { + St string +} + +type NoOmit struct { + A string + B int `datastore:"Bb"` + C bool `datastore:",noindex"` +} + +type OmitAll struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` +} + +type Omit struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` + S `datastore:",omitempty"` +} + +type NoOmits struct { + No []NoOmit `datastore:",omitempty"` + S `datastore:",omitempty"` + Ss S `datastore:",omitempty"` +} + type N0 struct { X0 Nonymous X0 @@ -320,6 +353,14 @@ func (d *Doubler) Load(props []Property) error { return LoadStruct(d, props) } +type EmbeddedTime struct { + time.Time +} + +type SpecialTime struct { + MyTime EmbeddedTime +} + func (d *Doubler) Save() ([]Property, error) { // Save the default Property slice to an in-memory buffer (a PropertyList). 
props, err := SaveStruct(d) @@ -485,6 +526,77 @@ var testCases = []testCase{ "", "", }, + { + "omit empty, all", + &OmitAll{}, + new(PropertyList), + "", + "", + }, + { + "omit empty", + &Omit{}, + &PropertyList{ + Property{Name: "St", Value: "", NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false, Multiple: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false, Multiple: false}, + Property{Name: "C", Value: true, NoIndex: true, Multiple: false}, + Property{Name: "F", Value: int64(11), NoIndex: false, Multiple: true}, + Property{Name: "St", Value: "", NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + S: S{St: "string"}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false, Multiple: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false, Multiple: false}, + Property{Name: "C", Value: true, NoIndex: true, Multiple: false}, + Property{Name: "F", Value: int64(11), NoIndex: false, Multiple: true}, + Property{Name: "St", Value: "string", NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "omit empty does not propagate", + &NoOmits{ + No: []NoOmit{ + NoOmit{}, + }, + S: S{}, + Ss: S{}, + }, + &PropertyList{ + Property{Name: "No.A", Value: "", NoIndex: false, Multiple: true}, + Property{Name: "No.Bb", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "No.C", Value: false, NoIndex: true, Multiple: true}, + Property{Name: "Ss.St", Value: "", NoIndex: false, Multiple: false}, + Property{Name: "St", Value: "", NoIndex: false, Multiple: false}}, + "", + "", + }, { "key", &K0{K: testKey1a}, @@ -1311,6 +1423,22 @@ var testCases = []testCase{ "", "", }, + { + "embedded time field", + &SpecialTime{MyTime: EmbeddedTime{now}}, + &SpecialTime{MyTime: EmbeddedTime{now}}, + "", + "", + }, + { + "embedded time load", + &PropertyList{ + Property{Name: "MyTime.", Value: now, NoIndex: false, Multiple: false}, + }, + &SpecialTime{MyTime: EmbeddedTime{now}}, + "", + "", + }, } // checkErr returns the empty string if either both want and err are zero, diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go index 9bfd61bbc..85616cf27 100644 --- a/vendor/google.golang.org/appengine/datastore/doc.go +++ b/vendor/google.golang.org/appengine/datastore/doc.go @@ -87,7 +87,7 @@ behavior for struct pointers. Struct pointers are more strongly typed and are easier to use; PropertyLoadSavers are more flexible. The actual types passed do not have to match between Get and Put calls or even -across different App Engine requests. It is valid to put a *PropertyList and +across different calls to datastore. It is valid to put a *PropertyList and get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. Conceptually, any entity is saved as a sequence of properties, and is loaded into the destination value on a property-by-property basis. When loading into @@ -97,18 +97,28 @@ caller whether this error is fatal, recoverable or ignorable. By default, for struct pointers, all properties are potentially indexed, and the property name is the same as the field name (and hence must start with an -upper case letter). Fields may have a `datastore:"name,options"` tag. 
The tag -name is the property name, which must be one or more valid Go identifiers -joined by ".", but may start with a lower case letter. An empty tag name means -to just use the field name. A "-" tag name means that the datastore will -ignore that field. If options is "noindex" then the field will not be indexed. -If the options is "" then the comma may be omitted. There are no other -recognized options. - -Fields (except for []byte) are indexed by default. Strings longer than 1500 -bytes cannot be indexed; fields used to store long strings should be -tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be -indexed. +upper case letter). + +Fields may have a `datastore:"name,options"` tag. The tag name is the +property name, which must be one or more valid Go identifiers joined by ".", +but may start with a lower case letter. An empty tag name means to just use the +field name. A "-" tag name means that the datastore will ignore that field. + +The only valid options are "omitempty" and "noindex". + +If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save. +The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero. +Struct field values will never be empty. + +If options include "noindex" then the field will not be indexed. All fields are indexed +by default. Strings or byte slices longer than 1500 bytes cannot be indexed; +fields used to store long strings and byte slices must be tagged with "noindex" +or they will cause Put operations to fail. + +To use multiple options together, separate them by a comma. +The order does not matter. + +If the options is "" then the comma may be omitted. Example code: diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go index 7878cbf76..38a636539 100644 --- a/vendor/google.golang.org/appengine/datastore/load.go +++ b/vendor/google.golang.org/appengine/datastore/load.go @@ -65,36 +65,35 @@ func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p P var sliceIndex int name := p.Name - for name != "" { - // First we try to find a field with name matching - // the value of 'name' exactly. - decoder, ok := codec.fields[name] - if ok { - name = "" - } else { - // Now try for legacy flattened nested field (named eg. "A.B.C.D"). - - parent := name - child := "" - - // Cut off the last field (delimited by ".") and find its parent - // in the codec. - // eg. for name "A.B.C.D", split off "A.B.C" and try to - // find a field in the codec with this name. - // Loop again with "A.B", etc. - for !ok { - i := strings.LastIndex(parent, ".") - if i < 0 { - return "no such struct field" - } - if i == len(name)-1 { - return "field name cannot end with '.'" - } - parent, child = name[:i], name[i+1:] - decoder, ok = codec.fields[parent] + + // If name ends with a '.', the last field is anonymous. + // In this case, strings.Split will give us "" as the + // last element of our fields slice, which will match the "" + // field name in the substruct codec. + fields := strings.Split(name, ".") + + for len(fields) > 0 { + var decoder fieldCodec + var ok bool + + // Cut off the last field (delimited by ".") and find its parent + // in the codec. + // eg. for name "A.B.C.D", split off "A.B.C" and try to + // find a field in the codec with this name. + // Loop again with "A.B", etc. 
+ for i := len(fields); i > 0; i-- { + parent := strings.Join(fields[:i], ".") + decoder, ok = codec.fields[parent] + if ok { + fields = fields[i:] + break } + } - name = child + // If we never found a matching field in the codec, return + // error message. + if !ok { + return "no such struct field" } v = initField(structValue, decoder.path) diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go index 55ab9fb27..9270f4d9f 100644 --- a/vendor/google.golang.org/appengine/datastore/prop.go +++ b/vendor/google.golang.org/appengine/datastore/prop.go @@ -150,6 +150,9 @@ type fieldCodec struct { // path is the index path to the field path []int noIndex bool + // omitEmpty indicates that the field should be omitted on save + // if empty. + omitEmpty bool // structCodec is the codec fot the struct field at index 'path', // or nil if the field is not a struct. structCodec *structCodec @@ -261,6 +264,7 @@ func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { c.fields[subname] = fieldCodec{ path: append([]int{i}, subfield.path...), noIndex: subfield.noIndex || opts["noindex"], + omitEmpty: subfield.omitEmpty, structCodec: subfield.structCodec, } } @@ -274,6 +278,7 @@ func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { c.fields[name] = fieldCodec{ path: []int{i}, noIndex: opts["noindex"], + omitEmpty: opts["omitempty"], structCodec: sub, } } diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go index b09e8b348..728d4ca0c 100644 --- a/vendor/google.golang.org/appengine/datastore/save.go +++ b/vendor/google.golang.org/appengine/datastore/save.go @@ -111,6 +111,12 @@ func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p return p, "" } +type saveOpts struct { + noIndex bool + multiple bool + omitEmpty bool +} + // saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. 
func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) { var err error @@ -126,11 +132,14 @@ func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto return propertiesToProto(defaultAppID, key, props) } -func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error { +func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error { + if opts.omitEmpty && isEmptyValue(v) { + return nil + } p := Property{ Name: name, - NoIndex: noIndex, - Multiple: multiple, + NoIndex: opts.noIndex, + Multiple: opts.multiple, } switch x := v.Interface().(type) { case *Key: @@ -166,7 +175,7 @@ func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, if err != nil { return fmt.Errorf("datastore: unsupported struct field: %v", err) } - return sub.save(props, name+".", noIndex, multiple) + return sub.save(props, name+".", opts) } } if p.Value == nil { @@ -178,31 +187,35 @@ func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, func (s structPLS) Save() ([]Property, error) { var props []Property - if err := s.save(&props, "", false, false); err != nil { + if err := s.save(&props, "", saveOpts{}); err != nil { return nil, err } return props, nil } -func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error { +func (s structPLS) save(props *[]Property, prefix string, opts saveOpts) error { for name, f := range s.codec.fields { name = prefix + name v := s.v.FieldByIndex(f.path) if !v.IsValid() || !v.CanSet() { continue } - noIndex1 := noIndex || f.noIndex + var opts1 saveOpts + opts1.noIndex = opts.noIndex || f.noIndex + opts1.multiple = opts.multiple + opts1.omitEmpty = f.omitEmpty // don't propagate // For slice fields that aren't []byte, save each element. if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + opts1.multiple = true for j := 0; j < v.Len(); j++ { - if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil { + if err := saveStructProperty(props, name, opts1, v.Index(j)); err != nil { return err } } continue } // Otherwise, save the field itself. - if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil { + if err := saveStructProperty(props, name, opts1, v); err != nil { return err } } @@ -292,3 +305,23 @@ func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.Ent } return e, nil } + +// isEmptyValue is taken from the encoding/json package in the +// standard library. 
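To make the ",omitempty" behavior documented in the doc.go change above concrete, here is a hypothetical entity type (the Task name and its fields are illustrative, not taken from the diff). The empty values are exactly those recognized by the isEmptyValue helper that follows.

// Task demonstrates the ",omitempty" tag option; options may be combined
// in any order, as the updated doc.go explains.
type Task struct {
	Title string   `datastore:",omitempty"`         // omitted on Save when ""
	Count int      `datastore:"count,omitempty"`    // omitted when 0
	Done  bool     `datastore:",noindex,omitempty"` // omitted when false, saved unindexed otherwise
	Tags  []string `datastore:",omitempty"`         // omitted when len(Tags) == 0
}

Saving &Task{} therefore writes no properties for these fields, while non-empty values are saved exactly as before. Struct-valued fields are never treated as empty, which is why the NoOmits test case above still produces properties for its embedded structs.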
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go index e6d2930fe..455cd456b 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go @@ -8,6 +8,8 @@ Package vision is a generated protocol buffer package. It is generated from these files: google/cloud/vision/v1/geometry.proto google/cloud/vision/v1/image_annotator.proto + google/cloud/vision/v1/text_annotation.proto + google/cloud/vision/v1/web_detection.proto It has these top-level messages: Vertex @@ -25,11 +27,21 @@ It has these top-level messages: ColorInfo DominantColorsAnnotation ImageProperties + CropHint + CropHintsAnnotation + CropHintsParams ImageContext AnnotateImageRequest AnnotateImageResponse BatchAnnotateImagesRequest BatchAnnotateImagesResponse + TextAnnotation + Page + Block + Paragraph + Word + Symbol + WebDetection */ package vision @@ -142,7 +154,7 @@ func init() { proto.RegisterFile("google/cloud/vision/v1/geometry.proto", fileDe var fileDescriptor0 = []byte{ // 237 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0x31, 0x4b, 0x03, 0x31, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x90, 0x31, 0x4b, 0x03, 0x31, 0x14, 0x80, 0x79, 0x57, 0x2c, 0x25, 0xd6, 0xe5, 0x06, 0x39, 0x1c, 0xa4, 0x1c, 0x0a, 0x9d, 0x12, 0xaa, 0x4e, 0xea, 0x74, 0x8b, 0xe0, 0x74, 0xdc, 0xe0, 0xe0, 0x56, 0xaf, 0x8f, 0x47, 0xe0, 0x9a, 0x57, 0x92, 0x34, 0x34, 0xfd, 0xe5, 0x8e, 0xd2, 0xa4, 0x28, 0x8a, 0xdd, 0xf2, 0x91, 0x8f, 0xf7, diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go index 1c291347d..9b028bbaa 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go @@ -22,22 +22,22 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// A bucketized representation of likelihood meant to give our clients highly -// stable results across model upgrades. +// A bucketized representation of likelihood, which is intended to give clients +// highly stable results across model upgrades. type Likelihood int32 const ( // Unknown likelihood. Likelihood_UNKNOWN Likelihood = 0 - // The image very unlikely belongs to the vertical specified. + // It is very unlikely that the image belongs to the specified vertical. Likelihood_VERY_UNLIKELY Likelihood = 1 - // The image unlikely belongs to the vertical specified. + // It is unlikely that the image belongs to the specified vertical. Likelihood_UNLIKELY Likelihood = 2 - // The image possibly belongs to the vertical specified. + // It is possible that the image belongs to the specified vertical. 
Likelihood_POSSIBLE Likelihood = 3 - // The image likely belongs to the vertical specified. + // It is likely that the image belongs to the specified vertical. Likelihood_LIKELY Likelihood = 4 - // The image very likely belongs to the vertical specified. + // It is very likely that the image belongs to the specified vertical. Likelihood_VERY_LIKELY Likelihood = 5 ) @@ -79,31 +79,44 @@ const ( Feature_LABEL_DETECTION Feature_Type = 4 // Run OCR. Feature_TEXT_DETECTION Feature_Type = 5 - // Run various computer vision models to compute image safe-search properties. + // Run dense text document OCR. Takes precedence when both + // DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. + Feature_DOCUMENT_TEXT_DETECTION Feature_Type = 11 + // Run computer vision models to compute image safe-search properties. Feature_SAFE_SEARCH_DETECTION Feature_Type = 6 - // Compute a set of properties about the image (such as the image's dominant colors). + // Compute a set of image properties, such as the image's dominant colors. Feature_IMAGE_PROPERTIES Feature_Type = 7 + // Run crop hints. + Feature_CROP_HINTS Feature_Type = 9 + // Run web detection. + Feature_WEB_DETECTION Feature_Type = 10 ) var Feature_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "FACE_DETECTION", - 2: "LANDMARK_DETECTION", - 3: "LOGO_DETECTION", - 4: "LABEL_DETECTION", - 5: "TEXT_DETECTION", - 6: "SAFE_SEARCH_DETECTION", - 7: "IMAGE_PROPERTIES", + 0: "TYPE_UNSPECIFIED", + 1: "FACE_DETECTION", + 2: "LANDMARK_DETECTION", + 3: "LOGO_DETECTION", + 4: "LABEL_DETECTION", + 5: "TEXT_DETECTION", + 11: "DOCUMENT_TEXT_DETECTION", + 6: "SAFE_SEARCH_DETECTION", + 7: "IMAGE_PROPERTIES", + 9: "CROP_HINTS", + 10: "WEB_DETECTION", } var Feature_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "FACE_DETECTION": 1, - "LANDMARK_DETECTION": 2, - "LOGO_DETECTION": 3, - "LABEL_DETECTION": 4, - "TEXT_DETECTION": 5, - "SAFE_SEARCH_DETECTION": 6, - "IMAGE_PROPERTIES": 7, + "TYPE_UNSPECIFIED": 0, + "FACE_DETECTION": 1, + "LANDMARK_DETECTION": 2, + "LOGO_DETECTION": 3, + "LABEL_DETECTION": 4, + "TEXT_DETECTION": 5, + "DOCUMENT_TEXT_DETECTION": 11, + "SAFE_SEARCH_DETECTION": 6, + "IMAGE_PROPERTIES": 7, + "CROP_HINTS": 9, + "WEB_DETECTION": 10, } func (x Feature_Type) String() string { @@ -112,9 +125,9 @@ func (x Feature_Type) String() string { func (Feature_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } // Face landmark (feature) type. -// Left and right are defined from the vantage of the viewer of the image, -// without considering mirror projections typical of photos. So, LEFT_EYE, -// typically is the person's right eye. +// Left and right are defined from the vantage of the viewer of the image +// without considering mirror projections typical of photos. So, `LEFT_EYE`, +// typically, is the person's right eye. type FaceAnnotation_Landmark_Type int32 const ( @@ -272,9 +285,9 @@ func (FaceAnnotation_Landmark_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0, 0} } -// The Feature indicates what type of image detection task to perform. // Users describe the type of Google Cloud Vision API tasks to perform over -// images by using Features. Features encode the Cloud Vision API +// images by using *Feature*s. Each Feature indicates a type of image +// detection task to perform. Features encode the Cloud Vision API // vertical to operate on and the number of top-scoring results to return. type Feature struct { // The feature type. 
@@ -304,11 +317,24 @@ func (m *Feature) GetMaxResults() int32 { // External image source (Google Cloud Storage image location). type ImageSource struct { - // Google Cloud Storage image URI. It must be in the following form: - // `gs://bucket_name/object_name`. For more - // details, please see: https://cloud.google.com/storage/docs/reference-uris. - // NOTE: Cloud Storage object versioning is not supported! + // NOTE: For new code `image_uri` below is preferred. + // Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. GcsImageUri string `protobuf:"bytes,1,opt,name=gcs_image_uri,json=gcsImageUri" json:"gcs_image_uri,omitempty"` + // Image URI which supports: + // 1) Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. + // 2) Publicly accessible image HTTP/HTTPS URL. + // This is preferred over the legacy `gcs_image_uri` above. When both + // `gcs_image_uri` and `image_uri` are specified, `image_uri` takes + // precedence. + ImageUri string `protobuf:"bytes,2,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"` } func (m *ImageSource) Reset() { *m = ImageSource{} } @@ -323,15 +349,22 @@ func (m *ImageSource) GetGcsImageUri() string { return "" } +func (m *ImageSource) GetImageUri() string { + if m != nil { + return m.ImageUri + } + return "" +} + // Client image to perform Google Cloud Vision API tasks over. type Image struct { // Image content, represented as a stream of bytes. // Note: as with all `bytes` fields, protobuffers use a pure binary // representation, whereas JSON representations use base64. Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` - // Google Cloud Storage image location. If both 'content' and 'source' - // are filled for an image, 'content' takes precedence and it will be - // used for performing the image annotation request. + // Google Cloud Storage image location. If both `content` and `source` + // are provided for an image, `content` takes precedence and is + // used to perform the image annotation request. Source *ImageSource `protobuf:"bytes,2,opt,name=source" json:"source,omitempty"` } @@ -357,35 +390,32 @@ func (m *Image) GetSource() *ImageSource { // A face annotation object contains the results of face detection. type FaceAnnotation struct { // The bounding polygon around the face. The coordinates of the bounding box - // are in the original image's scale, as returned in ImageParams. + // are in the original image's scale, as returned in `ImageParams`. // The bounding box is computed to "frame" the face in accordance with human // expectations. It is based on the landmarker results. // Note that one or more x and/or y coordinates may not be generated in the - // BoundingPoly (the polygon will be unbounded) if only a partial face appears in - // the image to be annotated. + // `BoundingPoly` (the polygon will be unbounded) if only a partial face + // appears in the image to be annotated. 
BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` - // This bounding polygon is tighter than the previous - // boundingPoly, and - // encloses only the skin part of the face. Typically, it is used to - // eliminate the face from any image analysis that detects the + // The `fd_bounding_poly` bounding polygon is tighter than the + // `boundingPoly`, and encloses only the skin part of the face. Typically, it + // is used to eliminate the face from any image analysis that detects the // "amount of skin" visible in an image. It is not based on the // landmarker results, only on the initial face detection, hence // the fd (face detection) prefix. FdBoundingPoly *BoundingPoly `protobuf:"bytes,2,opt,name=fd_bounding_poly,json=fdBoundingPoly" json:"fd_bounding_poly,omitempty"` // Detected face landmarks. Landmarks []*FaceAnnotation_Landmark `protobuf:"bytes,3,rep,name=landmarks" json:"landmarks,omitempty"` - // Roll angle. Indicates the amount of clockwise/anti-clockwise rotation of - // the - // face relative to the image vertical, about the axis perpendicular to the - // face. Range [-180,180]. + // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation + // of the face relative to the image vertical about the axis perpendicular to + // the face. Range [-180,180]. RollAngle float32 `protobuf:"fixed32,4,opt,name=roll_angle,json=rollAngle" json:"roll_angle,omitempty"` - // Yaw angle. Indicates the leftward/rightward angle that the face is - // pointing, relative to the vertical plane perpendicular to the image. Range + // Yaw angle, which indicates the leftward/rightward angle that the face is + // pointing relative to the vertical plane perpendicular to the image. Range // [-180,180]. PanAngle float32 `protobuf:"fixed32,5,opt,name=pan_angle,json=panAngle" json:"pan_angle,omitempty"` - // Pitch angle. Indicates the upwards/downwards angle that the face is - // pointing - // relative to the image's horizontal plane. Range [-180,180]. + // Pitch angle, which indicates the upwards/downwards angle that the face is + // pointing relative to the image's horizontal plane. Range [-180,180]. TiltAngle float32 `protobuf:"fixed32,6,opt,name=tilt_angle,json=tiltAngle" json:"tilt_angle,omitempty"` // Detection confidence. Range [0, 1]. DetectionConfidence float32 `protobuf:"fixed32,7,opt,name=detection_confidence,json=detectionConfidence" json:"detection_confidence,omitempty"` @@ -519,8 +549,9 @@ func (m *FaceAnnotation) GetHeadwearLikelihood() Likelihood { // A face-specific landmark (for example, a face feature). // Landmark positions may fall outside the bounds of the image -// when the face is near one or more edges of the image. -// Therefore it is NOT guaranteed that 0 <= x < width or 0 <= y < height. +// if the face is near one or more edges of the image. +// Therefore it is NOT guaranteed that `0 <= x < width` or +// `0 <= y < height`. type FaceAnnotation_Landmark struct { // Face landmark type. Type FaceAnnotation_Landmark_Type `protobuf:"varint,3,opt,name=type,enum=google.cloud.vision.v1.FaceAnnotation_Landmark_Type" json:"type,omitempty"` @@ -549,7 +580,7 @@ func (m *FaceAnnotation_Landmark) GetPosition() *Position { // Detected entity location information. type LocationInfo struct { - // Lat - long location coordinates. + // lat/long location coordinates. 
LatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=lat_lng,json=latLng" json:"lat_lng,omitempty"` } @@ -565,7 +596,7 @@ func (m *LocationInfo) GetLatLng() *google_type1.LatLng { return nil } -// Arbitrary name/value pair. +// A `Property` consists of a user-supplied name/value pair. type Property struct { // Name of the property. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -594,40 +625,40 @@ func (m *Property) GetValue() string { // Set of detected entity features. type EntityAnnotation struct { - // Opaque entity ID. Some IDs might be available in Knowledge Graph(KG). - // For more details on KG please see: - // https://developers.google.com/knowledge-graph/ + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/). Mid string `protobuf:"bytes,1,opt,name=mid" json:"mid,omitempty"` // The language code for the locale in which the entity textual - // description (next field) is expressed. + // `description` is expressed. Locale string `protobuf:"bytes,2,opt,name=locale" json:"locale,omitempty"` - // Entity textual description, expressed in its locale language. + // Entity textual description, expressed in its `locale` language. Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` // Overall score of the result. Range [0, 1]. Score float32 `protobuf:"fixed32,4,opt,name=score" json:"score,omitempty"` // The accuracy of the entity detection in an image. - // For example, for an image containing 'Eiffel Tower,' this field represents - // the confidence that there is a tower in the query image. Range [0, 1]. + // For example, for an image in which the "Eiffel Tower" entity is detected, + // this field represents the confidence that there is a tower in the query + // image. Range [0, 1]. Confidence float32 `protobuf:"fixed32,5,opt,name=confidence" json:"confidence,omitempty"` // The relevancy of the ICA (Image Content Annotation) label to the - // image. For example, the relevancy of 'tower' to an image containing - // 'Eiffel Tower' is likely higher than an image containing a distant towering - // building, though the confidence that there is a tower may be the same. - // Range [0, 1]. + // image. For example, the relevancy of "tower" is likely higher to an image + // containing the detected "Eiffel Tower" than to an image containing a + // detected distant towering building, even though the confidence that + // there is a tower in each image may be the same. Range [0, 1]. Topicality float32 `protobuf:"fixed32,6,opt,name=topicality" json:"topicality,omitempty"` - // Image region to which this entity belongs. Not filled currently + // Image region to which this entity belongs. Currently not produced // for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s // are produced for the entire text detected in an image region, followed by // `boundingPoly`s for each word within the detected text. BoundingPoly *BoundingPoly `protobuf:"bytes,7,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` // The location information for the detected entity. Multiple - // LocationInfo elements can be present since one location may - // indicate the location of the scene in the query image, and another the - // location of the place where the query image was taken. Location information - // is usually present for landmarks. 
+ // `LocationInfo` elements can be present because one location may + // indicate the location of the scene in the image, and another location + // may indicate the location of the place where the image was taken. + // Location information is usually present for landmarks. Locations []*LocationInfo `protobuf:"bytes,8,rep,name=locations" json:"locations,omitempty"` - // Some entities can have additional optional Property fields. - // For example a different kind of score or string that qualifies the entity. + // Some entities may have optional user-supplied `Property` (name/value) + // fields, such a score or string that qualifies the entity. Properties []*Property `protobuf:"bytes,9,rep,name=properties" json:"properties,omitempty"` } @@ -699,17 +730,17 @@ func (m *EntityAnnotation) GetProperties() []*Property { return nil } -// Set of features pertaining to the image, computed by various computer vision +// Set of features pertaining to the image, computed by computer vision // methods over safe-search verticals (for example, adult, spoof, medical, // violence). type SafeSearchAnnotation struct { - // Represents the adult contents likelihood for the image. + // Represents the adult content likelihood for the image. Adult Likelihood `protobuf:"varint,1,opt,name=adult,enum=google.cloud.vision.v1.Likelihood" json:"adult,omitempty"` - // Spoof likelihood. The likelihood that an obvious modification + // Spoof likelihood. The likelihood that an modification // was made to the image's canonical version to make it appear // funny or offensive. Spoof Likelihood `protobuf:"varint,2,opt,name=spoof,enum=google.cloud.vision.v1.Likelihood" json:"spoof,omitempty"` - // Likelihood this is a medical image. + // Likelihood that this is a medical image. Medical Likelihood `protobuf:"varint,3,opt,name=medical,enum=google.cloud.vision.v1.Likelihood" json:"medical,omitempty"` // Violence likelihood. Violence Likelihood `protobuf:"varint,4,opt,name=violence,enum=google.cloud.vision.v1.Likelihood" json:"violence,omitempty"` @@ -748,7 +779,7 @@ func (m *SafeSearchAnnotation) GetViolence() Likelihood { return Likelihood_UNKNOWN } -// Rectangle determined by min and max LatLng pairs. +// Rectangle determined by min and max `LatLng` pairs. type LatLongRect struct { // Min lat/long pair. MinLatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=min_lat_lng,json=minLatLng" json:"min_lat_lng,omitempty"` @@ -775,14 +806,14 @@ func (m *LatLongRect) GetMaxLatLng() *google_type1.LatLng { return nil } -// Color information consists of RGB channels, score and fraction of -// image the color occupies in the image. +// Color information consists of RGB channels, score, and the fraction of +// the image that the color occupies in the image. type ColorInfo struct { // RGB components of the color. Color *google_type.Color `protobuf:"bytes,1,opt,name=color" json:"color,omitempty"` // Image-specific score for this color. Value in range [0, 1]. Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` - // Stores the fraction of pixels the color occupies in the image. + // The fraction of pixels the color occupies in the image. // Value in range [0, 1]. PixelFraction float32 `protobuf:"fixed32,3,opt,name=pixel_fraction,json=pixelFraction" json:"pixel_fraction,omitempty"` } @@ -815,7 +846,7 @@ func (m *ColorInfo) GetPixelFraction() float32 { // Set of dominant colors and their corresponding scores. type DominantColorsAnnotation struct { - // RGB color values, with their score and pixel fraction. 
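// Editor's note: an illustrative consumer (not generated code) of the EntityAnnotation and
// SafeSearchAnnotation messages documented above. It assumes the import alias `vision`;
// the score threshold and function names are placeholders, not part of the API.
func confidentLabels(resp *vision.AnnotateImageResponse, minScore float32) []string {
	var labels []string
	for _, ann := range resp.GetLabelAnnotations() {
		// Score is the overall result quality and Topicality the label's relevancy to
		// this particular image; both are in the range [0, 1].
		if ann.Score >= minScore {
			labels = append(labels, ann.Description)
		}
	}
	return labels
}

func flaggedAsAdult(resp *vision.AnnotateImageResponse) bool {
	ss := resp.GetSafeSearchAnnotation()
	// Treat LIKELY and VERY_LIKELY as a positive adult-content signal.
	return ss != nil &&
		(ss.Adult == vision.Likelihood_LIKELY || ss.Adult == vision.Likelihood_VERY_LIKELY)
}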
+ // RGB color values with their score and pixel fraction. Colors []*ColorInfo `protobuf:"bytes,1,rep,name=colors" json:"colors,omitempty"` } @@ -831,7 +862,7 @@ func (m *DominantColorsAnnotation) GetColors() []*ColorInfo { return nil } -// Stores image properties (e.g. dominant colors). +// Stores image properties, such as dominant colors. type ImageProperties struct { // If present, dominant colors completed successfully. DominantColors *DominantColorsAnnotation `protobuf:"bytes,1,opt,name=dominant_colors,json=dominantColors" json:"dominant_colors,omitempty"` @@ -849,9 +880,87 @@ func (m *ImageProperties) GetDominantColors() *DominantColorsAnnotation { return nil } -// Image context. +// Single crop hint that is used to generate a new crop when serving an image. +type CropHint struct { + // The bounding polygon for the crop region. The coordinates of the bounding + // box are in the original image's scale, as returned in `ImageParams`. + BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` + // Confidence of this being a salient region. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` + // Fraction of importance of this salient region with respect to the original + // image. + ImportanceFraction float32 `protobuf:"fixed32,3,opt,name=importance_fraction,json=importanceFraction" json:"importance_fraction,omitempty"` +} + +func (m *CropHint) Reset() { *m = CropHint{} } +func (m *CropHint) String() string { return proto.CompactTextString(m) } +func (*CropHint) ProtoMessage() {} +func (*CropHint) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *CropHint) GetBoundingPoly() *BoundingPoly { + if m != nil { + return m.BoundingPoly + } + return nil +} + +func (m *CropHint) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *CropHint) GetImportanceFraction() float32 { + if m != nil { + return m.ImportanceFraction + } + return 0 +} + +// Set of crop hints that are used to generate new crops when serving images. +type CropHintsAnnotation struct { + CropHints []*CropHint `protobuf:"bytes,1,rep,name=crop_hints,json=cropHints" json:"crop_hints,omitempty"` +} + +func (m *CropHintsAnnotation) Reset() { *m = CropHintsAnnotation{} } +func (m *CropHintsAnnotation) String() string { return proto.CompactTextString(m) } +func (*CropHintsAnnotation) ProtoMessage() {} +func (*CropHintsAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *CropHintsAnnotation) GetCropHints() []*CropHint { + if m != nil { + return m.CropHints + } + return nil +} + +// Parameters for crop hints annotation request. +type CropHintsParams struct { + // Aspect ratios in floats, representing the ratio of the width to the height + // of the image. For example, if the desired aspect ratio is 4/3, the + // corresponding float value should be 1.33333. If not specified, the + // best possible crop is returned. The number of provided aspect ratios is + // limited to a maximum of 16; any aspect ratios provided after the 16th are + // ignored. 
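// Editor's note: a small sketch (not part of the generated file) of reading the
// ImageProperties and CropHints annotations defined above. All accessors used here are
// generated getters from this file, and they are nil-safe, so the chains below do not
// panic when an annotation is absent. Assumes the import alias `vision`.
func strongestColor(props *vision.ImageProperties) *vision.ColorInfo {
	var best *vision.ColorInfo
	for _, c := range props.GetDominantColors().GetColors() {
		// PixelFraction is the share of image pixels this color covers, in [0, 1].
		if best == nil || c.GetPixelFraction() > best.GetPixelFraction() {
			best = c
		}
	}
	return best
}

func bestCropHint(ann *vision.CropHintsAnnotation) *vision.CropHint {
	var best *vision.CropHint
	for _, h := range ann.GetCropHints() {
		// Confidence scores the hint as a salient region, in [0, 1].
		if best == nil || h.GetConfidence() > best.GetConfidence() {
			best = h
		}
	}
	return best
}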
+ AspectRatios []float32 `protobuf:"fixed32,1,rep,packed,name=aspect_ratios,json=aspectRatios" json:"aspect_ratios,omitempty"` +} + +func (m *CropHintsParams) Reset() { *m = CropHintsParams{} } +func (m *CropHintsParams) String() string { return proto.CompactTextString(m) } +func (*CropHintsParams) ProtoMessage() {} +func (*CropHintsParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *CropHintsParams) GetAspectRatios() []float32 { + if m != nil { + return m.AspectRatios + } + return nil +} + +// Image context and/or feature-specific parameters. type ImageContext struct { - // Lat/long rectangle that specifies the location of the image. + // lat/long rectangle that specifies the location of the image. LatLongRect *LatLongRect `protobuf:"bytes,1,opt,name=lat_long_rect,json=latLongRect" json:"lat_long_rect,omitempty"` // List of languages to use for TEXT_DETECTION. In most cases, an empty value // yields the best results since it enables automatic language detection. For @@ -860,15 +969,16 @@ type ImageContext struct { // setting a hint will help get better results (although it will be a // significant hindrance if the hint is wrong). Text detection returns an // error if one or more of the specified languages is not one of the - // [supported - // languages](/translate/v2/translate-reference#supported_languages). + // [supported languages](/vision/docs/languages). LanguageHints []string `protobuf:"bytes,2,rep,name=language_hints,json=languageHints" json:"language_hints,omitempty"` + // Parameters for crop hints annotation request. + CropHintsParams *CropHintsParams `protobuf:"bytes,4,opt,name=crop_hints_params,json=cropHintsParams" json:"crop_hints_params,omitempty"` } func (m *ImageContext) Reset() { *m = ImageContext{} } func (m *ImageContext) String() string { return proto.CompactTextString(m) } func (*ImageContext) ProtoMessage() {} -func (*ImageContext) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } +func (*ImageContext) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } func (m *ImageContext) GetLatLongRect() *LatLongRect { if m != nil { @@ -884,6 +994,13 @@ func (m *ImageContext) GetLanguageHints() []string { return nil } +func (m *ImageContext) GetCropHintsParams() *CropHintsParams { + if m != nil { + return m.CropHintsParams + } + return nil +} + // Request for performing Google Cloud Vision API tasks over a user-provided // image, with user-requested features. type AnnotateImageRequest struct { @@ -898,7 +1015,7 @@ type AnnotateImageRequest struct { func (m *AnnotateImageRequest) Reset() { *m = AnnotateImageRequest{} } func (m *AnnotateImageRequest) String() string { return proto.CompactTextString(m) } func (*AnnotateImageRequest) ProtoMessage() {} -func (*AnnotateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } +func (*AnnotateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } func (m *AnnotateImageRequest) GetImage() *Image { if m != nil { @@ -923,30 +1040,40 @@ func (m *AnnotateImageRequest) GetImageContext() *ImageContext { // Response to an image annotation request. type AnnotateImageResponse struct { - // If present, face detection completed successfully. + // If present, face detection has completed successfully. FaceAnnotations []*FaceAnnotation `protobuf:"bytes,1,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"` - // If present, landmark detection completed successfully. 
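// Editor's note: an illustrative constructor (not generated code) for the ImageContext
// message described above, combining crop-hint aspect ratios with OCR language hints.
// Assumes the import alias `vision`; the concrete ratios and the "en" hint are example
// values only.
func exampleImageContext() *vision.ImageContext {
	return &vision.ImageContext{
		CropHintsParams: &vision.CropHintsParams{
			// Each entry is width/height: 1.33333 is roughly 4:3, 1.77778 roughly 16:9.
			// Only the first 16 ratios are honoured; later entries are ignored.
			AspectRatios: []float32{1.33333, 1.77778},
		},
		// Language hints are usually best left empty so the service auto-detects the
		// language; set them only when the text language is known in advance.
		LanguageHints: []string{"en"},
	}
}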
+ // If present, landmark detection has completed successfully. LandmarkAnnotations []*EntityAnnotation `protobuf:"bytes,2,rep,name=landmark_annotations,json=landmarkAnnotations" json:"landmark_annotations,omitempty"` - // If present, logo detection completed successfully. + // If present, logo detection has completed successfully. LogoAnnotations []*EntityAnnotation `protobuf:"bytes,3,rep,name=logo_annotations,json=logoAnnotations" json:"logo_annotations,omitempty"` - // If present, label detection completed successfully. + // If present, label detection has completed successfully. LabelAnnotations []*EntityAnnotation `protobuf:"bytes,4,rep,name=label_annotations,json=labelAnnotations" json:"label_annotations,omitempty"` - // If present, text (OCR) detection completed successfully. + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. TextAnnotations []*EntityAnnotation `protobuf:"bytes,5,rep,name=text_annotations,json=textAnnotations" json:"text_annotations,omitempty"` - // If present, safe-search annotation completed successfully. + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. + // This annotation provides the structural hierarchy for the OCR detected + // text. + FullTextAnnotation *TextAnnotation `protobuf:"bytes,12,opt,name=full_text_annotation,json=fullTextAnnotation" json:"full_text_annotation,omitempty"` + // If present, safe-search annotation has completed successfully. SafeSearchAnnotation *SafeSearchAnnotation `protobuf:"bytes,6,opt,name=safe_search_annotation,json=safeSearchAnnotation" json:"safe_search_annotation,omitempty"` // If present, image properties were extracted successfully. ImagePropertiesAnnotation *ImageProperties `protobuf:"bytes,8,opt,name=image_properties_annotation,json=imagePropertiesAnnotation" json:"image_properties_annotation,omitempty"` + // If present, crop hints have completed successfully. + CropHintsAnnotation *CropHintsAnnotation `protobuf:"bytes,11,opt,name=crop_hints_annotation,json=cropHintsAnnotation" json:"crop_hints_annotation,omitempty"` + // If present, web detection has completed successfully. + WebDetection *WebDetection `protobuf:"bytes,13,opt,name=web_detection,json=webDetection" json:"web_detection,omitempty"` // If set, represents the error message for the operation. - // Note that filled-in mage annotations are guaranteed to be - // correct, even when error is non-empty. + // Note that filled-in image annotations are guaranteed to be + // correct, even when `error` is set. 
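// Editor's note: a hedged sketch (not part of the generated file) of the error contract
// documented above: when `error` is set, any annotations that are populated remain valid,
// so callers can surface the status and still keep partial results. Assumes the import
// alias `vision`, the standard library "fmt", and that google_rpc resolves to
// "google.golang.org/genproto/googleapis/rpc/status".
func extractText(resp *vision.AnnotateImageResponse) (string, error) {
	var err error
	if st := resp.GetError(); st != nil {
		err = fmt.Errorf("vision annotation error (code %d): %s", st.Code, st.Message)
	}
	// FullTextAnnotation carries the structured OCR result; its Text field is the
	// concatenated UTF-8 text of all detected pages (see text_annotation.pb.go below).
	return resp.GetFullTextAnnotation().GetText(), err
}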
Error *google_rpc.Status `protobuf:"bytes,9,opt,name=error" json:"error,omitempty"` } func (m *AnnotateImageResponse) Reset() { *m = AnnotateImageResponse{} } func (m *AnnotateImageResponse) String() string { return proto.CompactTextString(m) } func (*AnnotateImageResponse) ProtoMessage() {} -func (*AnnotateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } +func (*AnnotateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } func (m *AnnotateImageResponse) GetFaceAnnotations() []*FaceAnnotation { if m != nil { @@ -983,6 +1110,13 @@ func (m *AnnotateImageResponse) GetTextAnnotations() []*EntityAnnotation { return nil } +func (m *AnnotateImageResponse) GetFullTextAnnotation() *TextAnnotation { + if m != nil { + return m.FullTextAnnotation + } + return nil +} + func (m *AnnotateImageResponse) GetSafeSearchAnnotation() *SafeSearchAnnotation { if m != nil { return m.SafeSearchAnnotation @@ -997,6 +1131,20 @@ func (m *AnnotateImageResponse) GetImagePropertiesAnnotation() *ImageProperties return nil } +func (m *AnnotateImageResponse) GetCropHintsAnnotation() *CropHintsAnnotation { + if m != nil { + return m.CropHintsAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetWebDetection() *WebDetection { + if m != nil { + return m.WebDetection + } + return nil +} + func (m *AnnotateImageResponse) GetError() *google_rpc.Status { if m != nil { return m.Error @@ -1013,7 +1161,7 @@ type BatchAnnotateImagesRequest struct { func (m *BatchAnnotateImagesRequest) Reset() { *m = BatchAnnotateImagesRequest{} } func (m *BatchAnnotateImagesRequest) String() string { return proto.CompactTextString(m) } func (*BatchAnnotateImagesRequest) ProtoMessage() {} -func (*BatchAnnotateImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } +func (*BatchAnnotateImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } func (m *BatchAnnotateImagesRequest) GetRequests() []*AnnotateImageRequest { if m != nil { @@ -1031,7 +1179,7 @@ type BatchAnnotateImagesResponse struct { func (m *BatchAnnotateImagesResponse) Reset() { *m = BatchAnnotateImagesResponse{} } func (m *BatchAnnotateImagesResponse) String() string { return proto.CompactTextString(m) } func (*BatchAnnotateImagesResponse) ProtoMessage() {} -func (*BatchAnnotateImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } +func (*BatchAnnotateImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } func (m *BatchAnnotateImagesResponse) GetResponses() []*AnnotateImageResponse { if m != nil { @@ -1054,6 +1202,9 @@ func init() { proto.RegisterType((*ColorInfo)(nil), "google.cloud.vision.v1.ColorInfo") proto.RegisterType((*DominantColorsAnnotation)(nil), "google.cloud.vision.v1.DominantColorsAnnotation") proto.RegisterType((*ImageProperties)(nil), "google.cloud.vision.v1.ImageProperties") + proto.RegisterType((*CropHint)(nil), "google.cloud.vision.v1.CropHint") + proto.RegisterType((*CropHintsAnnotation)(nil), "google.cloud.vision.v1.CropHintsAnnotation") + proto.RegisterType((*CropHintsParams)(nil), "google.cloud.vision.v1.CropHintsParams") proto.RegisterType((*ImageContext)(nil), "google.cloud.vision.v1.ImageContext") proto.RegisterType((*AnnotateImageRequest)(nil), "google.cloud.vision.v1.AnnotateImageRequest") proto.RegisterType((*AnnotateImageResponse)(nil), "google.cloud.vision.v1.AnnotateImageResponse") @@ -1141,133 +1292,148 @@ var _ImageAnnotator_serviceDesc = grpc.ServiceDesc{ func init() { 
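// Editor's note: a sketch (not part of the generated file) of issuing a batch request
// with the generated gRPC client. It assumes the ImageAnnotatorClient interface and
// NewImageAnnotatorClient constructor produced by protoc-gen-go for the service described
// by _ImageAnnotator_serviceDesc above, an already-dialled *grpc.ClientConn carrying
// suitable credentials, and the import aliases vision, "golang.org/x/net/context" and
// "google.golang.org/grpc" used elsewhere in this vendor tree. Feature_LABEL_DETECTION
// and the Feature field names are assumed from the enum declared earlier in this file.
func annotateLabels(ctx context.Context, conn *grpc.ClientConn, img *vision.Image) (*vision.BatchAnnotateImagesResponse, error) {
	client := vision.NewImageAnnotatorClient(conn)
	req := &vision.BatchAnnotateImagesRequest{
		Requests: []*vision.AnnotateImageRequest{{
			Image:    img,
			Features: []*vision.Feature{{Type: vision.Feature_LABEL_DETECTION, MaxResults: 10}},
		}},
	}
	// Per-image failures are reported in each response's Error field rather than as an
	// RPC error, so a nil error here does not mean every image succeeded.
	return client.BatchAnnotateImages(ctx, req)
}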
proto.RegisterFile("google/cloud/vision/v1/image_annotator.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 2044 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0xcd, 0x6e, 0xe3, 0xc8, - 0x11, 0x1e, 0x49, 0x96, 0x2d, 0x95, 0x6c, 0x99, 0x6e, 0x7b, 0xbc, 0x1a, 0xcf, 0x9f, 0x97, 0xc9, - 0x24, 0xc6, 0x62, 0x22, 0x67, 0x3c, 0x1b, 0x60, 0xb3, 0x33, 0x08, 0x22, 0xd9, 0xb4, 0x2d, 0x8c, - 0x2c, 0x6a, 0x5b, 0x72, 0x26, 0x4e, 0x82, 0x10, 0x34, 0xd5, 0xe2, 0x70, 0x96, 0x62, 0x33, 0x24, - 0xe5, 0xb1, 0x2e, 0x39, 0x04, 0x08, 0x90, 0x7b, 0xce, 0xc9, 0x3b, 0x04, 0x08, 0x72, 0xcd, 0x39, - 0xc7, 0x60, 0x5f, 0x21, 0x0f, 0x91, 0x63, 0xd0, 0x3f, 0xa4, 0x5a, 0x5e, 0x6b, 0x22, 0xef, 0x49, - 0xec, 0xaa, 0xfa, 0xbe, 0xaa, 0xae, 0xea, 0xbf, 0x12, 0x3c, 0x77, 0x29, 0x75, 0x7d, 0xb2, 0xef, - 0xf8, 0x74, 0x3c, 0xd8, 0xbf, 0xf2, 0x62, 0x8f, 0x06, 0xfb, 0x57, 0x2f, 0xf6, 0xbd, 0x91, 0xed, - 0x12, 0xcb, 0x0e, 0x02, 0x9a, 0xd8, 0x09, 0x8d, 0xea, 0x61, 0x44, 0x13, 0x8a, 0xb6, 0x85, 0x75, - 0x9d, 0x5b, 0xd7, 0x85, 0x75, 0xfd, 0xea, 0xc5, 0xce, 0x23, 0xc9, 0x62, 0x87, 0xde, 0xbe, 0xc4, - 0x78, 0x34, 0x88, 0x05, 0x6a, 0xe7, 0xd9, 0x1c, 0x1f, 0x2e, 0xa1, 0x23, 0x92, 0x44, 0x13, 0x69, - 0xf6, 0x89, 0x34, 0x8b, 0x42, 0x67, 0x3f, 0x4e, 0xec, 0x64, 0x1c, 0xdf, 0x50, 0x24, 0x93, 0x90, - 0xec, 0x3b, 0xd4, 0x4f, 0xc3, 0xd9, 0xa9, 0xa9, 0x0a, 0xdf, 0x4e, 0xfc, 0xc0, 0x15, 0x1a, 0xfd, - 0x2f, 0x79, 0x58, 0x39, 0x26, 0x76, 0x32, 0x8e, 0x08, 0xfa, 0x02, 0x96, 0x98, 0x41, 0x2d, 0xb7, - 0x9b, 0xdb, 0xab, 0x1e, 0x7c, 0xbf, 0x7e, 0xfb, 0x1c, 0xea, 0xd2, 0xbc, 0xde, 0x9f, 0x84, 0x04, - 0x73, 0x04, 0x7a, 0x0a, 0x95, 0x91, 0x7d, 0x6d, 0x45, 0x24, 0x1e, 0xfb, 0x49, 0x5c, 0xcb, 0xef, - 0xe6, 0xf6, 0x8a, 0x18, 0x46, 0xf6, 0x35, 0x16, 0x12, 0xfd, 0x1f, 0x39, 0x58, 0x62, 0xf6, 0x68, - 0x0b, 0xb4, 0xfe, 0x45, 0xd7, 0xb0, 0xce, 0x3b, 0xbd, 0xae, 0x71, 0xd8, 0x3a, 0x6e, 0x19, 0x47, - 0xda, 0x3d, 0x84, 0xa0, 0x7a, 0xdc, 0x38, 0x34, 0xac, 0x23, 0xa3, 0x6f, 0x1c, 0xf6, 0x5b, 0x66, - 0x47, 0xcb, 0xa1, 0x6d, 0x40, 0xed, 0x46, 0xe7, 0xe8, 0xac, 0x81, 0xdf, 0x28, 0xf2, 0x3c, 0xb3, - 0x6d, 0x9b, 0x27, 0xa6, 0x22, 0x2b, 0xa0, 0x4d, 0x58, 0x6f, 0x37, 0x9a, 0x46, 0x5b, 0x11, 0x2e, - 0x31, 0xc3, 0xbe, 0xf1, 0xcb, 0xbe, 0x22, 0x2b, 0xa2, 0x07, 0x70, 0xbf, 0xd7, 0x38, 0x36, 0xac, - 0x9e, 0xd1, 0xc0, 0x87, 0xa7, 0x8a, 0x6a, 0x99, 0x45, 0xd6, 0x3a, 0x6b, 0x9c, 0x18, 0x56, 0x17, - 0x9b, 0x5d, 0x03, 0xf7, 0x5b, 0x46, 0x4f, 0x5b, 0xd1, 0x5f, 0x40, 0xa5, 0xc5, 0x2a, 0xdc, 0xa3, - 0xe3, 0xc8, 0x21, 0x48, 0x87, 0x35, 0xd7, 0x89, 0x2d, 0x51, 0xf4, 0x71, 0xe4, 0xf1, 0x5c, 0x95, - 0x71, 0xc5, 0x75, 0x62, 0x6e, 0x76, 0x1e, 0x79, 0xfa, 0x6f, 0xa1, 0xc8, 0xbf, 0x51, 0x0d, 0x56, - 0x1c, 0x1a, 0x24, 0x24, 0x48, 0xb8, 0xd9, 0x2a, 0x4e, 0x87, 0xe8, 0x15, 0x2c, 0xc7, 0x9c, 0x90, - 0xa7, 0xaa, 0x72, 0xf0, 0xbd, 0x79, 0xb9, 0x56, 0x7c, 0x63, 0x09, 0xd1, 0xff, 0xbd, 0x0e, 0xd5, - 0x63, 0xdb, 0x21, 0x8d, 0x6c, 0xfd, 0xa0, 0x16, 0xac, 0x5d, 0xd2, 0x71, 0x30, 0xf0, 0x02, 0xd7, - 0x0a, 0xa9, 0x3f, 0xe1, 0xfe, 0x2a, 0xf3, 0x4b, 0xd8, 0x94, 0xc6, 0x5d, 0xea, 0x4f, 0xf0, 0xea, - 0xa5, 0x32, 0x42, 0x1d, 0xd0, 0x86, 0x03, 0x6b, 0x96, 0x2d, 0x7f, 0x07, 0xb6, 0xea, 0x70, 0xa0, - 0x8e, 0xd1, 0x19, 0x94, 0x7d, 0x3b, 0x18, 0x8c, 0xec, 0xe8, 0xeb, 0xb8, 0x56, 0xd8, 0x2d, 0xec, - 0x55, 0x0e, 0xf6, 0xe7, 0xae, 0xac, 0x99, 0x59, 0xd5, 0xdb, 0x12, 0x87, 0xa7, 0x0c, 0xe8, 0x31, - 0x40, 0x44, 0x7d, 0xdf, 0xb2, 0x03, 0xd7, 0x27, 0xb5, 0xa5, 0xdd, 0xdc, 0x5e, 0x1e, 0x97, 0x99, - 0xa4, 0xc1, 0x04, 0xe8, 0x21, 0x94, 0x43, 0x3b, 0x90, 
0xda, 0x22, 0xd7, 0x96, 0x42, 0x3b, 0x10, - 0xca, 0xc7, 0x00, 0x89, 0xe7, 0x27, 0x52, 0xbb, 0x2c, 0xb0, 0x4c, 0x22, 0xd4, 0x2f, 0x60, 0x6b, - 0x40, 0x12, 0xe2, 0x30, 0xdf, 0x96, 0x43, 0x83, 0xa1, 0x37, 0x20, 0x81, 0x43, 0x6a, 0x2b, 0xdc, - 0x70, 0x33, 0xd3, 0x1d, 0x66, 0x2a, 0xf4, 0x13, 0xd8, 0x4e, 0x43, 0x63, 0xc9, 0x52, 0x40, 0x25, - 0x0e, 0xba, 0xaf, 0x68, 0x15, 0x58, 0x0b, 0xaa, 0xef, 0xe9, 0xc4, 0xf2, 0xbd, 0xaf, 0x89, 0xef, - 0xbd, 0xa3, 0x74, 0x50, 0x2b, 0xf3, 0x2d, 0xa7, 0xcf, 0x4b, 0x4c, 0x3b, 0xb3, 0xc4, 0x6b, 0xef, - 0xe9, 0x64, 0x3a, 0x44, 0x26, 0x6c, 0xc4, 0x34, 0x8a, 0xe8, 0x07, 0x95, 0x0d, 0x16, 0x66, 0xd3, - 0x04, 0x58, 0x21, 0x3c, 0x03, 0xcd, 0x0e, 0x5c, 0x12, 0xa9, 0x7c, 0x95, 0x85, 0xf9, 0xd6, 0x39, - 0x56, 0xa1, 0xeb, 0xc1, 0x66, 0x3c, 0x8e, 0xc2, 0xc8, 0x8b, 0x89, 0xca, 0xb8, 0xba, 0x30, 0x23, - 0x4a, 0xe1, 0x0a, 0xe9, 0x6f, 0xa0, 0x36, 0x0e, 0x06, 0x24, 0xb2, 0xc8, 0x75, 0x48, 0x63, 0x32, - 0x50, 0x99, 0xd7, 0x16, 0x66, 0xde, 0xe6, 0x1c, 0x86, 0xa0, 0x50, 0xd8, 0xbf, 0x02, 0x74, 0xe9, - 0x8f, 0xa3, 0x68, 0x96, 0xb7, 0xba, 0x30, 0xef, 0x86, 0x44, 0xcf, 0x66, 0xe1, 0x1d, 0xb1, 0x07, - 0x1f, 0x88, 0x3d, 0x93, 0xd7, 0xf5, 0xc5, 0xb3, 0x90, 0xc2, 0xa7, 0xb2, 0x9d, 0x7f, 0xad, 0x40, - 0x29, 0xdd, 0x22, 0xe8, 0x54, 0x9e, 0xdd, 0x05, 0x4e, 0xf9, 0xf9, 0x1d, 0x77, 0x98, 0x7a, 0x96, - 0xbf, 0x86, 0x52, 0x48, 0x63, 0x8f, 0xe9, 0xf9, 0xfe, 0xaa, 0x1c, 0xec, 0xce, 0x63, 0xeb, 0x4a, - 0x3b, 0x9c, 0x21, 0xf4, 0xbf, 0x2f, 0x4f, 0x0f, 0xfa, 0xf3, 0xce, 0x9b, 0x8e, 0xf9, 0xb6, 0x63, - 0xa5, 0xc7, 0xb8, 0x76, 0x0f, 0xad, 0x42, 0xa9, 0x6d, 0x1c, 0xf7, 0x2d, 0xe3, 0xc2, 0xd0, 0x72, - 0x68, 0x0d, 0xca, 0xb8, 0x75, 0x72, 0x2a, 0x86, 0x79, 0x54, 0x83, 0x2d, 0xae, 0x34, 0x8f, 0xad, - 0xd4, 0xa8, 0x89, 0xcd, 0xb7, 0x5a, 0x81, 0x1d, 0xdb, 0xc2, 0xf0, 0xa6, 0x6a, 0x89, 0xa9, 0x52, - 0x50, 0xc6, 0xc5, 0x55, 0x45, 0xb4, 0x03, 0xdb, 0x19, 0x6a, 0x56, 0xb7, 0xcc, 0x60, 0x67, 0xad, - 0xa3, 0xae, 0xd9, 0xea, 0xf4, 0xad, 0xa6, 0xd1, 0x7f, 0x6b, 0x18, 0x1d, 0xa6, 0xed, 0x69, 0x2b, - 0x2c, 0xc6, 0x8e, 0xd9, 0x33, 0xac, 0x7e, 0xab, 0xab, 0x95, 0x58, 0x8c, 0xe7, 0xdd, 0xae, 0x81, - 0xad, 0x76, 0xab, 0xab, 0x95, 0xd9, 0xb0, 0x6d, 0xbe, 0x95, 0x43, 0x40, 0x55, 0x80, 0x33, 0xf3, - 0xbc, 0x7f, 0xca, 0xa3, 0xd2, 0x2a, 0x68, 0x1d, 0x2a, 0x62, 0xcc, 0xfd, 0x69, 0xab, 0x48, 0x83, - 0x55, 0x21, 0x38, 0x34, 0x3a, 0x7d, 0x03, 0x6b, 0x6b, 0xe8, 0x3e, 0x6c, 0x70, 0xfa, 0xa6, 0xd9, - 0xef, 0x9b, 0x67, 0xd2, 0xb0, 0xca, 0xf2, 0xa5, 0x8a, 0x39, 0xdf, 0x3a, 0xbb, 0x04, 0x55, 0xa9, - 0x24, 0xd1, 0xb2, 0x59, 0x1b, 0x17, 0x86, 0xd5, 0x37, 0xbb, 0x56, 0xd3, 0x3c, 0xef, 0x1c, 0x35, - 0xf0, 0x85, 0xb6, 0x31, 0xa3, 0x12, 0xb3, 0x3e, 0x34, 0x71, 0xc7, 0xc0, 0x1a, 0x42, 0x8f, 0xa0, - 0x96, 0xa9, 0x24, 0x63, 0x06, 0xdc, 0xcc, 0xd2, 0xcf, 0xb4, 0xfc, 0x43, 0xe2, 0xb6, 0xa6, 0x89, - 0xfc, 0x96, 0xbb, 0xfb, 0xb3, 0xba, 0x19, 0x7f, 0xdb, 0xe8, 0x31, 0x3c, 0x98, 0xea, 0x6e, 0x3a, - 0xfc, 0x64, 0x5a, 0xd5, 0x9b, 0x1e, 0x6b, 0xe8, 0x29, 0x3c, 0x54, 0xeb, 0x6c, 0x89, 0x12, 0xa4, - 0x15, 0xd3, 0x1e, 0xa0, 0x5d, 0x78, 0x34, 0x53, 0xd2, 0x9b, 0x16, 0x3b, 0x2c, 0xa1, 0x82, 0xa2, - 0x81, 0xad, 0x3e, 0x6e, 0x9c, 0xb0, 0x5b, 0xfe, 0x21, 0xcb, 0xbe, 0xc4, 0x29, 0xe2, 0x47, 0xfc, - 0x51, 0x91, 0xce, 0xbd, 0x7b, 0xde, 0x6d, 0xb5, 0xb5, 0xc7, 0xec, 0x51, 0x31, 0x0d, 0x4f, 0x08, - 0x9f, 0x30, 0xfc, 0xb1, 0x89, 0x8d, 0x53, 0xa3, 0x71, 0x64, 0x9d, 0xf0, 0x37, 0x47, 0xbb, 0xa1, - 0x3d, 0x45, 0x1b, 0xb0, 0x76, 0x78, 0xda, 0xea, 0x58, 0x27, 0x9d, 0x46, 0xff, 0x94, 0x51, 0xee, - 0x32, 0xff, 0x5c, 0xc4, 0x79, 0x4f, 0xcc, 0x0e, 0x93, 0x7e, 0xca, 0xf0, 0x5c, 
0x2a, 0x98, 0xa5, - 0x58, 0xd7, 0x5f, 0xc3, 0x6a, 0x9b, 0x3a, 0x7c, 0x53, 0xb6, 0x82, 0x21, 0x45, 0xcf, 0x61, 0xc5, - 0xb7, 0x13, 0xcb, 0x0f, 0x5c, 0x79, 0x95, 0x6f, 0xa6, 0x7b, 0x90, 0xed, 0xd1, 0x7a, 0xdb, 0x4e, - 0xda, 0x81, 0x8b, 0x97, 0x7d, 0xfe, 0xab, 0x7f, 0x0e, 0xa5, 0x6e, 0x44, 0x43, 0x12, 0x25, 0x13, - 0x84, 0x60, 0x29, 0xb0, 0x47, 0x44, 0x3e, 0x4c, 0xf8, 0x37, 0xda, 0x82, 0xe2, 0x95, 0xed, 0x8f, - 0xc5, 0x6b, 0xa3, 0x8c, 0xc5, 0x40, 0xff, 0x63, 0x01, 0x34, 0x23, 0x48, 0xbc, 0x64, 0xa2, 0xbc, - 0x24, 0x34, 0x28, 0x8c, 0xbc, 0x81, 0x44, 0xb3, 0x4f, 0xb4, 0x0d, 0xcb, 0x3e, 0x75, 0x6c, 0x3f, - 0x45, 0xcb, 0x11, 0xda, 0x85, 0xca, 0x80, 0xc4, 0x4e, 0xe4, 0x85, 0xfc, 0xa8, 0x28, 0x88, 0x87, - 0x90, 0x22, 0x62, 0x6e, 0x63, 0x87, 0x46, 0xe9, 0x35, 0x2d, 0x06, 0xe8, 0x09, 0x80, 0x72, 0x4f, - 0x8a, 0x3b, 0x5a, 0x91, 0x30, 0x7d, 0x42, 0x43, 0xcf, 0xb1, 0x7d, 0x2f, 0x99, 0xc8, 0x5b, 0x5a, - 0x91, 0x7c, 0xfb, 0xad, 0xb3, 0xf2, 0x9d, 0xdf, 0x3a, 0x4d, 0x28, 0xfb, 0x32, 0xeb, 0x71, 0xad, - 0xc4, 0xdf, 0x26, 0x73, 0x69, 0xd4, 0xf2, 0xe0, 0x29, 0x0c, 0xfd, 0x1c, 0x20, 0x14, 0xb9, 0xf7, - 0x48, 0x5c, 0x2b, 0x73, 0x92, 0xf9, 0x07, 0xa6, 0xac, 0x12, 0x56, 0x30, 0xfa, 0x9f, 0xf2, 0xb0, - 0xd5, 0xb3, 0x87, 0xa4, 0x47, 0xec, 0xc8, 0x79, 0xa7, 0xd4, 0xe2, 0x0b, 0x28, 0xda, 0x83, 0xb1, - 0x9f, 0xc8, 0x07, 0xf9, 0x22, 0xf7, 0x84, 0x00, 0x30, 0x64, 0x1c, 0x52, 0x3a, 0xe4, 0x25, 0x5b, - 0x10, 0xc9, 0x01, 0xe8, 0x35, 0xac, 0x8c, 0xc8, 0x80, 0xe5, 0x5a, 0x5e, 0x25, 0x8b, 0x60, 0x53, - 0x08, 0xfa, 0x19, 0x94, 0xae, 0x3c, 0xea, 0xf3, 0xca, 0x2e, 0x2d, 0x0c, 0xcf, 0x30, 0xfa, 0x07, - 0xa8, 0xb0, 0xa5, 0x4d, 0x03, 0x17, 0x13, 0x27, 0x41, 0x2f, 0xa1, 0x32, 0xf2, 0x02, 0x6b, 0x81, - 0x9d, 0x50, 0x1e, 0x79, 0x81, 0xf8, 0xe4, 0x20, 0xfb, 0x3a, 0x03, 0xe5, 0x3f, 0x06, 0xb2, 0xaf, - 0xc5, 0xa7, 0x1e, 0x41, 0xf9, 0x90, 0xf5, 0x4b, 0x7c, 0xf3, 0xed, 0x41, 0x91, 0x37, 0x4f, 0xd2, - 0x21, 0x9a, 0xc1, 0x72, 0x33, 0x2c, 0x0c, 0xa6, 0x2b, 0x3c, 0xaf, 0xae, 0xf0, 0x67, 0x50, 0x0d, - 0xbd, 0x6b, 0xe2, 0x5b, 0xc3, 0xc8, 0x76, 0xb2, 0xcd, 0x91, 0xc7, 0x6b, 0x5c, 0x7a, 0x2c, 0x85, - 0xfa, 0x39, 0xd4, 0x8e, 0xe8, 0xc8, 0x0b, 0xec, 0x20, 0xe1, 0xa4, 0xb1, 0x52, 0xfa, 0x9f, 0xc2, - 0x32, 0xf7, 0x10, 0xd7, 0x72, 0x7c, 0x45, 0x7d, 0x3a, 0x2f, 0x8d, 0x59, 0xd4, 0x58, 0x02, 0x74, - 0x1f, 0xd6, 0x79, 0xd7, 0xd0, 0xcd, 0x56, 0x18, 0xba, 0x80, 0xf5, 0x81, 0xf4, 0x64, 0x65, 0xb4, - 0x6c, 0x6a, 0x3f, 0x9e, 0x47, 0x3b, 0x2f, 0x30, 0x5c, 0x1d, 0xcc, 0x68, 0xf4, 0xdf, 0xc3, 0x2a, - 0xf7, 0x76, 0xc8, 0x3a, 0x9b, 0xeb, 0x04, 0x9d, 0xc0, 0x1a, 0xcf, 0x3c, 0x0d, 0x5c, 0x2b, 0x22, - 0x4e, 0x22, 0x1d, 0xcd, 0x6d, 0x70, 0x94, 0x72, 0xe3, 0x8a, 0xaf, 0xd4, 0xfe, 0x19, 0x54, 0x7d, - 0x3b, 0x70, 0xc7, 0xac, 0xd1, 0x7a, 0xe7, 0x05, 0xbc, 0xab, 0x2c, 0xec, 0x95, 0xf1, 0x5a, 0x2a, - 0x3d, 0x65, 0x42, 0xfd, 0x9b, 0x1c, 0x6c, 0xc9, 0xf0, 0x08, 0x0f, 0x04, 0x93, 0xdf, 0x8d, 0x49, - 0xcc, 0xd6, 0x4e, 0x91, 0x77, 0x69, 0x32, 0x80, 0xc7, 0x1f, 0xed, 0xb0, 0xb0, 0xb0, 0x45, 0xaf, - 0xa0, 0x34, 0x14, 0xdd, 0xad, 0x70, 0x57, 0x39, 0x78, 0xfa, 0x7f, 0xba, 0x60, 0x9c, 0x01, 0xd8, - 0xc1, 0x24, 0xfa, 0x42, 0x47, 0xe4, 0x82, 0x57, 0xfd, 0x23, 0x27, 0x8a, 0x9a, 0x37, 0xbc, 0xea, - 0x29, 0x23, 0xfd, 0x9f, 0x45, 0xb8, 0x7f, 0x63, 0x56, 0x71, 0x48, 0x83, 0x98, 0xa0, 0xaf, 0x40, - 0x1b, 0xda, 0x4e, 0xf6, 0x87, 0x03, 0x3f, 0xb9, 0xc4, 0x12, 0xf9, 0xc1, 0x62, 0x6f, 0x3e, 0xbc, - 0x3e, 0x9c, 0x19, 0xc7, 0xe8, 0xd7, 0xb0, 0x95, 0xb6, 0x29, 0x33, 0xb4, 0x22, 0x01, 0x7b, 0xf3, - 0x68, 0x6f, 0x5e, 0x1d, 0x78, 0x33, 0x65, 0x51, 0xc9, 0x7b, 0xa0, 0xf9, 0xd4, 0xa5, 0x33, 0xc4, - 0x85, 
0x3b, 0x12, 0xaf, 0x33, 0x06, 0x95, 0xf4, 0x1c, 0x36, 0x7c, 0xfb, 0x92, 0xf8, 0x33, 0xac, - 0x4b, 0x77, 0x64, 0xd5, 0x38, 0xc5, 0x8d, 0x58, 0x59, 0xf6, 0x67, 0x58, 0x8b, 0x77, 0x8d, 0x95, - 0x31, 0xa8, 0xa4, 0x97, 0xb0, 0x1d, 0xdb, 0x43, 0x62, 0xc5, 0xfc, 0x74, 0x57, 0xb8, 0xf9, 0xd5, - 0x56, 0x39, 0x78, 0x3e, 0x8f, 0xfa, 0xb6, 0x2b, 0x01, 0x6f, 0xc5, 0xb7, 0x5d, 0x14, 0x2e, 0x3c, - 0x14, 0x2b, 0x6f, 0x7a, 0xab, 0xa8, 0x8e, 0x4a, 0xdc, 0xd1, 0x0f, 0x3f, 0xba, 0x0e, 0xa7, 0xa7, - 0x05, 0x7e, 0xe0, 0xcd, 0x0a, 0x14, 0x47, 0x7b, 0x50, 0x24, 0x51, 0x44, 0x23, 0xde, 0xaf, 0x2a, - 0x27, 0x63, 0x14, 0x3a, 0xf5, 0x1e, 0xff, 0x27, 0x0a, 0x0b, 0x03, 0x7d, 0x08, 0x3b, 0x4d, 0x3b, - 0xc9, 0xa2, 0x14, 0xab, 0x38, 0x4e, 0x37, 0xe7, 0x29, 0x94, 0x22, 0xf1, 0x99, 0xae, 0xde, 0xb9, - 0x69, 0xb8, 0x6d, 0x73, 0xe3, 0x0c, 0xad, 0xbf, 0x87, 0x87, 0xb7, 0xfa, 0x91, 0xdb, 0xe5, 0x0d, - 0x94, 0x23, 0xf9, 0x9d, 0x7a, 0xfa, 0xd1, 0x82, 0x9e, 0x04, 0x0a, 0x4f, 0xf1, 0x9f, 0x11, 0x00, - 0xa5, 0xa7, 0xab, 0xc0, 0x8a, 0x6c, 0x70, 0xb4, 0x7b, 0xec, 0xfd, 0xf7, 0x0b, 0x03, 0x5f, 0x58, - 0xe7, 0x9d, 0x76, 0xeb, 0x8d, 0xd1, 0xbe, 0xd0, 0x72, 0xac, 0x8d, 0xc8, 0x46, 0x79, 0x36, 0xea, - 0x9a, 0xbd, 0x5e, 0xab, 0xd9, 0x36, 0xb4, 0x02, 0x02, 0x58, 0x96, 0x9a, 0x25, 0xd6, 0x32, 0x70, - 0xa8, 0x14, 0x14, 0x0f, 0xfe, 0x96, 0x83, 0x2a, 0x8f, 0xa1, 0x91, 0xfe, 0xa9, 0x88, 0xfe, 0x9a, - 0x83, 0xcd, 0x5b, 0xa6, 0x89, 0x0e, 0xe6, 0x3e, 0x7a, 0xe6, 0xe6, 0x7e, 0xe7, 0xe5, 0x9d, 0x30, - 0x62, 0xee, 0xfa, 0x93, 0x3f, 0x7c, 0xf3, 0x9f, 0x3f, 0xe7, 0x6b, 0xfa, 0x66, 0xf6, 0x97, 0x67, - 0xfc, 0xa5, 0x5c, 0x61, 0xe4, 0xcb, 0xdc, 0x67, 0xcd, 0x04, 0x76, 0x1c, 0x3a, 0x9a, 0xc3, 0xdc, - 0xdc, 0x9c, 0x9d, 0x4e, 0x37, 0xa2, 0x09, 0xed, 0xe6, 0x7e, 0xf5, 0x5a, 0x9a, 0xbb, 0x94, 0x9d, - 0xe9, 0x75, 0x1a, 0xb9, 0xfb, 0x2e, 0x09, 0xf8, 0xff, 0x92, 0xfb, 0x42, 0x65, 0x87, 0x5e, 0x7c, - 0xf3, 0xdf, 0xd0, 0x57, 0xe2, 0xeb, 0xbf, 0xb9, 0xdc, 0xe5, 0x32, 0xb7, 0x7d, 0xf9, 0xbf, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x83, 0x58, 0x40, 0x64, 0x9c, 0x15, 0x00, 0x00, + // 2281 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x59, 0x4b, 0x73, 0x23, 0x49, + 0xf1, 0x5f, 0xc9, 0x2f, 0x29, 0xf5, 0x6a, 0x97, 0x1f, 0xa3, 0xb1, 0xe7, 0xe1, 0xed, 0xfd, 0xcf, + 0x1f, 0xc7, 0x30, 0xd8, 0x8c, 0x67, 0x21, 0x96, 0x9d, 0x09, 0x40, 0x92, 0xdb, 0xb6, 0x62, 0x64, + 0x49, 0x5b, 0x92, 0xd7, 0x6b, 0x20, 0xe8, 0x68, 0xb7, 0x4a, 0x9a, 0x9e, 0x6d, 0x75, 0x35, 0xdd, + 0xad, 0x19, 0xfb, 0x4a, 0x04, 0x11, 0xdc, 0xb9, 0x73, 0xe4, 0x4e, 0x04, 0x5f, 0x81, 0x03, 0x47, + 0x62, 0xcf, 0xdc, 0xf8, 0x0c, 0x04, 0x47, 0xa2, 0x1e, 0xdd, 0x2a, 0x69, 0x2c, 0x8f, 0x4c, 0x70, + 0x52, 0x57, 0x66, 0xfe, 0x7e, 0x59, 0x95, 0x55, 0x59, 0x59, 0x55, 0x82, 0x67, 0x03, 0x4a, 0x07, + 0x2e, 0xd9, 0xb7, 0x5d, 0x3a, 0xea, 0xed, 0xbf, 0x73, 0x42, 0x87, 0x7a, 0xfb, 0xef, 0x9e, 0xef, + 0x3b, 0x43, 0x6b, 0x40, 0x4c, 0xcb, 0xf3, 0x68, 0x64, 0x45, 0x34, 0xd8, 0xf3, 0x03, 0x1a, 0x51, + 0xb4, 0x29, 0xac, 0xf7, 0xb8, 0xf5, 0x9e, 0xb0, 0xde, 0x7b, 0xf7, 0x7c, 0xeb, 0x81, 0x64, 0xb1, + 0x7c, 0x67, 0x5f, 0x62, 0x1c, 0xea, 0x85, 0x02, 0xb5, 0xf5, 0x64, 0x86, 0x8f, 0x01, 0xa1, 0x43, + 0x12, 0x05, 0xd7, 0xd2, 0x6c, 0x56, 0x57, 0x22, 0x72, 0x15, 0x99, 0x63, 0x56, 0x69, 0xfd, 0x74, + 0x86, 0xf5, 0x7b, 0x72, 0x69, 0xf6, 0x48, 0x44, 0x6c, 0xc5, 0xf6, 0x9e, 0xb4, 0x0d, 0x7c, 0x7b, + 0x3f, 0x8c, 0xac, 0x68, 0x14, 0x4e, 0x29, 0xa2, 0x6b, 0x9f, 0xec, 0xdb, 0xd4, 0x8d, 0x07, 0xba, + 0x55, 0x56, 0x15, 0xae, 0x15, 0xb9, 0xde, 0x40, 0x68, 0xf4, 0x7f, 0xa4, 0x61, 0xe5, 0x88, 0x58, + 0xd1, 
0x28, 0x20, 0xe8, 0x0b, 0x58, 0x64, 0x06, 0xe5, 0xd4, 0x4e, 0x6a, 0xb7, 0x78, 0xf0, 0x7f, + 0x7b, 0x37, 0x47, 0x67, 0x4f, 0x9a, 0xef, 0x75, 0xaf, 0x7d, 0x82, 0x39, 0x02, 0x3d, 0x86, 0xdc, + 0xd0, 0xba, 0x32, 0x03, 0x12, 0x8e, 0xdc, 0x28, 0x2c, 0xa7, 0x77, 0x52, 0xbb, 0x4b, 0x18, 0x86, + 0xd6, 0x15, 0x16, 0x12, 0xfd, 0x5f, 0x29, 0x58, 0x64, 0xf6, 0x68, 0x1d, 0xb4, 0xee, 0x45, 0xdb, + 0x30, 0xcf, 0x9a, 0x9d, 0xb6, 0x51, 0xab, 0x1f, 0xd5, 0x8d, 0x43, 0xed, 0x13, 0x84, 0xa0, 0x78, + 0x54, 0xa9, 0x19, 0xe6, 0xa1, 0xd1, 0x35, 0x6a, 0xdd, 0x7a, 0xab, 0xa9, 0xa5, 0xd0, 0x26, 0xa0, + 0x46, 0xa5, 0x79, 0x78, 0x5a, 0xc1, 0xaf, 0x15, 0x79, 0x9a, 0xd9, 0x36, 0x5a, 0xc7, 0x2d, 0x45, + 0xb6, 0x80, 0xd6, 0xa0, 0xd4, 0xa8, 0x54, 0x8d, 0x86, 0x22, 0x5c, 0x64, 0x86, 0x5d, 0xe3, 0x9b, + 0xae, 0x22, 0x5b, 0x42, 0xdb, 0x70, 0xef, 0xb0, 0x55, 0x3b, 0x3b, 0x35, 0x9a, 0x5d, 0x73, 0x4a, + 0x99, 0x43, 0xf7, 0x61, 0xa3, 0x53, 0x39, 0x32, 0xcc, 0x8e, 0x51, 0xc1, 0xb5, 0x13, 0x45, 0xb5, + 0xcc, 0xba, 0x5d, 0x3f, 0xad, 0x1c, 0x1b, 0x66, 0x1b, 0xb7, 0xda, 0x06, 0xee, 0xd6, 0x8d, 0x8e, + 0xb6, 0x82, 0x8a, 0x00, 0x35, 0xdc, 0x6a, 0x9b, 0x27, 0xf5, 0x66, 0xb7, 0xa3, 0x65, 0xd1, 0x2a, + 0x14, 0xce, 0x8d, 0xaa, 0x02, 0x04, 0xbd, 0x09, 0xb9, 0x3a, 0x5b, 0x7b, 0x1d, 0x3a, 0x0a, 0x6c, + 0x82, 0x74, 0x28, 0x0c, 0xec, 0xd0, 0x14, 0xcb, 0x71, 0x14, 0x38, 0x3c, 0xd6, 0x59, 0x9c, 0x1b, + 0xd8, 0x21, 0x37, 0x3b, 0x0b, 0x1c, 0xb4, 0x0d, 0xd9, 0xb1, 0x3e, 0xcd, 0xf5, 0x19, 0x47, 0x2a, + 0xf5, 0x5f, 0xc3, 0x12, 0x37, 0x44, 0x65, 0x58, 0xb1, 0xa9, 0x17, 0x11, 0x2f, 0xe2, 0x1c, 0x79, + 0x1c, 0x37, 0xd1, 0x4b, 0x58, 0x0e, 0xb9, 0x37, 0x0e, 0xce, 0x1d, 0x7c, 0x36, 0x6b, 0x22, 0x95, + 0x8e, 0x61, 0x09, 0xd1, 0xff, 0x5e, 0x82, 0xe2, 0x91, 0x65, 0x93, 0x4a, 0xb2, 0x40, 0x51, 0x1d, + 0x0a, 0x97, 0x74, 0xe4, 0xf5, 0x1c, 0x6f, 0x60, 0xfa, 0xd4, 0xbd, 0xe6, 0xfe, 0x72, 0xb3, 0xd7, + 0x47, 0x55, 0x1a, 0xb7, 0xa9, 0x7b, 0x8d, 0xf3, 0x97, 0x4a, 0x0b, 0x35, 0x41, 0xeb, 0xf7, 0xcc, + 0x49, 0xb6, 0xf4, 0x1d, 0xd8, 0x8a, 0xfd, 0x9e, 0xda, 0x46, 0xa7, 0x90, 0x75, 0x2d, 0xaf, 0x37, + 0xb4, 0x82, 0x6f, 0xc3, 0xf2, 0xc2, 0xce, 0xc2, 0x6e, 0xee, 0x60, 0x7f, 0xe6, 0xb2, 0x9d, 0x18, + 0xd5, 0x5e, 0x43, 0xe2, 0xf0, 0x98, 0x01, 0x3d, 0x04, 0x08, 0xa8, 0xeb, 0x9a, 0x96, 0x37, 0x70, + 0x49, 0x79, 0x71, 0x27, 0xb5, 0x9b, 0xc6, 0x59, 0x26, 0xa9, 0x30, 0x01, 0x9b, 0x18, 0xdf, 0xf2, + 0xa4, 0x76, 0x89, 0x6b, 0x33, 0xbe, 0xe5, 0x09, 0xe5, 0x43, 0x80, 0xc8, 0x71, 0x23, 0xa9, 0x5d, + 0x16, 0x58, 0x26, 0x11, 0xea, 0xe7, 0xb0, 0x9e, 0xa4, 0xb1, 0x69, 0x53, 0xaf, 0xef, 0xf4, 0x88, + 0x67, 0x93, 0xf2, 0x0a, 0x37, 0x5c, 0x4b, 0x74, 0xb5, 0x44, 0x85, 0x7e, 0x04, 0x9b, 0x71, 0xd7, + 0x58, 0xb0, 0x14, 0x50, 0x86, 0x83, 0x36, 0x14, 0xad, 0x02, 0xab, 0x43, 0xf1, 0x2d, 0xbd, 0x36, + 0x5d, 0xe7, 0x5b, 0xe2, 0x3a, 0x6f, 0x28, 0xed, 0x95, 0xb3, 0x3c, 0x9f, 0xf5, 0x59, 0x81, 0x69, + 0x24, 0x96, 0xb8, 0xf0, 0x96, 0x5e, 0x8f, 0x9b, 0xa8, 0x05, 0xab, 0x21, 0x0d, 0x02, 0xfa, 0x5e, + 0x65, 0x83, 0xb9, 0xd9, 0x34, 0x01, 0x56, 0x08, 0x4f, 0x41, 0xb3, 0xbc, 0x01, 0x09, 0x54, 0xbe, + 0xdc, 0xdc, 0x7c, 0x25, 0x8e, 0x55, 0xe8, 0x3a, 0xb0, 0x16, 0x8e, 0x02, 0x3f, 0x70, 0x42, 0xa2, + 0x32, 0xe6, 0xe7, 0x66, 0x44, 0x31, 0x5c, 0x21, 0xfd, 0x15, 0x94, 0x47, 0x5e, 0x8f, 0x04, 0x26, + 0xb9, 0xf2, 0x69, 0x48, 0x7a, 0x2a, 0x73, 0x61, 0x6e, 0xe6, 0x4d, 0xce, 0x61, 0x08, 0x0a, 0x85, + 0xfd, 0x2b, 0x40, 0x97, 0xee, 0x28, 0x08, 0x26, 0x79, 0x8b, 0x73, 0xf3, 0xae, 0x4a, 0xf4, 0x64, + 0x14, 0xde, 0x10, 0xab, 0xf7, 0x9e, 0x58, 0x13, 0x71, 0x2d, 0xcd, 0x1f, 0x85, 0x18, 0x3e, 0x96, + 0x6d, 0xfd, 0x6d, 0x05, 0x32, 
0x71, 0x8a, 0xa0, 0x13, 0x59, 0x18, 0x16, 0x38, 0xe5, 0xe7, 0x77, + 0xcc, 0x30, 0xb5, 0x50, 0xbc, 0x82, 0x8c, 0x4f, 0x43, 0x87, 0xe9, 0x79, 0x7e, 0xe5, 0x0e, 0x76, + 0x66, 0xb1, 0xb5, 0xa5, 0x1d, 0x4e, 0x10, 0xfa, 0x5f, 0x96, 0xc7, 0x55, 0xe4, 0xac, 0xf9, 0xba, + 0xd9, 0x3a, 0x6f, 0x9a, 0x71, 0x8d, 0xd0, 0x3e, 0x41, 0x79, 0xc8, 0x34, 0x8c, 0xa3, 0xae, 0x69, + 0x5c, 0x18, 0x5a, 0x0a, 0x15, 0x20, 0x8b, 0xeb, 0xc7, 0x27, 0xa2, 0x99, 0x46, 0x65, 0x58, 0xe7, + 0xca, 0xd6, 0x91, 0x19, 0x1b, 0x55, 0x71, 0xeb, 0x5c, 0x5b, 0x60, 0xdb, 0xbe, 0x30, 0x9c, 0x56, + 0x2d, 0x32, 0x55, 0x0c, 0x4a, 0xb8, 0xb8, 0x6a, 0x09, 0x6d, 0xc1, 0x66, 0x82, 0x9a, 0xd4, 0x2d, + 0x33, 0xd8, 0x69, 0xfd, 0xb0, 0xdd, 0xaa, 0x37, 0xbb, 0x66, 0xd5, 0xe8, 0x9e, 0x1b, 0x46, 0x93, + 0x69, 0x59, 0xc9, 0xc8, 0x43, 0xa6, 0xd9, 0xea, 0x18, 0x66, 0xb7, 0xde, 0xd6, 0x32, 0xac, 0x8f, + 0x67, 0xed, 0xb6, 0x81, 0xcd, 0x46, 0xbd, 0xad, 0x65, 0x59, 0xb3, 0xd1, 0x3a, 0x97, 0x4d, 0x60, + 0xe5, 0xe5, 0xb4, 0x75, 0xd6, 0x3d, 0xe1, 0xbd, 0xd2, 0x72, 0xa8, 0x04, 0x39, 0xd1, 0xe6, 0xfe, + 0xb4, 0x3c, 0xd2, 0x20, 0x2f, 0x04, 0x35, 0xa3, 0xd9, 0x35, 0xb0, 0x56, 0x40, 0x1b, 0xb0, 0xca, + 0xe9, 0xab, 0xad, 0x6e, 0xb7, 0x75, 0x2a, 0x0d, 0x8b, 0x2c, 0x5e, 0xaa, 0x98, 0xf3, 0x95, 0x58, + 0x85, 0x55, 0xa5, 0x92, 0x44, 0x4b, 0x46, 0x6d, 0x5c, 0x18, 0x66, 0xb7, 0xd5, 0x36, 0xab, 0xad, + 0xb3, 0xe6, 0x61, 0x05, 0x5f, 0x68, 0xab, 0x13, 0x2a, 0x31, 0xea, 0x5a, 0x0b, 0x37, 0x0d, 0xac, + 0x21, 0xf4, 0x00, 0xca, 0x89, 0x4a, 0x32, 0x26, 0xc0, 0xb5, 0x24, 0xfc, 0x4c, 0xcb, 0x3f, 0x24, + 0x6e, 0x7d, 0x1c, 0xc8, 0x0f, 0xdc, 0x6d, 0x4c, 0xea, 0x26, 0xfc, 0x6d, 0xa2, 0x87, 0x70, 0x7f, + 0xac, 0x9b, 0x76, 0x78, 0x6f, 0x3c, 0xab, 0xd3, 0x1e, 0xcb, 0xe8, 0x31, 0x6c, 0xab, 0xf3, 0x6c, + 0x8a, 0x29, 0x88, 0x67, 0x4c, 0xbb, 0x8f, 0x76, 0xe0, 0xc1, 0xc4, 0x94, 0x4e, 0x5b, 0x6c, 0xb1, + 0x80, 0x0a, 0x8a, 0x0a, 0x36, 0xbb, 0xb8, 0x72, 0xcc, 0x8a, 0xfd, 0x36, 0x8b, 0xbe, 0xc4, 0x29, + 0xe2, 0x07, 0xfc, 0xc4, 0x12, 0x8f, 0xbd, 0x7d, 0xd6, 0xae, 0x37, 0xb4, 0x87, 0xec, 0xc4, 0x32, + 0xee, 0x9e, 0x10, 0x3e, 0x62, 0xf8, 0xa3, 0x16, 0x36, 0x4e, 0x8c, 0xca, 0xa1, 0x79, 0xcc, 0x0f, + 0x34, 0x8d, 0x8a, 0xf6, 0x98, 0x1d, 0x2b, 0x6a, 0x27, 0xf5, 0xa6, 0x79, 0xdc, 0xac, 0x74, 0x4f, + 0x18, 0xe5, 0x0e, 0xf3, 0xcf, 0x45, 0x9c, 0xf7, 0xb8, 0xd5, 0x64, 0xd2, 0x4f, 0x19, 0x9e, 0x4b, + 0x05, 0xb3, 0x14, 0xeb, 0xfa, 0x2b, 0xc8, 0x37, 0xa8, 0xcd, 0x93, 0xb2, 0xee, 0xf5, 0x29, 0x7a, + 0x06, 0x2b, 0xae, 0x15, 0x99, 0xae, 0x37, 0x90, 0xa5, 0x7c, 0x2d, 0xce, 0x41, 0x96, 0xa3, 0x7b, + 0x0d, 0x2b, 0x6a, 0x78, 0x03, 0xbc, 0xec, 0xf2, 0x5f, 0xfd, 0x73, 0xc8, 0xb4, 0x03, 0xea, 0x93, + 0x20, 0xba, 0x46, 0x08, 0x16, 0x3d, 0x6b, 0x48, 0xe4, 0xa9, 0x85, 0x7f, 0xa3, 0x75, 0x58, 0x7a, + 0x67, 0xb9, 0x23, 0x22, 0x8f, 0x2a, 0xa2, 0xa1, 0xff, 0x6e, 0x01, 0x34, 0xc3, 0x8b, 0x9c, 0xe8, + 0x5a, 0x39, 0x49, 0x68, 0xb0, 0x30, 0x74, 0x7a, 0x12, 0xcd, 0x3e, 0xd1, 0x26, 0x2c, 0xbb, 0xd4, + 0xb6, 0xdc, 0x18, 0x2d, 0x5b, 0x68, 0x07, 0x72, 0x3d, 0x12, 0xda, 0x81, 0xe3, 0xf3, 0xad, 0x62, + 0x41, 0x9c, 0x92, 0x14, 0x11, 0x73, 0x1b, 0xda, 0x34, 0x88, 0xcb, 0xb4, 0x68, 0xa0, 0x47, 0x00, + 0x4a, 0x9d, 0x14, 0x35, 0x5a, 0x91, 0x30, 0x7d, 0x44, 0x7d, 0xc7, 0xb6, 0x5c, 0x27, 0xba, 0x96, + 0x55, 0x5a, 0x91, 0x7c, 0x78, 0xd6, 0x59, 0xf9, 0xaf, 0xcf, 0x3a, 0x55, 0xc8, 0xba, 0x32, 0xea, + 0x61, 0x39, 0xc3, 0xcf, 0x26, 0x33, 0x69, 0xd4, 0xe9, 0xc1, 0x63, 0x18, 0xfa, 0x39, 0x80, 0x2f, + 0x62, 0xef, 0x90, 0xb0, 0x9c, 0xe5, 0x24, 0xb3, 0x37, 0x4c, 0x39, 0x4b, 0x58, 0xc1, 0xe8, 0xbf, + 0x4f, 0xc3, 0x7a, 0xc7, 0xea, 0x93, 0x0e, 0xb1, 0x02, 
0xfb, 0x8d, 0x32, 0x17, 0x5f, 0xc0, 0x92, + 0xd5, 0x1b, 0xb9, 0x91, 0x3c, 0xed, 0xcf, 0x53, 0x27, 0x04, 0x80, 0x21, 0x43, 0x9f, 0xd2, 0x3e, + 0x9f, 0xb2, 0x39, 0x91, 0x1c, 0x80, 0x5e, 0xc1, 0xca, 0x90, 0xf4, 0x58, 0xac, 0x65, 0x29, 0x99, + 0x07, 0x1b, 0x43, 0xd0, 0x4f, 0x21, 0xf3, 0xce, 0xa1, 0x2e, 0x9f, 0xd9, 0xc5, 0xb9, 0xe1, 0x09, + 0x46, 0x7f, 0x0f, 0x39, 0xb6, 0xb4, 0xa9, 0x37, 0xc0, 0xc4, 0x8e, 0xd0, 0x0b, 0xc8, 0x0d, 0x1d, + 0xcf, 0x9c, 0x23, 0x13, 0xb2, 0x43, 0xc7, 0x13, 0x9f, 0x1c, 0x64, 0x5d, 0x25, 0xa0, 0xf4, 0x6d, + 0x20, 0xeb, 0x4a, 0x7c, 0xea, 0x01, 0x64, 0x6b, 0xec, 0x32, 0xc6, 0x93, 0x6f, 0x17, 0x96, 0xf8, + 0xcd, 0x4c, 0x3a, 0x44, 0x13, 0x58, 0x6e, 0x86, 0x85, 0xc1, 0x78, 0x85, 0xa7, 0xd5, 0x15, 0xfe, + 0x04, 0x8a, 0xbe, 0x73, 0x45, 0x5c, 0xb3, 0x1f, 0x58, 0x76, 0x92, 0x1c, 0x69, 0x5c, 0xe0, 0xd2, + 0x23, 0x29, 0xd4, 0xcf, 0xa0, 0x7c, 0x48, 0x87, 0x8e, 0x67, 0x79, 0x11, 0x27, 0x0d, 0x95, 0xa9, + 0xff, 0x09, 0x2c, 0x73, 0x0f, 0x61, 0x39, 0xc5, 0x57, 0xd4, 0xa7, 0xb3, 0xc2, 0x98, 0xf4, 0x1a, + 0x4b, 0x80, 0xee, 0x42, 0x89, 0xdf, 0x1a, 0xda, 0xc9, 0x0a, 0x43, 0x17, 0x50, 0xea, 0x49, 0x4f, + 0x66, 0x42, 0xcb, 0x86, 0xf6, 0xc3, 0x59, 0xb4, 0xb3, 0x3a, 0x86, 0x8b, 0xbd, 0x09, 0x8d, 0xfe, + 0xa7, 0x14, 0x64, 0x6a, 0x01, 0xf5, 0x4f, 0x1c, 0x2f, 0xfa, 0x5f, 0x5e, 0x43, 0x26, 0x77, 0x89, + 0xf4, 0x07, 0xbb, 0xc4, 0x3e, 0xac, 0x39, 0x43, 0x9f, 0x06, 0x91, 0xe5, 0xd9, 0x64, 0x3a, 0xd0, + 0x68, 0xac, 0x4a, 0xa2, 0xfd, 0x35, 0xac, 0xc5, 0xfd, 0x54, 0x03, 0xfd, 0x33, 0x00, 0x3b, 0xa0, + 0xbe, 0xf9, 0x86, 0xc9, 0x65, 0xb0, 0x67, 0xa6, 0x6f, 0x4c, 0x80, 0xb3, 0x76, 0x4c, 0xa5, 0xff, + 0x18, 0x4a, 0x09, 0x6f, 0xdb, 0x0a, 0xac, 0x61, 0x88, 0x3e, 0x83, 0x82, 0x15, 0xfa, 0xc4, 0x8e, + 0xcc, 0x80, 0x39, 0x11, 0xb4, 0x69, 0x9c, 0x17, 0x42, 0xcc, 0x65, 0xfa, 0x77, 0x29, 0xc8, 0xf3, + 0x79, 0xaa, 0xb1, 0x3b, 0xe1, 0x55, 0x84, 0x8e, 0xa1, 0xc0, 0xd7, 0x2c, 0xf5, 0x06, 0x66, 0x40, + 0xec, 0x48, 0x06, 0x6f, 0xe6, 0xd5, 0x50, 0x49, 0x14, 0x9c, 0x73, 0x95, 0xac, 0x79, 0x02, 0x45, + 0xd7, 0xf2, 0x06, 0x23, 0x76, 0x3f, 0x15, 0xc3, 0x4a, 0xef, 0x2c, 0xec, 0x66, 0x71, 0x21, 0x96, + 0xf2, 0xbe, 0xa2, 0x0e, 0xac, 0x8e, 0x47, 0x6e, 0xfa, 0xbc, 0xeb, 0xf2, 0xc0, 0xf7, 0xbd, 0x8f, + 0x05, 0x40, 0x8e, 0x14, 0x97, 0xec, 0x49, 0x01, 0x1b, 0xd5, 0xba, 0x8c, 0x2e, 0xe1, 0xa3, 0xc3, + 0xe4, 0x37, 0x23, 0x12, 0xb2, 0x54, 0x5e, 0xe2, 0x17, 0x64, 0x39, 0xaa, 0x87, 0xb7, 0x5e, 0x78, + 0xb1, 0xb0, 0x45, 0x2f, 0x21, 0xd3, 0x17, 0x2f, 0x19, 0x62, 0x0c, 0xb9, 0x83, 0xc7, 0x1f, 0x79, + 0xf1, 0xc0, 0x09, 0x80, 0x2d, 0x46, 0x71, 0x47, 0xb7, 0x45, 0x80, 0xf9, 0xda, 0xb8, 0x65, 0x31, + 0xaa, 0x93, 0x81, 0xf3, 0x8e, 0xd2, 0xd2, 0xff, 0xba, 0x02, 0x1b, 0x53, 0xa3, 0x0a, 0x7d, 0xea, + 0x85, 0x04, 0x7d, 0x05, 0x5a, 0xdf, 0xb2, 0x89, 0xf2, 0x58, 0x14, 0x2f, 0xa2, 0xff, 0x9f, 0xef, + 0x08, 0x8e, 0x4b, 0xfd, 0x89, 0x76, 0x88, 0x7e, 0x09, 0xeb, 0xf1, 0xad, 0x71, 0x82, 0x56, 0x04, + 0x60, 0x77, 0x16, 0xed, 0x74, 0x25, 0xc7, 0x6b, 0x31, 0x8b, 0x4a, 0xde, 0x01, 0xcd, 0xa5, 0x03, + 0x3a, 0x41, 0xbc, 0x70, 0x47, 0xe2, 0x12, 0x63, 0x50, 0x49, 0xcf, 0x60, 0xd5, 0xb5, 0x2e, 0x89, + 0x3b, 0xc1, 0xba, 0x78, 0x47, 0x56, 0x8d, 0x53, 0x4c, 0xf5, 0x75, 0xea, 0x21, 0x2e, 0x2c, 0x2f, + 0xdd, 0xb5, 0xaf, 0x8c, 0x41, 0x25, 0xfd, 0x06, 0xd6, 0xfb, 0x23, 0xd7, 0x35, 0xa7, 0x98, 0xf9, + 0x85, 0xf4, 0x96, 0x49, 0xeb, 0x4e, 0xd0, 0x60, 0xc4, 0x38, 0x26, 0x65, 0xe8, 0x12, 0x36, 0x43, + 0xab, 0x4f, 0xcc, 0x90, 0x97, 0x71, 0x95, 0x7b, 0x99, 0x73, 0x3f, 0x9b, 0xc5, 0x7d, 0x53, 0xed, + 0xc7, 0xeb, 0xe1, 0x4d, 0x27, 0x82, 0x01, 0x6c, 0x8b, 0x35, 0x3d, 0x3e, 0x3e, 
0xa8, 0x8e, 0x32, + 0xb7, 0x67, 0xef, 0x54, 0x59, 0xc0, 0xf7, 0x9d, 0x49, 0x81, 0xe2, 0xc8, 0x84, 0x0d, 0x65, 0x73, + 0x50, 0x5c, 0xe4, 0xb8, 0x8b, 0xef, 0x7f, 0x74, 0x83, 0x50, 0x17, 0xa2, 0x7d, 0xc3, 0xbe, 0x5b, + 0x87, 0xc2, 0xc4, 0xbb, 0x29, 0xbf, 0xb7, 0xdf, 0x92, 0x9d, 0xe7, 0xe4, 0xf2, 0x30, 0xb6, 0xc5, + 0xf9, 0xf7, 0x4a, 0x8b, 0x95, 0x6b, 0x12, 0x04, 0x34, 0xe0, 0x8f, 0x28, 0x4a, 0xb9, 0x0e, 0x7c, + 0x7b, 0xaf, 0xc3, 0xdf, 0x5e, 0xb1, 0x30, 0xd0, 0xfb, 0xb0, 0x55, 0xb5, 0xa2, 0x24, 0xa2, 0x22, + 0x97, 0xc3, 0x78, 0x8b, 0x3a, 0x81, 0x4c, 0x20, 0x3e, 0xe3, 0x1c, 0x9e, 0x39, 0x65, 0x37, 0x6d, + 0x71, 0x38, 0x41, 0xeb, 0x6f, 0x61, 0xfb, 0x46, 0x3f, 0x72, 0xd3, 0x78, 0x0d, 0xd9, 0x40, 0x7e, + 0xc7, 0x9e, 0x7e, 0x30, 0xa7, 0x27, 0x81, 0xc2, 0x63, 0xfc, 0x53, 0x02, 0xa0, 0x3c, 0x34, 0xe4, + 0x60, 0x45, 0xde, 0xba, 0xb5, 0x4f, 0xd8, 0xa5, 0xe4, 0x6b, 0x03, 0x5f, 0x98, 0x67, 0xcd, 0x46, + 0xfd, 0xb5, 0xd1, 0xb8, 0xd0, 0x52, 0xec, 0x6e, 0x9b, 0xb4, 0xd2, 0xac, 0xd5, 0x6e, 0x75, 0x3a, + 0xf5, 0x6a, 0xc3, 0xd0, 0x16, 0x10, 0xc0, 0xb2, 0xd4, 0x2c, 0xb2, 0x7b, 0x2c, 0x87, 0x4a, 0xc1, + 0xd2, 0xc1, 0x9f, 0x53, 0x50, 0xe4, 0x7d, 0xa8, 0xc4, 0x0f, 0xf4, 0xe8, 0x8f, 0x29, 0x58, 0xbb, + 0x61, 0x98, 0xe8, 0x60, 0x66, 0xb9, 0x9f, 0x19, 0xfb, 0xad, 0x17, 0x77, 0xc2, 0x88, 0xb1, 0xeb, + 0x8f, 0x7e, 0xfb, 0xdd, 0x3f, 0xff, 0x90, 0x2e, 0xeb, 0x6b, 0xc9, 0xdf, 0x07, 0xe1, 0x97, 0x72, + 0xa9, 0x92, 0x2f, 0x53, 0x4f, 0xab, 0x11, 0x6c, 0xd9, 0x74, 0x38, 0x83, 0xb9, 0xba, 0x36, 0x39, + 0x9c, 0x76, 0x40, 0x23, 0xda, 0x4e, 0xfd, 0xe2, 0x95, 0x34, 0x1f, 0x50, 0x56, 0x2e, 0xf7, 0x68, + 0x30, 0xd8, 0x1f, 0x10, 0x8f, 0xbf, 0xc4, 0xef, 0x0b, 0x95, 0xe5, 0x3b, 0xe1, 0xf4, 0x9f, 0x00, + 0x2f, 0xc5, 0xd7, 0xbf, 0x53, 0xa9, 0xcb, 0x65, 0x6e, 0xfb, 0xe2, 0x3f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xe5, 0x59, 0xbe, 0xb0, 0xe8, 0x18, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/text_annotation.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/text_annotation.pb.go new file mode 100644 index 000000000..dc342d827 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/text_annotation.pb.go @@ -0,0 +1,538 @@ +// Code generated by protoc-gen-go. +// source: google/cloud/vision/v1/text_annotation.proto +// DO NOT EDIT! + +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Enum to denote the type of break found. New line, space etc. +type TextAnnotation_DetectedBreak_BreakType int32 + +const ( + // Unknown break label type. + TextAnnotation_DetectedBreak_UNKNOWN TextAnnotation_DetectedBreak_BreakType = 0 + // Regular space. + TextAnnotation_DetectedBreak_SPACE TextAnnotation_DetectedBreak_BreakType = 1 + // Sure space (very wide). + TextAnnotation_DetectedBreak_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 2 + // Line-wrapping break. + TextAnnotation_DetectedBreak_EOL_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 3 + // End-line hyphen that is not present in text; does + TextAnnotation_DetectedBreak_HYPHEN TextAnnotation_DetectedBreak_BreakType = 4 + // not co-occur with SPACE, LEADER_SPACE, or + // LINE_BREAK. + // Line break that ends a paragraph. 
+ TextAnnotation_DetectedBreak_LINE_BREAK TextAnnotation_DetectedBreak_BreakType = 5 +) + +var TextAnnotation_DetectedBreak_BreakType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SPACE", + 2: "SURE_SPACE", + 3: "EOL_SURE_SPACE", + 4: "HYPHEN", + 5: "LINE_BREAK", +} +var TextAnnotation_DetectedBreak_BreakType_value = map[string]int32{ + "UNKNOWN": 0, + "SPACE": 1, + "SURE_SPACE": 2, + "EOL_SURE_SPACE": 3, + "HYPHEN": 4, + "LINE_BREAK": 5, +} + +func (x TextAnnotation_DetectedBreak_BreakType) String() string { + return proto.EnumName(TextAnnotation_DetectedBreak_BreakType_name, int32(x)) +} +func (TextAnnotation_DetectedBreak_BreakType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 1, 0} +} + +// Type of a block (text, image etc) as identified by OCR. +type Block_BlockType int32 + +const ( + // Unknown block type. + Block_UNKNOWN Block_BlockType = 0 + // Regular text block. + Block_TEXT Block_BlockType = 1 + // Table block. + Block_TABLE Block_BlockType = 2 + // Image block. + Block_PICTURE Block_BlockType = 3 + // Horizontal/vertical line box. + Block_RULER Block_BlockType = 4 + // Barcode block. + Block_BARCODE Block_BlockType = 5 +) + +var Block_BlockType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "TEXT", + 2: "TABLE", + 3: "PICTURE", + 4: "RULER", + 5: "BARCODE", +} +var Block_BlockType_value = map[string]int32{ + "UNKNOWN": 0, + "TEXT": 1, + "TABLE": 2, + "PICTURE": 3, + "RULER": 4, + "BARCODE": 5, +} + +func (x Block_BlockType) String() string { + return proto.EnumName(Block_BlockType_name, int32(x)) +} +func (Block_BlockType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 0} } + +// TextAnnotation contains a structured representation of OCR extracted text. +// The hierarchy of an OCR extracted text structure is like this: +// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol +// Each structural component, starting from Page, may further have their own +// properties. Properties describe detected languages, breaks etc.. Please +// refer to the [google.cloud.vision.v1.TextAnnotation.TextProperty][google.cloud.vision.v1.TextAnnotation.TextProperty] message +// definition below for more detail. +type TextAnnotation struct { + // List of pages detected by OCR. + Pages []*Page `protobuf:"bytes,1,rep,name=pages" json:"pages,omitempty"` + // UTF-8 text detected on the pages. + Text string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"` +} + +func (m *TextAnnotation) Reset() { *m = TextAnnotation{} } +func (m *TextAnnotation) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation) ProtoMessage() {} +func (*TextAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *TextAnnotation) GetPages() []*Page { + if m != nil { + return m.Pages + } + return nil +} + +func (m *TextAnnotation) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +// Detected language for a structural component. +type TextAnnotation_DetectedLanguage struct { + // The BCP-47 language code, such as "en-US" or "sr-Latn". For more + // information, see + // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // Confidence of detected language. Range [0, 1]. 
+ Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *TextAnnotation_DetectedLanguage) Reset() { *m = TextAnnotation_DetectedLanguage{} } +func (m *TextAnnotation_DetectedLanguage) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_DetectedLanguage) ProtoMessage() {} +func (*TextAnnotation_DetectedLanguage) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +func (m *TextAnnotation_DetectedLanguage) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *TextAnnotation_DetectedLanguage) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Detected start or end of a structural component. +type TextAnnotation_DetectedBreak struct { + Type TextAnnotation_DetectedBreak_BreakType `protobuf:"varint,1,opt,name=type,enum=google.cloud.vision.v1.TextAnnotation_DetectedBreak_BreakType" json:"type,omitempty"` + // True if break prepends the element. + IsPrefix bool `protobuf:"varint,2,opt,name=is_prefix,json=isPrefix" json:"is_prefix,omitempty"` +} + +func (m *TextAnnotation_DetectedBreak) Reset() { *m = TextAnnotation_DetectedBreak{} } +func (m *TextAnnotation_DetectedBreak) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_DetectedBreak) ProtoMessage() {} +func (*TextAnnotation_DetectedBreak) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 1} } + +func (m *TextAnnotation_DetectedBreak) GetType() TextAnnotation_DetectedBreak_BreakType { + if m != nil { + return m.Type + } + return TextAnnotation_DetectedBreak_UNKNOWN +} + +func (m *TextAnnotation_DetectedBreak) GetIsPrefix() bool { + if m != nil { + return m.IsPrefix + } + return false +} + +// Additional information detected on the structural component. +type TextAnnotation_TextProperty struct { + // A list of detected languages together with confidence. + DetectedLanguages []*TextAnnotation_DetectedLanguage `protobuf:"bytes,1,rep,name=detected_languages,json=detectedLanguages" json:"detected_languages,omitempty"` + // Detected start or end of a text segment. + DetectedBreak *TextAnnotation_DetectedBreak `protobuf:"bytes,2,opt,name=detected_break,json=detectedBreak" json:"detected_break,omitempty"` +} + +func (m *TextAnnotation_TextProperty) Reset() { *m = TextAnnotation_TextProperty{} } +func (m *TextAnnotation_TextProperty) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_TextProperty) ProtoMessage() {} +func (*TextAnnotation_TextProperty) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 2} } + +func (m *TextAnnotation_TextProperty) GetDetectedLanguages() []*TextAnnotation_DetectedLanguage { + if m != nil { + return m.DetectedLanguages + } + return nil +} + +func (m *TextAnnotation_TextProperty) GetDetectedBreak() *TextAnnotation_DetectedBreak { + if m != nil { + return m.DetectedBreak + } + return nil +} + +// Detected page from OCR. +type Page struct { + // Additional information detected on the page. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // Page width in pixels. + Width int32 `protobuf:"varint,2,opt,name=width" json:"width,omitempty"` + // Page height in pixels. + Height int32 `protobuf:"varint,3,opt,name=height" json:"height,omitempty"` + // List of blocks of text, images etc on this page. 
+ Blocks []*Block `protobuf:"bytes,4,rep,name=blocks" json:"blocks,omitempty"` +} + +func (m *Page) Reset() { *m = Page{} } +func (m *Page) String() string { return proto.CompactTextString(m) } +func (*Page) ProtoMessage() {} +func (*Page) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *Page) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Page) GetWidth() int32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Page) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Page) GetBlocks() []*Block { + if m != nil { + return m.Blocks + } + return nil +} + +// Logical element on the page. +type Block struct { + // Additional information detected for the block. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the block. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of paragraphs in this block (if this blocks is of type text). + Paragraphs []*Paragraph `protobuf:"bytes,3,rep,name=paragraphs" json:"paragraphs,omitempty"` + // Detected block type (text, image etc) for this block. + BlockType Block_BlockType `protobuf:"varint,4,opt,name=block_type,json=blockType,enum=google.cloud.vision.v1.Block_BlockType" json:"block_type,omitempty"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *Block) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Block) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Block) GetParagraphs() []*Paragraph { + if m != nil { + return m.Paragraphs + } + return nil +} + +func (m *Block) GetBlockType() Block_BlockType { + if m != nil { + return m.BlockType + } + return Block_UNKNOWN +} + +// Structural unit of text representing a number of words in certain order. +type Paragraph struct { + // Additional information detected for the paragraph. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the paragraph. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). 
+ BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of words in this paragraph. + Words []*Word `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"` +} + +func (m *Paragraph) Reset() { *m = Paragraph{} } +func (m *Paragraph) String() string { return proto.CompactTextString(m) } +func (*Paragraph) ProtoMessage() {} +func (*Paragraph) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *Paragraph) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Paragraph) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Paragraph) GetWords() []*Word { + if m != nil { + return m.Words + } + return nil +} + +// A word representation. +type Word struct { + // Additional information detected for the word. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the word. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of symbols in the word. + // The order of the symbols follows the natural reading order. + Symbols []*Symbol `protobuf:"bytes,3,rep,name=symbols" json:"symbols,omitempty"` +} + +func (m *Word) Reset() { *m = Word{} } +func (m *Word) String() string { return proto.CompactTextString(m) } +func (*Word) ProtoMessage() {} +func (*Word) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *Word) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Word) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Word) GetSymbols() []*Symbol { + if m != nil { + return m.Symbols + } + return nil +} + +// A single symbol representation. +type Symbol struct { + // Additional information detected for the symbol. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the symbol. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // The actual UTF-8 representation of the symbol. 
+ Text string `protobuf:"bytes,3,opt,name=text" json:"text,omitempty"` +} + +func (m *Symbol) Reset() { *m = Symbol{} } +func (m *Symbol) String() string { return proto.CompactTextString(m) } +func (*Symbol) ProtoMessage() {} +func (*Symbol) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *Symbol) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Symbol) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Symbol) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +func init() { + proto.RegisterType((*TextAnnotation)(nil), "google.cloud.vision.v1.TextAnnotation") + proto.RegisterType((*TextAnnotation_DetectedLanguage)(nil), "google.cloud.vision.v1.TextAnnotation.DetectedLanguage") + proto.RegisterType((*TextAnnotation_DetectedBreak)(nil), "google.cloud.vision.v1.TextAnnotation.DetectedBreak") + proto.RegisterType((*TextAnnotation_TextProperty)(nil), "google.cloud.vision.v1.TextAnnotation.TextProperty") + proto.RegisterType((*Page)(nil), "google.cloud.vision.v1.Page") + proto.RegisterType((*Block)(nil), "google.cloud.vision.v1.Block") + proto.RegisterType((*Paragraph)(nil), "google.cloud.vision.v1.Paragraph") + proto.RegisterType((*Word)(nil), "google.cloud.vision.v1.Word") + proto.RegisterType((*Symbol)(nil), "google.cloud.vision.v1.Symbol") + proto.RegisterEnum("google.cloud.vision.v1.TextAnnotation_DetectedBreak_BreakType", TextAnnotation_DetectedBreak_BreakType_name, TextAnnotation_DetectedBreak_BreakType_value) + proto.RegisterEnum("google.cloud.vision.v1.Block_BlockType", Block_BlockType_name, Block_BlockType_value) +} + +func init() { proto.RegisterFile("google/cloud/vision/v1/text_annotation.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 744 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x55, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xfd, 0xb9, 0xb1, 0xd3, 0x78, 0xd2, 0x46, 0xfe, 0x2d, 0xa8, 0x8a, 0x42, 0xa9, 0x8a, 0x01, + 0xd1, 0x03, 0x72, 0xd4, 0x14, 0x04, 0x12, 0x08, 0x29, 0x4e, 0x0d, 0xad, 0x1a, 0x25, 0xd6, 0x36, + 0x51, 0xf9, 0x73, 0xb0, 0xfc, 0x67, 0xeb, 0x58, 0x4d, 0xbd, 0x96, 0xed, 0xb6, 0xc9, 0x8d, 0x4f, + 0xc5, 0x89, 0x6f, 0xc1, 0x09, 0xee, 0x9c, 0xb9, 0x72, 0x44, 0x5e, 0xdb, 0x69, 0x52, 0x61, 0x04, + 0x88, 0x43, 0x2f, 0xd6, 0xce, 0xe4, 0xed, 0xdb, 0xf7, 0x66, 0x33, 0x3b, 0xf0, 0xd0, 0xa5, 0xd4, + 0x1d, 0x93, 0xa6, 0x3d, 0xa6, 0x67, 0x4e, 0xf3, 0xdc, 0x8b, 0x3c, 0xea, 0x37, 0xcf, 0xb7, 0x9b, + 0x31, 0x99, 0xc4, 0x86, 0xe9, 0xfb, 0x34, 0x36, 0x63, 0x8f, 0xfa, 0x4a, 0x10, 0xd2, 0x98, 0xa2, + 0xb5, 0x14, 0xad, 0x30, 0xb4, 0x92, 0xa2, 0x95, 0xf3, 0xed, 0xc6, 0x7a, 0xc6, 0x62, 0x06, 0x5e, + 0xf3, 0x72, 0x53, 0x94, 0xee, 0x6a, 0xdc, 0x2f, 0x38, 0xc3, 0x25, 0xf4, 0x94, 0xc4, 0xe1, 0x34, + 0x85, 0xc9, 0xdf, 0x78, 0xa8, 0x0d, 0xc8, 0x24, 0x6e, 0xcf, 0x08, 0x50, 0x0b, 0x84, 0xc0, 0x74, + 0x49, 0x54, 0xe7, 0x36, 0x4b, 0x5b, 0xd5, 0xd6, 0xba, 0xf2, 0xf3, 0xf3, 0x15, 0xdd, 0x74, 0x09, + 0x4e, 0xa1, 0x08, 0x01, 0x9f, 0x88, 0xaf, 0x2f, 0x6d, 0x72, 0x5b, 0x22, 0x66, 0xeb, 0xc6, 0x11, + 0x48, 0xbb, 0x24, 0x26, 0x76, 0x4c, 0x9c, 0xae, 0xe9, 0xbb, 0x67, 0xa6, 0x4b, 0xd0, 0x5d, 0x58, + 0x1d, 0x67, 0x6b, 0xc3, 0xa6, 0x0e, 0xa9, 0x73, 0x6c, 0xc3, 0x4a, 0x9e, 0xec, 0x50, 0x87, 0xa0, + 0x0d, 0x00, 0x9b, 0xfa, 0xc7, 0x9e, 0x43, 0x7c, 0x9b, 0x30, 0xca, 0x25, 0x3c, 0x97, 0x69, 0x7c, + 0xe5, 0x60, 0x35, 0x67, 0x56, 0x43, 0x62, 0x9e, 0x20, 0x0c, 0x7c, 0x3c, 0x0d, 0x52, 
0xb6, 0x5a, + 0xeb, 0x45, 0x91, 0xe2, 0x45, 0xa3, 0xca, 0x02, 0x87, 0xc2, 0xbe, 0x83, 0x69, 0x40, 0x30, 0xe3, + 0x42, 0xb7, 0x40, 0xf4, 0x22, 0x23, 0x08, 0xc9, 0xb1, 0x37, 0x61, 0x22, 0x2a, 0xb8, 0xe2, 0x45, + 0x3a, 0x8b, 0x65, 0x1b, 0xc4, 0x19, 0x1e, 0x55, 0x61, 0x79, 0xd8, 0x3b, 0xe8, 0xf5, 0x8f, 0x7a, + 0xd2, 0x7f, 0x48, 0x04, 0xe1, 0x50, 0x6f, 0x77, 0x34, 0x89, 0x43, 0x35, 0x80, 0xc3, 0x21, 0xd6, + 0x8c, 0x34, 0x5e, 0x42, 0x08, 0x6a, 0x5a, 0xbf, 0x6b, 0xcc, 0xe5, 0x4a, 0x08, 0xa0, 0xbc, 0xf7, + 0x46, 0xdf, 0xd3, 0x7a, 0x12, 0x9f, 0xe0, 0xbb, 0xfb, 0x3d, 0xcd, 0x50, 0xb1, 0xd6, 0x3e, 0x90, + 0x84, 0xc6, 0x27, 0x0e, 0x56, 0x12, 0xc9, 0x7a, 0x48, 0x03, 0x12, 0xc6, 0x53, 0x74, 0x0c, 0xc8, + 0xc9, 0x34, 0x1b, 0x79, 0xc5, 0xf2, 0x6b, 0x7a, 0xf2, 0x87, 0xa6, 0xf3, 0x2b, 0xc1, 0xff, 0x3b, + 0x57, 0x32, 0x11, 0x7a, 0x07, 0xb5, 0xd9, 0x39, 0x56, 0x62, 0x93, 0xf9, 0xaf, 0xb6, 0x1e, 0xfd, + 0x4d, 0x61, 0xf1, 0xaa, 0x33, 0x1f, 0xca, 0x1f, 0x39, 0xe0, 0x93, 0xbf, 0x0e, 0xea, 0x43, 0x25, + 0xc8, 0x9c, 0xb1, 0x8b, 0xab, 0xb6, 0x76, 0x7e, 0x93, 0x7f, 0xbe, 0x28, 0x78, 0x46, 0x82, 0x6e, + 0x82, 0x70, 0xe1, 0x39, 0xf1, 0x88, 0xa9, 0x15, 0x70, 0x1a, 0xa0, 0x35, 0x28, 0x8f, 0x88, 0xe7, + 0x8e, 0xe2, 0x7a, 0x89, 0xa5, 0xb3, 0x08, 0x3d, 0x86, 0xb2, 0x35, 0xa6, 0xf6, 0x49, 0x54, 0xe7, + 0x59, 0x01, 0x6f, 0x17, 0x1d, 0xae, 0x26, 0x28, 0x9c, 0x81, 0xe5, 0xf7, 0x25, 0x10, 0x58, 0xe6, + 0xdf, 0xeb, 0x7f, 0x05, 0x2b, 0x16, 0x3d, 0xf3, 0x1d, 0xcf, 0x77, 0x0d, 0x8b, 0x4e, 0xb2, 0xa2, + 0xdf, 0x2b, 0xd4, 0x95, 0x61, 0x75, 0x3a, 0x9e, 0xe2, 0x6a, 0xbe, 0x53, 0xa5, 0x13, 0xd4, 0x06, + 0x08, 0xcc, 0xd0, 0x74, 0x43, 0x33, 0x18, 0x45, 0xf5, 0x12, 0xb3, 0x77, 0xa7, 0xb8, 0x8d, 0x33, + 0x24, 0x9e, 0xdb, 0x84, 0x5e, 0x02, 0x30, 0xc3, 0x06, 0xeb, 0x2b, 0x9e, 0xf5, 0xd5, 0x83, 0x5f, + 0x56, 0x28, 0xfd, 0xb2, 0x06, 0x12, 0xad, 0x7c, 0x29, 0x63, 0x10, 0x67, 0xf9, 0xc5, 0x46, 0xa9, + 0x00, 0x3f, 0xd0, 0x5e, 0x0f, 0x24, 0x2e, 0x69, 0x99, 0x41, 0x5b, 0xed, 0x26, 0x2d, 0x52, 0x85, + 0x65, 0x7d, 0xbf, 0x33, 0x18, 0xe2, 0xa4, 0x37, 0x44, 0x10, 0xf0, 0xb0, 0xab, 0x61, 0x89, 0x4f, + 0xf2, 0x6a, 0x1b, 0x77, 0xfa, 0xbb, 0x9a, 0x24, 0xc8, 0x9f, 0x39, 0x10, 0x67, 0xaa, 0xaf, 0xf1, + 0x35, 0xb4, 0x40, 0xb8, 0xa0, 0xa1, 0x93, 0xdf, 0x40, 0xe1, 0x43, 0x7a, 0x44, 0x43, 0x07, 0xa7, + 0x50, 0xf9, 0x0b, 0x07, 0x7c, 0x12, 0x5f, 0x63, 0x5b, 0x4f, 0x61, 0x39, 0x9a, 0x9e, 0x5a, 0x74, + 0x9c, 0x1b, 0xdb, 0x28, 0xe2, 0x38, 0x64, 0x30, 0x9c, 0xc3, 0xe5, 0x0f, 0x1c, 0x94, 0xd3, 0xdc, + 0x35, 0xb6, 0x97, 0x8f, 0xb2, 0xd2, 0xe5, 0x28, 0x53, 0x63, 0x68, 0xd8, 0xf4, 0xb4, 0x80, 0x4b, + 0xbd, 0xb1, 0xa8, 0x50, 0x4f, 0x06, 0xab, 0xce, 0xbd, 0x7d, 0x9e, 0xc1, 0x5d, 0x9a, 0xbc, 0xd5, + 0x0a, 0x0d, 0xdd, 0xa6, 0x4b, 0x7c, 0x36, 0x76, 0x9b, 0xe9, 0x4f, 0x66, 0xe0, 0x45, 0x57, 0x07, + 0xf4, 0xb3, 0x74, 0xf5, 0x9d, 0xe3, 0xac, 0x32, 0xc3, 0xee, 0xfc, 0x08, 0x00, 0x00, 0xff, 0xff, + 0x80, 0x29, 0x2a, 0x3b, 0x2f, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/web_detection.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/web_detection.pb.go new file mode 100644 index 000000000..d43b23dc5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/web_detection.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. +// source: google/cloud/vision/v1/web_detection.proto +// DO NOT EDIT! 
+ +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Relevant information for the image from the Internet. +type WebDetection struct { + // Deduced entities from similar images on the Internet. + WebEntities []*WebDetection_WebEntity `protobuf:"bytes,1,rep,name=web_entities,json=webEntities" json:"web_entities,omitempty"` + // Fully matching images from the Internet. + // They're definite neardups and most often a copy of the query image with + // merely a size change. + FullMatchingImages []*WebDetection_WebImage `protobuf:"bytes,2,rep,name=full_matching_images,json=fullMatchingImages" json:"full_matching_images,omitempty"` + // Partial matching images from the Internet. + // Those images are similar enough to share some key-point features. For + // example an original image will likely have partial matching for its crops. + PartialMatchingImages []*WebDetection_WebImage `protobuf:"bytes,3,rep,name=partial_matching_images,json=partialMatchingImages" json:"partial_matching_images,omitempty"` + // Web pages containing the matching images from the Internet. + PagesWithMatchingImages []*WebDetection_WebPage `protobuf:"bytes,4,rep,name=pages_with_matching_images,json=pagesWithMatchingImages" json:"pages_with_matching_images,omitempty"` +} + +func (m *WebDetection) Reset() { *m = WebDetection{} } +func (m *WebDetection) String() string { return proto.CompactTextString(m) } +func (*WebDetection) ProtoMessage() {} +func (*WebDetection) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *WebDetection) GetWebEntities() []*WebDetection_WebEntity { + if m != nil { + return m.WebEntities + } + return nil +} + +func (m *WebDetection) GetFullMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.FullMatchingImages + } + return nil +} + +func (m *WebDetection) GetPartialMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.PartialMatchingImages + } + return nil +} + +func (m *WebDetection) GetPagesWithMatchingImages() []*WebDetection_WebPage { + if m != nil { + return m.PagesWithMatchingImages + } + return nil +} + +// Entity deduced from similar images on the Internet. +type WebDetection_WebEntity struct { + // Opaque entity ID. + EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"` + // Overall relevancy score for the entity. + // Not normalized and not comparable across different image queries. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` + // Canonical description of the entity, in English. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *WebDetection_WebEntity) Reset() { *m = WebDetection_WebEntity{} } +func (m *WebDetection_WebEntity) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebEntity) ProtoMessage() {} +func (*WebDetection_WebEntity) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +func (m *WebDetection_WebEntity) GetEntityId() string { + if m != nil { + return m.EntityId + } + return "" +} + +func (m *WebDetection_WebEntity) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *WebDetection_WebEntity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Metadata for online images. +type WebDetection_WebImage struct { + // The result image URL. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // Overall relevancy score for the image. + // Not normalized and not comparable across different image queries. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` +} + +func (m *WebDetection_WebImage) Reset() { *m = WebDetection_WebImage{} } +func (m *WebDetection_WebImage) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebImage) ProtoMessage() {} +func (*WebDetection_WebImage) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 1} } + +func (m *WebDetection_WebImage) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *WebDetection_WebImage) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// Metadata for web pages. +type WebDetection_WebPage struct { + // The result web page URL. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // Overall relevancy score for the web page. + // Not normalized and not comparable across different image queries. 
+ Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` +} + +func (m *WebDetection_WebPage) Reset() { *m = WebDetection_WebPage{} } +func (m *WebDetection_WebPage) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebPage) ProtoMessage() {} +func (*WebDetection_WebPage) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 2} } + +func (m *WebDetection_WebPage) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *WebDetection_WebPage) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func init() { + proto.RegisterType((*WebDetection)(nil), "google.cloud.vision.v1.WebDetection") + proto.RegisterType((*WebDetection_WebEntity)(nil), "google.cloud.vision.v1.WebDetection.WebEntity") + proto.RegisterType((*WebDetection_WebImage)(nil), "google.cloud.vision.v1.WebDetection.WebImage") + proto.RegisterType((*WebDetection_WebPage)(nil), "google.cloud.vision.v1.WebDetection.WebPage") +} + +func init() { proto.RegisterFile("google/cloud/vision/v1/web_detection.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 383 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x93, 0x41, 0x4f, 0xea, 0x40, + 0x14, 0x85, 0x53, 0xca, 0x7b, 0x0f, 0x06, 0x16, 0xcf, 0x09, 0x4a, 0x53, 0x5d, 0x34, 0xae, 0x88, + 0xd1, 0x69, 0xc0, 0xa5, 0xae, 0x88, 0x2e, 0x58, 0x98, 0x60, 0x37, 0x24, 0x6e, 0xea, 0xd0, 0x8e, + 0xc3, 0x4d, 0xca, 0x4c, 0xd3, 0x19, 0x20, 0xfc, 0x58, 0xff, 0x87, 0x4b, 0x33, 0xd3, 0x62, 0x10, + 0x30, 0x21, 0xee, 0xee, 0xdc, 0x9e, 0xf3, 0x9d, 0xf6, 0xf6, 0x0e, 0xba, 0xe2, 0x52, 0xf2, 0x8c, + 0x85, 0x49, 0x26, 0x17, 0x69, 0xb8, 0x04, 0x05, 0x52, 0x84, 0xcb, 0x7e, 0xb8, 0x62, 0xd3, 0x38, + 0x65, 0x9a, 0x25, 0x1a, 0xa4, 0x20, 0x79, 0x21, 0xb5, 0xc4, 0x67, 0xa5, 0x96, 0x58, 0x2d, 0x29, + 0xb5, 0x64, 0xd9, 0xf7, 0x2f, 0x2a, 0x06, 0xcd, 0x21, 0xa4, 0x42, 0x48, 0x4d, 0x8d, 0x49, 0x95, + 0xae, 0xcb, 0xf7, 0x3a, 0x6a, 0x4f, 0xd8, 0xf4, 0x61, 0x03, 0xc3, 0xcf, 0xa8, 0x6d, 0xe8, 0x4c, + 0x68, 0xd0, 0xc0, 0x94, 0xe7, 0x04, 0x6e, 0xaf, 0x35, 0x20, 0xe4, 0x30, 0x9d, 0x6c, 0x7b, 0xcd, + 0xe1, 0xd1, 0xf8, 0xd6, 0x51, 0x6b, 0x55, 0x95, 0xc0, 0x14, 0x8e, 0x51, 0xe7, 0x6d, 0x91, 0x65, + 0xf1, 0x9c, 0xea, 0x64, 0x06, 0x82, 0xc7, 0x30, 0xa7, 0x9c, 0x29, 0xaf, 0x66, 0xd1, 0x37, 0xc7, + 0xa2, 0x47, 0xc6, 0x15, 0x61, 0x83, 0x7a, 0xaa, 0x48, 0xb6, 0xa5, 0x30, 0x43, 0xdd, 0x9c, 0x16, + 0x1a, 0xe8, 0x7e, 0x86, 0xfb, 0x9b, 0x8c, 0xd3, 0x8a, 0xb6, 0x13, 0x03, 0xc8, 0xcf, 0x4d, 0x11, + 0xaf, 0x40, 0xcf, 0xf6, 0x92, 0xea, 0x36, 0xe9, 0xfa, 0xd8, 0xa4, 0xb1, 0x09, 0xea, 0x5a, 0xde, + 0x04, 0xf4, 0xec, 0x7b, 0x94, 0xff, 0x8a, 0x9a, 0x5f, 0xc3, 0xc4, 0xe7, 0xa8, 0x69, 0x7f, 0xc7, + 0x3a, 0x86, 0xd4, 0x73, 0x02, 0xa7, 0xd7, 0x8c, 0x1a, 0x65, 0x63, 0x94, 0xe2, 0x0e, 0xfa, 0xa3, + 0x12, 0x59, 0x30, 0xaf, 0x16, 0x38, 0xbd, 0x5a, 0x54, 0x1e, 0x70, 0x80, 0x5a, 0x29, 0x53, 0x49, + 0x01, 0xb9, 0xc9, 0xf3, 0x5c, 0x6b, 0xda, 0x6e, 0xf9, 0x03, 0xd4, 0xd8, 0x7c, 0x2f, 0xfe, 0x8f, + 0xdc, 0x45, 0x91, 0x55, 0x68, 0x53, 0x1e, 0xa6, 0xfa, 0x7d, 0xf4, 0xaf, 0x7a, 0xf3, 0x63, 0x2d, + 0xc3, 0x02, 0xf9, 0x89, 0x9c, 0xff, 0x30, 0x94, 0xe1, 0xc9, 0xf6, 0x54, 0xc6, 0x66, 0x21, 0xc7, + 0xce, 0xcb, 0x7d, 0x25, 0xe6, 0x32, 0xa3, 0x82, 0x13, 0x59, 0xf0, 0x90, 0x33, 0x61, 0xd7, 0x35, + 0x2c, 0x1f, 0xd1, 0x1c, 0xd4, 0xee, 0x9d, 0xb8, 0x2b, 0xab, 0x0f, 0xc7, 0x99, 0xfe, 0xb5, 0xda, + 0xdb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xd9, 0xde, 0x3f, 0x3e, 0x03, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/spanner/admin/database/v1/spanner_database_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/admin/database/v1/spanner_database_admin.pb.go new file mode 100644 index 000000000..8112d788f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/admin/database/v1/spanner_database_admin.pb.go @@ -0,0 +1,920 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// DO NOT EDIT! + +/* +Package database is a generated protocol buffer package. + +It is generated from these files: + google/spanner/admin/database/v1/spanner_database_admin.proto + +It has these top-level messages: + Database + ListDatabasesRequest + ListDatabasesResponse + CreateDatabaseRequest + CreateDatabaseMetadata + GetDatabaseRequest + UpdateDatabaseDdlRequest + UpdateDatabaseDdlMetadata + DropDatabaseRequest + GetDatabaseDdlRequest + GetDatabaseDdlResponse +*/ +package database + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates the current state of the database. +type Database_State int32 + +const ( + // Not specified. + Database_STATE_UNSPECIFIED Database_State = 0 + // The database is still being created. Operations on the database may fail + // with `FAILED_PRECONDITION` in this state. + Database_CREATING Database_State = 1 + // The database is fully created and ready for use. + Database_READY Database_State = 2 +) + +var Database_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "CREATING", + 2: "READY", +} +var Database_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, +} + +func (x Database_State) String() string { + return proto.EnumName(Database_State_name, int32(x)) +} +func (Database_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// A Cloud Spanner database. +type Database struct { + // Required. The name of the database. Values are of the form + // `projects//instances//databases/`, + // where `` is as specified in the `CREATE DATABASE` + // statement. This name can be passed to other API methods to + // identify the database. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Output only. The current database state. 
+ State Database_State `protobuf:"varint,2,opt,name=state,enum=google.spanner.admin.database.v1.Database_State" json:"state,omitempty"` +} + +func (m *Database) Reset() { *m = Database{} } +func (m *Database) String() string { return proto.CompactTextString(m) } +func (*Database) ProtoMessage() {} +func (*Database) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Database) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Database) GetState() Database_State { + if m != nil { + return m.State + } + return Database_STATE_UNSPECIFIED +} + +// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +type ListDatabasesRequest struct { + // Required. The instance whose databases should be listed. + // Values are of the form `projects//instances/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Number of databases to be returned in the response. If 0 or less, + // defaults to the server's maximum allowed page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a + // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListDatabasesRequest) Reset() { *m = ListDatabasesRequest{} } +func (m *ListDatabasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesRequest) ProtoMessage() {} +func (*ListDatabasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ListDatabasesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListDatabasesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDatabasesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +type ListDatabasesResponse struct { + // Databases that matched the request. + Databases []*Database `protobuf:"bytes,1,rep,name=databases" json:"databases,omitempty"` + // `next_page_token` can be sent in a subsequent + // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more + // of the matching databases. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDatabasesResponse) Reset() { *m = ListDatabasesResponse{} } +func (m *ListDatabasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesResponse) ProtoMessage() {} +func (*ListDatabasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListDatabasesResponse) GetDatabases() []*Database { + if m != nil { + return m.Databases + } + return nil +} + +func (m *ListDatabasesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +type CreateDatabaseRequest struct { + // Required. The name of the instance that will serve the new database. + // Values are of the form `projects//instances/`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. A `CREATE DATABASE` statement, which specifies the ID of the + // new database. The database ID must conform to the regular expression + // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length. + CreateStatement string `protobuf:"bytes,2,opt,name=create_statement,json=createStatement" json:"create_statement,omitempty"` + // An optional list of DDL statements to run inside the newly created + // database. Statements can create tables, indexes, etc. These + // statements execute atomically with the creation of the database: + // if there is an error in any statement, the database is not created. + ExtraStatements []string `protobuf:"bytes,3,rep,name=extra_statements,json=extraStatements" json:"extra_statements,omitempty"` +} + +func (m *CreateDatabaseRequest) Reset() { *m = CreateDatabaseRequest{} } +func (m *CreateDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseRequest) ProtoMessage() {} +func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *CreateDatabaseRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateDatabaseRequest) GetCreateStatement() string { + if m != nil { + return m.CreateStatement + } + return "" +} + +func (m *CreateDatabaseRequest) GetExtraStatements() []string { + if m != nil { + return m.ExtraStatements + } + return nil +} + +// Metadata type for the operation returned by +// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +type CreateDatabaseMetadata struct { + // The database being created. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *CreateDatabaseMetadata) Reset() { *m = CreateDatabaseMetadata{} } +func (m *CreateDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseMetadata) ProtoMessage() {} +func (*CreateDatabaseMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *CreateDatabaseMetadata) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. +type GetDatabaseRequest struct { + // Required. The name of the requested database. Values are of the form + // `projects//instances//databases/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetDatabaseRequest) Reset() { *m = GetDatabaseRequest{} } +func (m *GetDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseRequest) ProtoMessage() {} +func (*GetDatabaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *GetDatabaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Enqueues the given DDL statements to be applied, in order but not +// necessarily all at once, to the database schema at some point (or +// points) in the future. The server checks that the statements +// are executable (syntactically valid, name tables that exist, etc.) +// before enqueueing them, but they may still fail upon +// later execution (e.g., if a statement from another batch of +// statements is applied first and it conflicts in some way, or if +// there is some data-related problem like a `NULL` value in a column to +// which `NOT NULL` would be added). 
If a statement fails, all +// subsequent statements in the batch are automatically cancelled. +// +// Each batch of statements is assigned a name which can be used with +// the [Operations][google.longrunning.Operations] API to monitor +// progress. See the +// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more +// details. +type UpdateDatabaseDdlRequest struct { + // Required. The database to update. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // DDL statements to be applied to the database. + Statements []string `protobuf:"bytes,2,rep,name=statements" json:"statements,omitempty"` + // If empty, the new update request is assigned an + // automatically-generated operation ID. Otherwise, `operation_id` + // is used to construct the name of the resulting + // [Operation][google.longrunning.Operation]. + // + // Specifying an explicit operation ID simplifies determining + // whether the statements were executed in the event that the + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, + // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and + // `operation_id` fields can be combined to form the + // [name][google.longrunning.Operation.name] of the resulting + // [longrunning.Operation][google.longrunning.Operation]: `/operations/`. + // + // `operation_id` should be unique within the database, and must be + // a valid identifier: `[a-zA-Z][a-zA-Z0-9_]*`. Note that + // automatically-generated operation IDs always begin with an + // underscore. If the named operation already exists, + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns + // `ALREADY_EXISTS`. + OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` +} + +func (m *UpdateDatabaseDdlRequest) Reset() { *m = UpdateDatabaseDdlRequest{} } +func (m *UpdateDatabaseDdlRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDatabaseDdlRequest) ProtoMessage() {} +func (*UpdateDatabaseDdlRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *UpdateDatabaseDdlRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *UpdateDatabaseDdlRequest) GetStatements() []string { + if m != nil { + return m.Statements + } + return nil +} + +func (m *UpdateDatabaseDdlRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +// Metadata type for the operation returned by +// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. +type UpdateDatabaseDdlMetadata struct { + // The database being modified. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // For an update this list contains all the statements. For an + // individual statement, this list contains only that statement. + Statements []string `protobuf:"bytes,2,rep,name=statements" json:"statements,omitempty"` + // Reports the commit timestamps of all statements that have + // succeeded so far, where `commit_timestamps[i]` is the commit + // timestamp for the statement `statements[i]`. 
+ CommitTimestamps []*google_protobuf3.Timestamp `protobuf:"bytes,3,rep,name=commit_timestamps,json=commitTimestamps" json:"commit_timestamps,omitempty"` +} + +func (m *UpdateDatabaseDdlMetadata) Reset() { *m = UpdateDatabaseDdlMetadata{} } +func (m *UpdateDatabaseDdlMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateDatabaseDdlMetadata) ProtoMessage() {} +func (*UpdateDatabaseDdlMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *UpdateDatabaseDdlMetadata) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *UpdateDatabaseDdlMetadata) GetStatements() []string { + if m != nil { + return m.Statements + } + return nil +} + +func (m *UpdateDatabaseDdlMetadata) GetCommitTimestamps() []*google_protobuf3.Timestamp { + if m != nil { + return m.CommitTimestamps + } + return nil +} + +// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. +type DropDatabaseRequest struct { + // Required. The database to be dropped. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *DropDatabaseRequest) Reset() { *m = DropDatabaseRequest{} } +func (m *DropDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*DropDatabaseRequest) ProtoMessage() {} +func (*DropDatabaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *DropDatabaseRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +type GetDatabaseDdlRequest struct { + // Required. The database whose schema we wish to get. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *GetDatabaseDdlRequest) Reset() { *m = GetDatabaseDdlRequest{} } +func (m *GetDatabaseDdlRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseDdlRequest) ProtoMessage() {} +func (*GetDatabaseDdlRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *GetDatabaseDdlRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +type GetDatabaseDdlResponse struct { + // A list of formatted DDL statements defining the schema of the database + // specified in the request. 
+ Statements []string `protobuf:"bytes,1,rep,name=statements" json:"statements,omitempty"` +} + +func (m *GetDatabaseDdlResponse) Reset() { *m = GetDatabaseDdlResponse{} } +func (m *GetDatabaseDdlResponse) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseDdlResponse) ProtoMessage() {} +func (*GetDatabaseDdlResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *GetDatabaseDdlResponse) GetStatements() []string { + if m != nil { + return m.Statements + } + return nil +} + +func init() { + proto.RegisterType((*Database)(nil), "google.spanner.admin.database.v1.Database") + proto.RegisterType((*ListDatabasesRequest)(nil), "google.spanner.admin.database.v1.ListDatabasesRequest") + proto.RegisterType((*ListDatabasesResponse)(nil), "google.spanner.admin.database.v1.ListDatabasesResponse") + proto.RegisterType((*CreateDatabaseRequest)(nil), "google.spanner.admin.database.v1.CreateDatabaseRequest") + proto.RegisterType((*CreateDatabaseMetadata)(nil), "google.spanner.admin.database.v1.CreateDatabaseMetadata") + proto.RegisterType((*GetDatabaseRequest)(nil), "google.spanner.admin.database.v1.GetDatabaseRequest") + proto.RegisterType((*UpdateDatabaseDdlRequest)(nil), "google.spanner.admin.database.v1.UpdateDatabaseDdlRequest") + proto.RegisterType((*UpdateDatabaseDdlMetadata)(nil), "google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata") + proto.RegisterType((*DropDatabaseRequest)(nil), "google.spanner.admin.database.v1.DropDatabaseRequest") + proto.RegisterType((*GetDatabaseDdlRequest)(nil), "google.spanner.admin.database.v1.GetDatabaseDdlRequest") + proto.RegisterType((*GetDatabaseDdlResponse)(nil), "google.spanner.admin.database.v1.GetDatabaseDdlResponse") + proto.RegisterEnum("google.spanner.admin.database.v1.Database_State", Database_State_name, Database_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DatabaseAdmin service + +type DatabaseAdminClient interface { + // Lists Cloud Spanner databases. + ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) + // Creates a new Cloud Spanner database and starts to prepare it for serving. + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track preparation of the database. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + // [response][google.longrunning.Operation.response] field type is + // [Database][google.spanner.admin.database.v1.Database], if successful. + CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets the state of a Cloud Spanner database. + GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) + // Updates the schema of a Cloud Spanner database by + // creating/altering/dropping tables, columns, indexes, etc. 
The returned + // [long-running operation][google.longrunning.Operation] will have a name of + // the format `/operations/` and can be used to + // track execution of the schema change(s). The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Drops (aka deletes) a Cloud Spanner database. + DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Returns the schema of a Cloud Spanner database as a list of formatted + // DDL statements. This method does not show pending schema updates, those may + // be queried using the [Operations][google.longrunning.Operations] API. + GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error) + // Sets the access control policy on a database resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.databases.setIamPolicy` permission on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Gets the access control policy for a database resource. Returns an empty + // policy if a database exists but does not have a policy set. + // + // Authorization requires `spanner.databases.getIamPolicy` permission on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Returns permissions that the caller has on the specified database resource. + // + // Attempting this RPC on a non-existent Cloud Spanner database will result in + // a NOT_FOUND error if the user has `spanner.databases.list` permission on + // the containing Cloud Spanner instance. Otherwise returns an empty set of + // permissions. + TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +type databaseAdminClient struct { + cc *grpc.ClientConn +} + +func NewDatabaseAdminClient(cc *grpc.ClientConn) DatabaseAdminClient { + return &databaseAdminClient{cc} +} + +func (c *databaseAdminClient) ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) { + out := new(ListDatabasesResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) { + out := new(Database) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error) { + out := new(GetDatabaseDdlResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DatabaseAdmin service + +type DatabaseAdminServer interface { + // Lists Cloud Spanner databases. + ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) + // Creates a new Cloud Spanner database and starts to prepare it for serving. + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track preparation of the database. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + // [response][google.longrunning.Operation.response] field type is + // [Database][google.spanner.admin.database.v1.Database], if successful. + CreateDatabase(context.Context, *CreateDatabaseRequest) (*google_longrunning.Operation, error) + // Gets the state of a Cloud Spanner database. + GetDatabase(context.Context, *GetDatabaseRequest) (*Database, error) + // Updates the schema of a Cloud Spanner database by + // creating/altering/dropping tables, columns, indexes, etc. 
The returned
+	// [long-running operation][google.longrunning.Operation] will have a name of
+	// the format `<database_name>/operations/<operation_id>` and can be used to
+	// track execution of the schema change(s). The
+	// [metadata][google.longrunning.Operation.metadata] field type is
+	// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.
+	UpdateDatabaseDdl(context.Context, *UpdateDatabaseDdlRequest) (*google_longrunning.Operation, error)
+	// Drops (aka deletes) a Cloud Spanner database.
+	DropDatabase(context.Context, *DropDatabaseRequest) (*google_protobuf2.Empty, error)
+	// Returns the schema of a Cloud Spanner database as a list of formatted
+	// DDL statements. This method does not show pending schema updates, those may
+	// be queried using the [Operations][google.longrunning.Operations] API.
+	GetDatabaseDdl(context.Context, *GetDatabaseDdlRequest) (*GetDatabaseDdlResponse, error)
+	// Sets the access control policy on a database resource. Replaces any
+	// existing policy.
+	//
+	// Authorization requires `spanner.databases.setIamPolicy` permission on
+	// [resource][google.iam.v1.SetIamPolicyRequest.resource].
+	SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error)
+	// Gets the access control policy for a database resource. Returns an empty
+	// policy if a database exists but does not have a policy set.
+	//
+	// Authorization requires `spanner.databases.getIamPolicy` permission on
+	// [resource][google.iam.v1.GetIamPolicyRequest.resource].
+	GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error)
+	// Returns permissions that the caller has on the specified database resource.
+	//
+	// Attempting this RPC on a non-existent Cloud Spanner database will result in
+	// a NOT_FOUND error if the user has `spanner.databases.list` permission on
+	// the containing Cloud Spanner instance. Otherwise returns an empty set of
+	// permissions.
+ TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +func RegisterDatabaseAdminServer(s *grpc.Server, srv DatabaseAdminServer) { + s.RegisterService(&_DatabaseAdmin_serviceDesc, srv) +} + +func _DatabaseAdmin_ListDatabases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatabasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).ListDatabases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).ListDatabases(ctx, req.(*ListDatabasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_CreateDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).CreateDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).CreateDatabase(ctx, req.(*CreateDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_GetDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).GetDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).GetDatabase(ctx, req.(*GetDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_UpdateDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDatabaseDdlRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, req.(*UpdateDatabaseDdlRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_DropDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DropDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).DropDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).DropDatabase(ctx, req.(*DropDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_GetDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseDdlRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, req.(*GetDatabaseDdlRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DatabaseAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.spanner.admin.database.v1.DatabaseAdmin", + HandlerType: (*DatabaseAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListDatabases", + Handler: _DatabaseAdmin_ListDatabases_Handler, + }, + { + MethodName: "CreateDatabase", + Handler: 
_DatabaseAdmin_CreateDatabase_Handler, + }, + { + MethodName: "GetDatabase", + Handler: _DatabaseAdmin_GetDatabase_Handler, + }, + { + MethodName: "UpdateDatabaseDdl", + Handler: _DatabaseAdmin_UpdateDatabaseDdl_Handler, + }, + { + MethodName: "DropDatabase", + Handler: _DatabaseAdmin_DropDatabase_Handler, + }, + { + MethodName: "GetDatabaseDdl", + Handler: _DatabaseAdmin_GetDatabaseDdl_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _DatabaseAdmin_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _DatabaseAdmin_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _DatabaseAdmin_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/spanner/admin/database/v1/spanner_database_admin.proto", +} + +func init() { + proto.RegisterFile("google/spanner/admin/database/v1/spanner_database_admin.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 999 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0x99, 0xa4, 0xa9, 0xe2, 0x17, 0x27, 0x75, 0x06, 0x1c, 0xb9, 0x5b, 0x5a, 0xcc, 0x82, + 0x90, 0x6b, 0x89, 0x5d, 0xe2, 0x34, 0x24, 0x18, 0x05, 0x91, 0xc6, 0xae, 0x6b, 0x09, 0x5a, 0xcb, + 0x76, 0x0f, 0x70, 0xb1, 0x26, 0xf6, 0xb0, 0x6c, 0xf1, 0xce, 0x2e, 0x3b, 0xe3, 0xaa, 0x2d, 0xea, + 0x05, 0x89, 0x03, 0x07, 0x4e, 0x80, 0xc4, 0x0d, 0xc4, 0x81, 0x33, 0x37, 0x24, 0x8e, 0xfc, 0x0b, + 0xfc, 0x0b, 0xfc, 0x21, 0x68, 0x66, 0x77, 0x9c, 0xf5, 0x3a, 0x89, 0xd7, 0x1c, 0xb8, 0x79, 0xdf, + 0x8f, 0x79, 0x9f, 0xf7, 0xf6, 0x7d, 0xc7, 0x0b, 0x47, 0x8e, 0xef, 0x3b, 0x63, 0x6a, 0xf3, 0x80, + 0x30, 0x46, 0x43, 0x9b, 0x8c, 0x3c, 0x97, 0xd9, 0x23, 0x22, 0xc8, 0x29, 0xe1, 0xd4, 0x7e, 0xb2, + 0xab, 0x3d, 0x03, 0x6d, 0x1b, 0xa8, 0x10, 0x2b, 0x08, 0x7d, 0xe1, 0xe3, 0x72, 0x94, 0x6e, 0xc5, + 0x41, 0x56, 0xe4, 0xd3, 0xa1, 0xd6, 0x93, 0x5d, 0xe3, 0xd5, 0xb8, 0x00, 0x09, 0x5c, 0x9b, 0x30, + 0xe6, 0x0b, 0x22, 0x5c, 0x9f, 0xf1, 0x28, 0xdf, 0x28, 0x26, 0xbd, 0x13, 0xf1, 0x79, 0x6c, 0xbe, + 0x15, 0x9b, 0x5d, 0xe2, 0x49, 0x04, 0x97, 0x78, 0x83, 0xc0, 0x1f, 0xbb, 0xc3, 0x67, 0xb1, 0xdf, + 0x98, 0xf5, 0xcf, 0xf8, 0xde, 0x88, 0x7d, 0x63, 0x9f, 0x39, 0xe1, 0x84, 0x31, 0x97, 0x39, 0xb6, + 0x1f, 0xd0, 0x70, 0xa6, 0xee, 0x8d, 0x38, 0x48, 0x3d, 0x9d, 0x4e, 0x3e, 0xb3, 0xa9, 0x17, 0x08, + 0x7d, 0xc2, 0x6b, 0x69, 0xa7, 0x70, 0x3d, 0xca, 0x05, 0xf1, 0x82, 0x28, 0xc0, 0xfc, 0x19, 0xc1, + 0x7a, 0x23, 0xee, 0x11, 0x63, 0xb8, 0xc2, 0x88, 0x47, 0x4b, 0xa8, 0x8c, 0x2a, 0xb9, 0xae, 0xfa, + 0x8d, 0xef, 0xc1, 0x1a, 0x17, 0x44, 0xd0, 0xd2, 0x4a, 0x19, 0x55, 0xb6, 0x6a, 0xef, 0x58, 0x8b, + 0xc6, 0x64, 0xe9, 0xe3, 0xac, 0x9e, 0xcc, 0xeb, 0x46, 0xe9, 0xe6, 0x01, 0xac, 0xa9, 0x67, 0x5c, + 0x84, 0xed, 0x5e, 0xff, 0xb8, 0xdf, 0x1c, 0x3c, 0x7a, 0xd0, 0xeb, 0x34, 0x4f, 0xda, 0xf7, 0xda, + 0xcd, 0x46, 0xe1, 0x25, 0x9c, 0x87, 0xf5, 0x93, 0x6e, 0xf3, 0xb8, 0xdf, 0x7e, 0xd0, 0x2a, 0x20, + 0x9c, 0x83, 0xb5, 0x6e, 0xf3, 0xb8, 0xf1, 0x49, 0x61, 0xc5, 0x7c, 0x0c, 0xaf, 0x7c, 0xe4, 0x72, + 0xa1, 0x4f, 0xe5, 0x5d, 0xfa, 0xe5, 0x84, 0x72, 0x81, 0x77, 0xe0, 0x6a, 0x40, 0x42, 0xca, 0x44, + 0x8c, 0x1b, 0x3f, 0xe1, 0x1b, 0x90, 0x0b, 0x88, 0x43, 0x07, 0xdc, 0x7d, 0x4e, 0x4b, 0xab, 0x65, + 0x54, 0x59, 0xeb, 0xae, 0x4b, 0x43, 0xcf, 0x7d, 0x4e, 0xf1, 0x4d, 0x00, 0xe5, 0x14, 0xfe, 0x17, + 0x94, 0x95, 0xae, 0xa8, 0x44, 0x15, 0xde, 0x97, 0x06, 0xf3, 0x5b, 0x04, 0xc5, 0x54, 0x31, 0x1e, + 0xf8, 0x8c, 0x53, 0x7c, 0x1f, 0x72, 0xba, 0x47, 0x5e, 0x42, 0xe5, 0xd5, 0xca, 0x46, 0xad, 0x9a, + 0x7d, 
0x14, 0xdd, 0xb3, 0x64, 0xfc, 0x16, 0x5c, 0x63, 0xf4, 0xa9, 0x18, 0x24, 0x38, 0x56, 0x14, + 0xc7, 0xa6, 0x34, 0x77, 0xa6, 0x2c, 0xdf, 0x20, 0x28, 0x9e, 0x84, 0x94, 0x08, 0x3a, 0x3d, 0x65, + 0x41, 0xe7, 0xb7, 0xa1, 0x30, 0x54, 0x09, 0x03, 0x35, 0x72, 0x4f, 0x46, 0x44, 0x47, 0x5f, 0x8b, + 0xec, 0x3d, 0x6d, 0x96, 0xa1, 0xf4, 0xa9, 0x08, 0xc9, 0x59, 0x24, 0x2f, 0xad, 0x96, 0x57, 0x65, + 0xa8, 0xb2, 0x4f, 0x23, 0xb9, 0x79, 0x07, 0x76, 0x66, 0x31, 0x3e, 0xa6, 0x82, 0xc8, 0x76, 0xb0, + 0x01, 0xeb, 0xba, 0xad, 0x98, 0x64, 0xfa, 0x6c, 0x56, 0x00, 0xb7, 0xa8, 0x48, 0x93, 0x9f, 0xb3, + 0x60, 0xe6, 0x33, 0x28, 0x3d, 0x0a, 0x46, 0x89, 0xf3, 0x1b, 0xa3, 0xb1, 0x8e, 0xbf, 0xa4, 0x02, + 0xbe, 0x05, 0x90, 0x80, 0x5f, 0x51, 0xf0, 0x09, 0x0b, 0x7e, 0x1d, 0xf2, 0x53, 0xad, 0x0c, 0xdc, + 0x91, 0x5a, 0x85, 0x5c, 0x77, 0x63, 0x6a, 0x6b, 0x8f, 0xcc, 0x5f, 0x10, 0x5c, 0x9f, 0xab, 0x9d, + 0xa5, 0xbd, 0x85, 0xc5, 0x5b, 0xb0, 0x3d, 0xf4, 0x3d, 0xcf, 0x15, 0x83, 0xa9, 0xe0, 0xa2, 0x01, + 0x6f, 0xd4, 0x0c, 0xbd, 0x36, 0x5a, 0x93, 0x56, 0x5f, 0x87, 0x74, 0x0b, 0x51, 0xd2, 0xd4, 0xc0, + 0xcd, 0x5d, 0x78, 0xb9, 0x11, 0xfa, 0x41, 0x7a, 0x90, 0x97, 0x8d, 0x7e, 0x0f, 0x8a, 0x89, 0xd1, + 0x67, 0x9b, 0xa6, 0x79, 0x08, 0x3b, 0xe9, 0xa4, 0x78, 0xf3, 0x67, 0x5b, 0x45, 0xe9, 0x56, 0x6b, + 0x3f, 0xe6, 0x61, 0x53, 0xe7, 0x1d, 0x4b, 0x05, 0xe0, 0x3f, 0x10, 0x6c, 0xce, 0xa8, 0x08, 0xbf, + 0xbb, 0x58, 0x2a, 0xe7, 0x69, 0xdc, 0x38, 0x58, 0x3a, 0x2f, 0x82, 0x36, 0xf7, 0xbf, 0xfe, 0xfb, + 0x9f, 0xef, 0x57, 0x6c, 0xfc, 0xb6, 0xbc, 0x53, 0xbf, 0x8a, 0xf4, 0x71, 0x14, 0x84, 0xfe, 0x63, + 0x3a, 0x14, 0xdc, 0xae, 0xda, 0x2e, 0xe3, 0x82, 0xb0, 0x21, 0xe5, 0x76, 0xf5, 0x85, 0x7d, 0xa6, + 0xcd, 0x5f, 0x11, 0x6c, 0xcd, 0x2e, 0x3b, 0xce, 0x80, 0x70, 0xae, 0x4a, 0x8d, 0x9b, 0x3a, 0x31, + 0x71, 0x7b, 0x5b, 0x0f, 0xf5, 0xf6, 0x99, 0x87, 0x8a, 0xb0, 0x66, 0x2e, 0x47, 0x58, 0x47, 0x55, + 0xfc, 0x1b, 0x82, 0x8d, 0xc4, 0xbb, 0xc2, 0x77, 0x16, 0x13, 0xce, 0x4b, 0xd1, 0x58, 0xe2, 0xf6, + 0x4a, 0x4d, 0x53, 0xaa, 0xf6, 0x02, 0xd2, 0x33, 0x50, 0xbb, 0xfa, 0x02, 0xff, 0x8e, 0x60, 0x7b, + 0x4e, 0x5e, 0xb8, 0xbe, 0xb8, 0xf0, 0x45, 0xf7, 0xc1, 0xa2, 0x99, 0x7e, 0xa8, 0x38, 0xeb, 0xb5, + 0x7d, 0xc5, 0xa9, 0x4f, 0xcc, 0xc2, 0x6a, 0x8f, 0x46, 0x63, 0x39, 0xdb, 0x9f, 0x10, 0xe4, 0x93, + 0x7a, 0xc3, 0xfb, 0x19, 0xc6, 0x34, 0xaf, 0x4f, 0x63, 0x67, 0x4e, 0xe4, 0x4d, 0xf9, 0xaf, 0x6c, + 0xbe, 0xa7, 0x08, 0xf7, 0xaa, 0xbb, 0x4b, 0x13, 0xe2, 0xbf, 0x10, 0x6c, 0xcd, 0x4a, 0x34, 0xcb, + 0x6e, 0x9e, 0x7b, 0x13, 0x18, 0x87, 0xcb, 0x27, 0xc6, 0xc2, 0x3a, 0x52, 0x0d, 0x1c, 0xe0, 0xff, + 0x36, 0x62, 0xfc, 0x03, 0x82, 0x7c, 0x8f, 0x8a, 0x36, 0xf1, 0x3a, 0xea, 0x43, 0x07, 0x9b, 0x9a, + 0xc4, 0x25, 0x9e, 0x2c, 0x9b, 0x74, 0x6a, 0xda, 0x62, 0x2a, 0x26, 0xf2, 0x9a, 0x6d, 0x85, 0x72, + 0x62, 0x7e, 0xa0, 0x50, 0x42, 0xca, 0xfd, 0x49, 0x38, 0xcc, 0x84, 0x52, 0xe7, 0x89, 0x2a, 0xf2, + 0xb5, 0x4b, 0xac, 0xd6, 0x65, 0x58, 0xad, 0xff, 0x05, 0xcb, 0x49, 0x61, 0xfd, 0x89, 0x00, 0xf7, + 0x29, 0x57, 0x46, 0x1a, 0x7a, 0x2e, 0xe7, 0xf2, 0xbb, 0x0f, 0x57, 0x52, 0x85, 0xe7, 0x43, 0x34, + 0xe2, 0xed, 0x0c, 0x91, 0xf1, 0x8b, 0x7d, 0xa8, 0xb0, 0xdb, 0x66, 0x63, 0x79, 0x6c, 0x31, 0x77, + 0x6a, 0x1d, 0x55, 0xef, 0x7e, 0x87, 0xe0, 0xcd, 0xa1, 0xef, 0x2d, 0xdc, 0xb4, 0xbb, 0xd7, 0x7b, + 0x91, 0x6b, 0xe6, 0x4f, 0xa4, 0x23, 0x75, 0xd3, 0x41, 0x9f, 0xde, 0x8f, 0xd3, 0x1d, 0x7f, 0x4c, + 0x98, 0x63, 0xf9, 0xa1, 0x63, 0x3b, 0x94, 0x29, 0x55, 0xd9, 0x91, 0x8b, 0x04, 0x2e, 0xbf, 0xf8, + 0x9b, 0xff, 0x7d, 0xfd, 0xfb, 0xf4, 0xaa, 0x4a, 0xda, 0xfb, 0x37, 0x00, 0x00, 0xff, 0xff, 0x46, + 0x3d, 0xa5, 0xd9, 0x27, 0x0c, 
0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/admin/instance/v1/spanner_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/admin/instance/v1/spanner_instance_admin.pb.go new file mode 100644 index 000000000..15f812d1b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/admin/instance/v1/spanner_instance_admin.pb.go @@ -0,0 +1,1281 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// DO NOT EDIT! + +/* +Package instance is a generated protocol buffer package. + +It is generated from these files: + google/spanner/admin/instance/v1/spanner_instance_admin.proto + +It has these top-level messages: + InstanceConfig + Instance + ListInstanceConfigsRequest + ListInstanceConfigsResponse + GetInstanceConfigRequest + GetInstanceRequest + CreateInstanceRequest + ListInstancesRequest + ListInstancesResponse + UpdateInstanceRequest + DeleteInstanceRequest + CreateInstanceMetadata + UpdateInstanceMetadata +*/ +package instance + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates the current state of the instance. +type Instance_State int32 + +const ( + // Not specified. + Instance_STATE_UNSPECIFIED Instance_State = 0 + // The instance is still being created. Resources may not be + // available yet, and operations such as database creation may not + // work. + Instance_CREATING Instance_State = 1 + // The instance is fully created and ready to do work such as + // creating databases. + Instance_READY Instance_State = 2 +) + +var Instance_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "CREATING", + 2: "READY", +} +var Instance_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, +} + +func (x Instance_State) String() string { + return proto.EnumName(Instance_State_name, int32(x)) +} +func (Instance_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// A possible configuration for a Cloud Spanner instance. Configurations +// define the geographic placement of nodes and their replication. +type InstanceConfig struct { + // A unique identifier for the instance configuration. 
Values + // are of the form + // `projects//instanceConfigs/[a-z][-a-z0-9]*` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of this instance configuration as it appears in UIs. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *InstanceConfig) Reset() { *m = InstanceConfig{} } +func (m *InstanceConfig) String() string { return proto.CompactTextString(m) } +func (*InstanceConfig) ProtoMessage() {} +func (*InstanceConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *InstanceConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InstanceConfig) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// An isolated set of Cloud Spanner resources on which databases can be hosted. +type Instance struct { + // Required. A unique identifier for the instance, which cannot be changed + // after the instance is created. Values are of the form + // `projects//instances/[a-z][-a-z0-9]*[a-z0-9]`. The final + // segment of the name must be between 6 and 30 characters in length. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Required. The name of the instance's configuration. Values are of the form + // `projects//instanceConfigs/`. See + // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and + // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + Config string `protobuf:"bytes,2,opt,name=config" json:"config,omitempty"` + // Required. The descriptive name for this instance as it appears in UIs. + // Must be unique per project and between 4 and 30 characters in length. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Required. The number of nodes allocated to this instance. + NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount" json:"node_count,omitempty"` + // Output only. The current instance state. For + // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], the state must be + // either omitted or set to `CREATING`. For + // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be + // either omitted or set to `READY`. + State Instance_State `protobuf:"varint,6,opt,name=state,enum=google.spanner.admin.instance.v1.Instance_State" json:"state,omitempty"` + // Cloud Labels are a flexible and lightweight mechanism for organizing cloud + // resources into groups that reflect a customer's organizational needs and + // deployment strategies. Cloud Labels can be used to filter collections of + // resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, + // firewall, load balancing, etc.). + // + // * Label keys must be between 1 and 63 characters long and must conform to + // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. + // * Label values must be between 0 and 63 characters long and must conform + // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. + // * No more than 64 labels can be associated with a given resource. + // + // See https://goo.gl/xmQnxf for more information on and examples of labels. + // + // If you plan to use labels in your own code, please note that additional + // characters may be allowed in the future. 
And so you are advised to use an + // internal label representation, such as JSON, which doesn't rely upon + // specific characters being disallowed. For example, representing labels + // as the string: name + "_" + value would prove problematic if we were to + // allow "_" in a future release. + Labels map[string]string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Instance) Reset() { *m = Instance{} } +func (m *Instance) String() string { return proto.CompactTextString(m) } +func (*Instance) ProtoMessage() {} +func (*Instance) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Instance) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Instance) GetConfig() string { + if m != nil { + return m.Config + } + return "" +} + +func (m *Instance) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Instance) GetNodeCount() int32 { + if m != nil { + return m.NodeCount + } + return 0 +} + +func (m *Instance) GetState() Instance_State { + if m != nil { + return m.State + } + return Instance_STATE_UNSPECIFIED +} + +func (m *Instance) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// The request for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. +type ListInstanceConfigsRequest struct { + // Required. The name of the project for which a list of supported instance + // configurations is requested. Values are of the form + // `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Number of instance configurations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token] + // from a previous [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListInstanceConfigsRequest) Reset() { *m = ListInstanceConfigsRequest{} } +func (m *ListInstanceConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstanceConfigsRequest) ProtoMessage() {} +func (*ListInstanceConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListInstanceConfigsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListInstanceConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInstanceConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. +type ListInstanceConfigsResponse struct { + // The list of requested instance configurations. 
+ InstanceConfigs []*InstanceConfig `protobuf:"bytes,1,rep,name=instance_configs,json=instanceConfigs" json:"instance_configs,omitempty"` + // `next_page_token` can be sent in a subsequent + // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] call to + // fetch more of the matching instance configurations. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInstanceConfigsResponse) Reset() { *m = ListInstanceConfigsResponse{} } +func (m *ListInstanceConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstanceConfigsResponse) ProtoMessage() {} +func (*ListInstanceConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListInstanceConfigsResponse) GetInstanceConfigs() []*InstanceConfig { + if m != nil { + return m.InstanceConfigs + } + return nil +} + +func (m *ListInstanceConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request for +// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. +type GetInstanceConfigRequest struct { + // Required. The name of the requested instance configuration. Values are of + // the form `projects//instanceConfigs/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetInstanceConfigRequest) Reset() { *m = GetInstanceConfigRequest{} } +func (m *GetInstanceConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceConfigRequest) ProtoMessage() {} +func (*GetInstanceConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GetInstanceConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. +type GetInstanceRequest struct { + // Required. The name of the requested instance. Values are of the form + // `projects//instances/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetInstanceRequest) Reset() { *m = GetInstanceRequest{} } +func (m *GetInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceRequest) ProtoMessage() {} +func (*GetInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *GetInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. +type CreateInstanceRequest struct { + // Required. The name of the project in which to create the instance. Values + // are of the form `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The ID of the instance to create. Valid identifiers are of the + // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 6 and 30 characters in + // length. + InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId" json:"instance_id,omitempty"` + // Required. The instance to create. The name may be omitted, but if + // specified must be `/instances/`. 
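// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code above): one way the
// page_size/page_token/next_page_token fields documented above might be used to
// walk every InstanceConfig in a project. It relies on the InstanceAdminClient
// generated further down in this file, assumes a *grpc.ClientConn dialed
// elsewhere with suitable Google credentials, and "projects/my-project" is a
// placeholder parent. From another package these identifiers would be qualified
// via the import "google.golang.org/genproto/googleapis/spanner/admin/instance/v1".
// ----------------------------------------------------------------------------

func listAllInstanceConfigs(ctx context.Context, conn *grpc.ClientConn) ([]*InstanceConfig, error) {
	client := NewInstanceAdminClient(conn)
	req := &ListInstanceConfigsRequest{
		Parent:   "projects/my-project", // placeholder project
		PageSize: 100,                   // 0 or less falls back to the server's maximum page size
	}
	var configs []*InstanceConfig
	for {
		resp, err := client.ListInstanceConfigs(ctx, req)
		if err != nil {
			return nil, err
		}
		configs = append(configs, resp.InstanceConfigs...)
		if resp.NextPageToken == "" {
			return configs, nil // no further pages
		}
		req.PageToken = resp.NextPageToken // continue from the previous response
	}
}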
+ Instance *Instance `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` +} + +func (m *CreateInstanceRequest) Reset() { *m = CreateInstanceRequest{} } +func (m *CreateInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceRequest) ProtoMessage() {} +func (*CreateInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *CreateInstanceRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *CreateInstanceRequest) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +// The request for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. +type ListInstancesRequest struct { + // Required. The name of the project for which a list of instances is + // requested. Values are of the form `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Number of instances to be returned in the response. If 0 or less, defaults + // to the server's maximum allowed page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] from a + // previous [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // An expression for filtering the results of the request. Filter rules are + // case insensitive. The fields eligible for filtering are: + // + // * name + // * display_name + // * labels.key where key is the name of a label + // + // Some examples of using filters are: + // + // * name:* --> The instance has a name. + // * name:Howl --> The instance's name contains the string "howl". + // * name:HOWL --> Equivalent to above. + // * NAME:howl --> Equivalent to above. + // * labels.env:* --> The instance has the label "env". + // * labels.env:dev --> The instance has the label "env" and the value of + // the label contains the string "dev". + // * name:howl labels.env:dev --> The instance's name contains "howl" and + // it has the label "env" with its value + // containing "dev". + Filter string `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListInstancesRequest) Reset() { *m = ListInstancesRequest{} } +func (m *ListInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstancesRequest) ProtoMessage() {} +func (*ListInstancesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListInstancesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListInstancesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInstancesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListInstancesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// The response for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. +type ListInstancesResponse struct { + // The list of requested instances. 
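// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code above): using the filter
// syntax documented for ListInstancesRequest. The filter string combines two of
// the documented examples; the project name is a placeholder and the client
// comes from the stubs generated further down in this file.
// ----------------------------------------------------------------------------

func listDevInstances(ctx context.Context, conn *grpc.ClientConn) ([]*Instance, error) {
	client := NewInstanceAdminClient(conn)
	resp, err := client.ListInstances(ctx, &ListInstancesRequest{
		Parent: "projects/my-project",      // placeholder project
		Filter: "name:howl labels.env:dev", // name contains "howl" AND the "env" label value contains "dev"
	})
	if err != nil {
		return nil, err
	}
	// Further pages, if any, are fetched by re-issuing the request with
	// PageToken set to resp.NextPageToken, as in the previous sketch.
	return resp.Instances, nil
}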
+ Instances []*Instance `protobuf:"bytes,1,rep,name=instances" json:"instances,omitempty"` + // `next_page_token` can be sent in a subsequent + // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances] call to fetch more + // of the matching instances. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInstancesResponse) Reset() { *m = ListInstancesResponse{} } +func (m *ListInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstancesResponse) ProtoMessage() {} +func (*ListInstancesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ListInstancesResponse) GetInstances() []*Instance { + if m != nil { + return m.Instances + } + return nil +} + +func (m *ListInstancesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request for [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. +type UpdateInstanceRequest struct { + // Required. The instance to update, which must always include the instance + // name. Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. + Instance *Instance `protobuf:"bytes,1,opt,name=instance" json:"instance,omitempty"` + // Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated. + // The field mask must always be specified; this prevents any future fields in + // [][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know + // about them. + FieldMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask" json:"field_mask,omitempty"` +} + +func (m *UpdateInstanceRequest) Reset() { *m = UpdateInstanceRequest{} } +func (m *UpdateInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceRequest) ProtoMessage() {} +func (*UpdateInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *UpdateInstanceRequest) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +func (m *UpdateInstanceRequest) GetFieldMask() *google_protobuf3.FieldMask { + if m != nil { + return m.FieldMask + } + return nil +} + +// The request for [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. +type DeleteInstanceRequest struct { + // Required. The name of the instance to be deleted. Values are of the form + // `projects//instances/` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteInstanceRequest) Reset() { *m = DeleteInstanceRequest{} } +func (m *DeleteInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteInstanceRequest) ProtoMessage() {} +func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *DeleteInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Metadata type for the operation returned by +// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. +type CreateInstanceMetadata struct { + // The instance being created. 
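// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code above): the
// UpdateInstanceRequest/FieldMask pattern documented above. Only the paths
// named in the mask are applied; here only node_count changes. google_protobuf3
// is this file's alias for google.golang.org/genproto/protobuf/field_mask, and
// the instance name and node count are placeholders.
// ----------------------------------------------------------------------------

func resizeInstance(ctx context.Context, conn *grpc.ClientConn) (*google_longrunning.Operation, error) {
	client := NewInstanceAdminClient(conn)
	req := &UpdateInstanceRequest{
		Instance: &Instance{
			Name:      "projects/my-project/instances/my-instance", // placeholder instance name
			NodeCount: 5,                                           // desired new node count
		},
		// The mask is required; listing only "node_count" leaves every other
		// field of the instance untouched.
		FieldMask: &google_protobuf3.FieldMask{Paths: []string{"node_count"}},
	}
	// The returned long-running operation tracks the resize until it completes.
	return client.UpdateInstance(ctx, req)
}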
+ Instance *Instance `protobuf:"bytes,1,opt,name=instance" json:"instance,omitempty"` + // The time at which the + // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] request was + // received. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + CancelTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime" json:"cancel_time,omitempty"` + // The time at which this operation failed or was completed successfully. + EndTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` +} + +func (m *CreateInstanceMetadata) Reset() { *m = CreateInstanceMetadata{} } +func (m *CreateInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceMetadata) ProtoMessage() {} +func (*CreateInstanceMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *CreateInstanceMetadata) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +func (m *CreateInstanceMetadata) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *CreateInstanceMetadata) GetCancelTime() *google_protobuf4.Timestamp { + if m != nil { + return m.CancelTime + } + return nil +} + +func (m *CreateInstanceMetadata) GetEndTime() *google_protobuf4.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +// Metadata type for the operation returned by +// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. +type UpdateInstanceMetadata struct { + // The desired end state of the update. + Instance *Instance `protobuf:"bytes,1,opt,name=instance" json:"instance,omitempty"` + // The time at which [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance] + // request was received. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + CancelTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime" json:"cancel_time,omitempty"` + // The time at which this operation failed or was completed successfully. 
+ EndTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` +} + +func (m *UpdateInstanceMetadata) Reset() { *m = UpdateInstanceMetadata{} } +func (m *UpdateInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceMetadata) ProtoMessage() {} +func (*UpdateInstanceMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *UpdateInstanceMetadata) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +func (m *UpdateInstanceMetadata) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *UpdateInstanceMetadata) GetCancelTime() *google_protobuf4.Timestamp { + if m != nil { + return m.CancelTime + } + return nil +} + +func (m *UpdateInstanceMetadata) GetEndTime() *google_protobuf4.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func init() { + proto.RegisterType((*InstanceConfig)(nil), "google.spanner.admin.instance.v1.InstanceConfig") + proto.RegisterType((*Instance)(nil), "google.spanner.admin.instance.v1.Instance") + proto.RegisterType((*ListInstanceConfigsRequest)(nil), "google.spanner.admin.instance.v1.ListInstanceConfigsRequest") + proto.RegisterType((*ListInstanceConfigsResponse)(nil), "google.spanner.admin.instance.v1.ListInstanceConfigsResponse") + proto.RegisterType((*GetInstanceConfigRequest)(nil), "google.spanner.admin.instance.v1.GetInstanceConfigRequest") + proto.RegisterType((*GetInstanceRequest)(nil), "google.spanner.admin.instance.v1.GetInstanceRequest") + proto.RegisterType((*CreateInstanceRequest)(nil), "google.spanner.admin.instance.v1.CreateInstanceRequest") + proto.RegisterType((*ListInstancesRequest)(nil), "google.spanner.admin.instance.v1.ListInstancesRequest") + proto.RegisterType((*ListInstancesResponse)(nil), "google.spanner.admin.instance.v1.ListInstancesResponse") + proto.RegisterType((*UpdateInstanceRequest)(nil), "google.spanner.admin.instance.v1.UpdateInstanceRequest") + proto.RegisterType((*DeleteInstanceRequest)(nil), "google.spanner.admin.instance.v1.DeleteInstanceRequest") + proto.RegisterType((*CreateInstanceMetadata)(nil), "google.spanner.admin.instance.v1.CreateInstanceMetadata") + proto.RegisterType((*UpdateInstanceMetadata)(nil), "google.spanner.admin.instance.v1.UpdateInstanceMetadata") + proto.RegisterEnum("google.spanner.admin.instance.v1.Instance_State", Instance_State_name, Instance_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for InstanceAdmin service + +type InstanceAdminClient interface { + // Lists the supported instance configurations for a given project. + ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error) + // Gets information about a particular instance configuration. + GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error) + // Lists all instances in the given project. + ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) + // Gets information about a particular instance. 
+ GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) + // Creates an instance and begins preparing it to begin serving. The + // returned [long-running operation][google.longrunning.Operation] + // can be used to track the progress of preparing the new + // instance. The instance name is assigned by the caller. If the + // named instance already exists, `CreateInstance` returns + // `ALREADY_EXISTS`. + // + // Immediately upon completion of this request: + // + // * The instance is readable via the API, with all requested attributes + // but no allocated resources. Its state is `CREATING`. + // + // Until completion of the returned operation: + // + // * Cancelling the operation renders the instance immediately unreadable + // via the API. + // * The instance can be deleted. + // * All other attempts to modify the instance are rejected. + // + // Upon completion of the returned operation: + // + // * Billing for all successfully-allocated resources begins (some types + // may have lower than the requested levels). + // * Databases can be created in the instance. + // * The instance's allocated resource levels are readable via the API. + // * The instance's state becomes `READY`. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track creation of the instance. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Updates an instance, and begins allocating or releasing resources + // as requested. The returned [long-running + // operation][google.longrunning.Operation] can be used to track the + // progress of updating the instance. If the named instance does not + // exist, returns `NOT_FOUND`. + // + // Immediately upon completion of this request: + // + // * For resource types for which a decrease in the instance's allocation + // has been requested, billing is based on the newly-requested level. + // + // Until completion of the returned operation: + // + // * Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + // restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, + // after which point it terminates with a `CANCELLED` status. + // * All other attempts to modify the instance are rejected. + // * Reading the instance via the API continues to give the pre-request + // resource levels. + // + // Upon completion of the returned operation: + // + // * Billing begins for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources are available for serving the instance's + // tables. + // * The instance's new resource levels are readable via the API. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track the instance modification. 
The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + // + // Authorization requires `spanner.instances.update` permission on + // resource [name][google.spanner.admin.instance.v1.Instance.name]. + UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes an instance. + // + // Immediately upon completion of the request: + // + // * Billing ceases for all of the instance's reserved resources. + // + // Soon afterward: + // + // * The instance and *all of its databases* immediately and + // irrevocably disappear from the API. All data in the databases + // is permanently deleted. + DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Sets the access control policy on an instance resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.instances.setIamPolicy` on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Gets the access control policy for an instance resource. Returns an empty + // policy if an instance exists but does not have a policy set. + // + // Authorization requires `spanner.instances.getIamPolicy` on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Returns permissions that the caller has on the specified instance resource. + // + // Attempting this RPC on a non-existent Cloud Spanner instance resource will + // result in a NOT_FOUND error if the user has `spanner.instances.list` + // permission on the containing Google Cloud Project. Otherwise returns an + // empty set of permissions. + TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +type instanceAdminClient struct { + cc *grpc.ClientConn +} + +func NewInstanceAdminClient(cc *grpc.ClientConn) InstanceAdminClient { + return &instanceAdminClient{cc} +} + +func (c *instanceAdminClient) ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error) { + out := new(ListInstanceConfigsResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error) { + out := new(InstanceConfig) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) { + out := new(ListInstancesResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) { + out := new(Instance) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for InstanceAdmin service + +type InstanceAdminServer interface { + // Lists the supported instance configurations for a given project. + ListInstanceConfigs(context.Context, *ListInstanceConfigsRequest) (*ListInstanceConfigsResponse, error) + // Gets information about a particular instance configuration. + GetInstanceConfig(context.Context, *GetInstanceConfigRequest) (*InstanceConfig, error) + // Lists all instances in the given project. 
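// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code above): creating an
// instance with the client stubs above and unpacking the long-running
// operation's metadata. Besides this file's imports it would also need
// github.com/golang/protobuf/ptypes (the protobuf library is already imported
// here for its empty and timestamp types). Project, instance id, config and
// display name are placeholders.
// ----------------------------------------------------------------------------

func createSmallInstance(ctx context.Context, conn *grpc.ClientConn) (*CreateInstanceMetadata, error) {
	client := NewInstanceAdminClient(conn)
	op, err := client.CreateInstance(ctx, &CreateInstanceRequest{
		Parent:     "projects/my-project", // placeholder project
		InstanceId: "my-instance",         // must match [a-z][-a-z0-9]*[a-z0-9], 6-30 chars
		Instance: &Instance{
			// Instance.Name may be omitted on create, per the request docs above.
			Config:      "projects/my-project/instanceConfigs/my-config", // placeholder config name
			DisplayName: "My instance",
			NodeCount:   1,
		},
	})
	if err != nil {
		return nil, err
	}
	// The operation's metadata field carries a CreateInstanceMetadata message
	// packed into an Any; unpacking it exposes the start/cancel/end times while
	// the instance is still being prepared.
	md := &CreateInstanceMetadata{}
	if err := ptypes.UnmarshalAny(op.Metadata, md); err != nil {
		return nil, err
	}
	return md, nil
}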
+ ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) + // Gets information about a particular instance. + GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) + // Creates an instance and begins preparing it to begin serving. The + // returned [long-running operation][google.longrunning.Operation] + // can be used to track the progress of preparing the new + // instance. The instance name is assigned by the caller. If the + // named instance already exists, `CreateInstance` returns + // `ALREADY_EXISTS`. + // + // Immediately upon completion of this request: + // + // * The instance is readable via the API, with all requested attributes + // but no allocated resources. Its state is `CREATING`. + // + // Until completion of the returned operation: + // + // * Cancelling the operation renders the instance immediately unreadable + // via the API. + // * The instance can be deleted. + // * All other attempts to modify the instance are rejected. + // + // Upon completion of the returned operation: + // + // * Billing for all successfully-allocated resources begins (some types + // may have lower than the requested levels). + // * Databases can be created in the instance. + // * The instance's allocated resource levels are readable via the API. + // * The instance's state becomes `READY`. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track creation of the instance. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + CreateInstance(context.Context, *CreateInstanceRequest) (*google_longrunning.Operation, error) + // Updates an instance, and begins allocating or releasing resources + // as requested. The returned [long-running + // operation][google.longrunning.Operation] can be used to track the + // progress of updating the instance. If the named instance does not + // exist, returns `NOT_FOUND`. + // + // Immediately upon completion of this request: + // + // * For resource types for which a decrease in the instance's allocation + // has been requested, billing is based on the newly-requested level. + // + // Until completion of the returned operation: + // + // * Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + // restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, + // after which point it terminates with a `CANCELLED` status. + // * All other attempts to modify the instance are rejected. + // * Reading the instance via the API continues to give the pre-request + // resource levels. + // + // Upon completion of the returned operation: + // + // * Billing begins for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources are available for serving the instance's + // tables. + // * The instance's new resource levels are readable via the API. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track the instance modification. 
The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + // + // Authorization requires `spanner.instances.update` permission on + // resource [name][google.spanner.admin.instance.v1.Instance.name]. + UpdateInstance(context.Context, *UpdateInstanceRequest) (*google_longrunning.Operation, error) + // Deletes an instance. + // + // Immediately upon completion of the request: + // + // * Billing ceases for all of the instance's reserved resources. + // + // Soon afterward: + // + // * The instance and *all of its databases* immediately and + // irrevocably disappear from the API. All data in the databases + // is permanently deleted. + DeleteInstance(context.Context, *DeleteInstanceRequest) (*google_protobuf2.Empty, error) + // Sets the access control policy on an instance resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.instances.setIamPolicy` on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Gets the access control policy for an instance resource. Returns an empty + // policy if an instance exists but does not have a policy set. + // + // Authorization requires `spanner.instances.getIamPolicy` on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Returns permissions that the caller has on the specified instance resource. + // + // Attempting this RPC on a non-existent Cloud Spanner instance resource will + // result in a NOT_FOUND error if the user has `spanner.instances.list` + // permission on the containing Google Cloud Project. Otherwise returns an + // empty set of permissions. 
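// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code above): checking which of
// the Spanner instance permissions mentioned in the comments above the caller
// actually holds. google_iam_v11 is this file's alias for
// google.golang.org/genproto/googleapis/iam/v1; the instance name is a
// placeholder.
// ----------------------------------------------------------------------------

func callerPermissions(ctx context.Context, conn *grpc.ClientConn) ([]string, error) {
	client := NewInstanceAdminClient(conn)
	resp, err := client.TestIamPermissions(ctx, &google_iam_v11.TestIamPermissionsRequest{
		Resource: "projects/my-project/instances/my-instance", // placeholder instance
		Permissions: []string{
			"spanner.instances.update",       // required for UpdateInstance, per the comment above
			"spanner.instances.setIamPolicy", // required for SetIamPolicy, per the comment above
		},
	})
	if err != nil {
		return nil, err
	}
	// The response echoes back only the subset of permissions the caller holds.
	return resp.Permissions, nil
}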
+ TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +func RegisterInstanceAdminServer(s *grpc.Server, srv InstanceAdminServer) { + s.RegisterService(&_InstanceAdmin_serviceDesc, srv) +} + +func _InstanceAdmin_ListInstanceConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstanceConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, req.(*ListInstanceConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_GetInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).GetInstanceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).GetInstanceConfig(ctx, req.(*GetInstanceConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstancesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).ListInstances(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).ListInstances(ctx, req.(*ListInstancesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).GetInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).GetInstance(ctx, req.(*GetInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).CreateInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).CreateInstance(ctx, req.(*CreateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_UpdateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).UpdateInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).UpdateInstance(ctx, req.(*UpdateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).DeleteInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).TestIamPermissions(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _InstanceAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.spanner.admin.instance.v1.InstanceAdmin", + HandlerType: (*InstanceAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListInstanceConfigs", + Handler: _InstanceAdmin_ListInstanceConfigs_Handler, + }, + { + MethodName: "GetInstanceConfig", + Handler: _InstanceAdmin_GetInstanceConfig_Handler, + }, + { + MethodName: "ListInstances", + Handler: _InstanceAdmin_ListInstances_Handler, + }, + { + MethodName: "GetInstance", + Handler: _InstanceAdmin_GetInstance_Handler, + }, + { + MethodName: "CreateInstance", + Handler: _InstanceAdmin_CreateInstance_Handler, + }, + { + MethodName: "UpdateInstance", + Handler: _InstanceAdmin_UpdateInstance_Handler, + }, + { + MethodName: "DeleteInstance", + Handler: _InstanceAdmin_DeleteInstance_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _InstanceAdmin_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _InstanceAdmin_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _InstanceAdmin_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/spanner/admin/instance/v1/spanner_instance_admin.proto", +} + +func init() { + proto.RegisterFile("google/spanner/admin/instance/v1/spanner_instance_admin.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 1176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xff, 0x4e, 0x52, 0xa7, 0xf1, 0x73, 0x9a, 0xa6, 0xf3, 0xad, 0x23, 0xe3, 0x50, 0xea, 0x6e, + 0x51, 0x71, 0x0c, 0xf2, 0x52, 0x43, 0x9b, 0x36, 0x25, 0x87, 0xd4, 0x71, 0x52, 0x4b, 0x6d, 0x88, + 0xd6, 0xee, 0x01, 0x38, 0x58, 0x13, 0x7b, 0x62, 0x96, 0xec, 0xce, 0x2e, 0x3b, 0xe3, 0x88, 0x14, + 0xf5, 0x52, 0x71, 0x00, 0x09, 0xc4, 0x01, 0x09, 0xa1, 0x5e, 0x90, 0xb8, 0x22, 0x71, 0xe0, 0x5f, + 0xe0, 0x4f, 0xe0, 0x5f, 0x40, 0xfc, 0x1d, 0x68, 0x66, 0x77, 0x5c, 0xef, 0xda, 0x8e, 0xed, 0x8a, + 0x9e, 0xb8, 0xed, 0xbc, 0x9f, 0x9f, 0xf9, 0xbc, 0xb7, 0xef, 0xed, 0xc2, 0x56, 0xd7, 0xf3, 0xba, + 0x0e, 0x35, 0xb9, 0x4f, 0x18, 0xa3, 0x81, 0x49, 0x3a, 0xae, 0xcd, 0x4c, 0x9b, 0x71, 0x41, 0x58, + 0x9b, 0x9a, 0x27, 0x37, 0xb5, 0xa6, 0xa5, 0x65, 0x2d, 0x65, 0x52, 0xf6, 0x03, 0x4f, 0x78, 0xb8, + 0x10, 0xba, 0x97, 0x23, 0xa3, 0x72, 0xa8, 0xd3, 0xa6, 0xe5, 0x93, 0x9b, 0xf9, 0xd7, 0xa3, 0x04, + 0xc4, 0xb7, 0x4d, 0xc2, 0x98, 0x27, 0x88, 0xb0, 0x3d, 0xc6, 0x43, 0xff, 0x7c, 0x76, 0x50, 0xdb, + 0x13, 0x9f, 0x46, 0xe2, 0x37, 0x22, 0xb1, 0x4d, 0x5c, 0x09, 0xc1, 0x26, 0x6e, 0xcb, 0xf7, 0x1c, + 0xbb, 0x7d, 0x1a, 0xe9, 0xf3, 0x71, 0x7d, 0x4c, 0x77, 0x3d, 0xd2, 0x39, 0x1e, 0xeb, 0x06, 0x3d, + 0xc6, 0x6c, 0xd6, 0x35, 0x3d, 0x9f, 0x06, 0xb1, 0xbc, 0x6b, 0x91, 0x91, 0x3a, 0x1d, 0xf6, 0x8e, + 0x4c, 0xea, 0xfa, 0x42, 0x47, 0x28, 0x24, 0x95, 0x47, 0x36, 0x75, 0x3a, 0x2d, 0x97, 0xf0, 0xe3, + 0xc8, 0xe2, 0x6a, 0xd2, 0x42, 0xd8, 0x2e, 0xe5, 0x82, 0xb8, 0x7e, 0x68, 0x60, 0xec, 0xc1, 0x72, + 0x3d, 0x22, 0xa1, 0xea, 0xb1, 0x23, 0xbb, 0x8b, 0x31, 0x9c, 0x63, 0xc4, 0xa5, 0x39, 0x54, 0x40, + 0xc5, 0xb4, 0xa5, 0x9e, 0xf1, 0x35, 0x58, 0xea, 0xd8, 0xdc, 0x77, 0xc8, 0x69, 0x4b, 
0xe9, 0xe6, + 0x94, 0x2e, 0x13, 0xc9, 0xf6, 0x89, 0x4b, 0x8d, 0xaf, 0xe6, 0x61, 0x51, 0x47, 0x1a, 0x19, 0x63, + 0x15, 0x16, 0xda, 0x2a, 0x43, 0xe4, 0x1d, 0x9d, 0x86, 0x62, 0xcf, 0x0f, 0xc5, 0xc6, 0x57, 0x00, + 0x98, 0xd7, 0xa1, 0xad, 0xb6, 0xd7, 0x63, 0x22, 0x97, 0x2a, 0xa0, 0x62, 0xca, 0x4a, 0x4b, 0x49, + 0x55, 0x0a, 0xf0, 0x2e, 0xa4, 0xb8, 0x20, 0x82, 0xe6, 0x16, 0x0a, 0xa8, 0xb8, 0x5c, 0x79, 0xb7, + 0x3c, 0xa9, 0xd6, 0x65, 0x0d, 0xb4, 0xdc, 0x90, 0x7e, 0x56, 0xe8, 0x8e, 0xf7, 0x61, 0xc1, 0x21, + 0x87, 0xd4, 0xe1, 0xb9, 0xf3, 0x85, 0xf9, 0x62, 0xa6, 0x72, 0x7b, 0x86, 0x40, 0x0f, 0x95, 0x63, + 0x8d, 0x89, 0xe0, 0xd4, 0x8a, 0xa2, 0xe4, 0xef, 0x42, 0x66, 0x40, 0x8c, 0x57, 0x60, 0xfe, 0x98, + 0x9e, 0x46, 0x9c, 0xc8, 0x47, 0x7c, 0x19, 0x52, 0x27, 0xc4, 0xe9, 0x69, 0x3e, 0xc3, 0xc3, 0xe6, + 0xdc, 0x1d, 0x64, 0x6c, 0x40, 0x4a, 0x41, 0xc3, 0x59, 0xb8, 0xd4, 0x68, 0x6e, 0x37, 0x6b, 0xad, + 0xc7, 0xfb, 0x8d, 0x83, 0x5a, 0xb5, 0xbe, 0x5b, 0xaf, 0xed, 0xac, 0xfc, 0x0f, 0x2f, 0xc1, 0x62, + 0xd5, 0xaa, 0x6d, 0x37, 0xeb, 0xfb, 0x7b, 0x2b, 0x08, 0xa7, 0x21, 0x65, 0xd5, 0xb6, 0x77, 0x3e, + 0x5a, 0x99, 0x33, 0x7c, 0xc8, 0x3f, 0xb4, 0xb9, 0x88, 0xd7, 0x94, 0x5b, 0xf4, 0xf3, 0x1e, 0xe5, + 0x42, 0xd6, 0xc0, 0x27, 0x01, 0x65, 0x22, 0x42, 0x11, 0x9d, 0xf0, 0x1a, 0xa4, 0x7d, 0xd2, 0xa5, + 0x2d, 0x6e, 0x3f, 0x09, 0xc1, 0xa4, 0xac, 0x45, 0x29, 0x68, 0xd8, 0x4f, 0x14, 0xfb, 0x4a, 0x29, + 0xbc, 0x63, 0xca, 0xa2, 0xf2, 0x28, 0xf3, 0xa6, 0x14, 0x18, 0xbf, 0x20, 0x58, 0x1b, 0x99, 0x92, + 0xfb, 0x1e, 0xe3, 0x14, 0x7f, 0x02, 0x2b, 0xfd, 0x37, 0x32, 0x2c, 0x39, 0xcf, 0x21, 0xc5, 0xef, + 0x0c, 0x85, 0x0a, 0x83, 0x5a, 0x17, 0xed, 0x78, 0x12, 0x7c, 0x03, 0x2e, 0x32, 0xfa, 0x85, 0x68, + 0x0d, 0x00, 0x0c, 0xb9, 0xbc, 0x20, 0xc5, 0x07, 0x7d, 0x90, 0x65, 0xc8, 0xed, 0xd1, 0x04, 0x44, + 0x4d, 0xca, 0x88, 0x66, 0x35, 0x8a, 0x80, 0x07, 0xec, 0xcf, 0xb2, 0xfc, 0x09, 0x41, 0xb6, 0x1a, + 0x50, 0x22, 0x68, 0xd2, 0x7a, 0x1c, 0xd9, 0x57, 0x21, 0xd3, 0x27, 0xc4, 0xee, 0x44, 0x78, 0x41, + 0x8b, 0xea, 0x1d, 0xbc, 0x0b, 0x8b, 0xfa, 0xa4, 0xe8, 0xce, 0x54, 0x4a, 0xd3, 0x33, 0x65, 0xf5, + 0x7d, 0x8d, 0x67, 0x08, 0x2e, 0x0f, 0x56, 0xe6, 0x55, 0xb6, 0x81, 0x8c, 0x79, 0x64, 0x3b, 0x82, + 0x06, 0xb9, 0x73, 0x61, 0xcc, 0xf0, 0x64, 0x7c, 0x83, 0x20, 0x9b, 0x00, 0x11, 0x35, 0xc6, 0x03, + 0x48, 0x6b, 0xa8, 0xba, 0x23, 0x66, 0xb9, 0xe7, 0x0b, 0xe7, 0xa9, 0xbb, 0xe0, 0x39, 0x82, 0xec, + 0x63, 0xbf, 0x33, 0xa2, 0x56, 0x83, 0x94, 0xa3, 0x97, 0xa7, 0x1c, 0xdf, 0x05, 0x78, 0x31, 0x83, + 0x15, 0x88, 0x4c, 0x25, 0xaf, 0x23, 0xe9, 0x21, 0x5c, 0xde, 0x95, 0x26, 0x8f, 0x08, 0x3f, 0xb6, + 0xd2, 0x47, 0xfa, 0xd1, 0x78, 0x1b, 0xb2, 0x3b, 0xd4, 0xa1, 0xc3, 0xd8, 0x46, 0x75, 0xdd, 0xf7, + 0x73, 0xb0, 0x1a, 0xef, 0xba, 0x47, 0x54, 0x90, 0x0e, 0x11, 0xe4, 0xdf, 0xbc, 0x0a, 0x17, 0x24, + 0x10, 0x2d, 0xb9, 0x32, 0xc6, 0x5e, 0xa5, 0xa9, 0xf7, 0x89, 0x95, 0x56, 0xd6, 0xf2, 0x8c, 0xef, + 0x41, 0xa6, 0x2d, 0x63, 0x38, 0xa1, 0xef, 0xfc, 0x44, 0x5f, 0x08, 0xcd, 0x95, 0xf3, 0x2d, 0x58, + 0xa4, 0xac, 0x13, 0x7a, 0x9e, 0x9b, 0xe8, 0x79, 0x9e, 0xb2, 0x8e, 0x3c, 0x29, 0x46, 0xe2, 0xb5, + 0xfd, 0x8f, 0x33, 0x52, 0xf9, 0x7b, 0x09, 0x2e, 0xe8, 0x5b, 0x6c, 0xcb, 0xfb, 0xe1, 0x3f, 0x10, + 0xfc, 0x7f, 0xc4, 0xa8, 0xc6, 0x1f, 0x4c, 0xa6, 0x63, 0xfc, 0x52, 0xc9, 0x6f, 0xbd, 0xa4, 0x77, + 0x38, 0x06, 0x0c, 0xf3, 0xd9, 0x9f, 0x7f, 0xfd, 0x30, 0xb7, 0x8e, 0xdf, 0x92, 0x1f, 0x48, 0x5f, + 0x86, 0x93, 0x68, 0xcb, 0x0f, 0xbc, 0xcf, 0x68, 0x5b, 0x70, 0xb3, 0xf4, 0xd4, 0x4c, 0xce, 0xfc, + 0xdf, 0x11, 0x5c, 0x1a, 0x1a, 0xe6, 0x78, 0x73, 0x32, 0x8a, 0x71, 0x1b, 0x20, 0x3f, 0xf3, 0x22, + 0x4a, 0x80, 
0x96, 0xaf, 0xe4, 0x00, 0xe4, 0x24, 0x62, 0xb3, 0xf4, 0x14, 0xff, 0x8a, 0xe0, 0x42, + 0x6c, 0x0c, 0xe2, 0xdb, 0xb3, 0xd1, 0xd6, 0xa7, 0x7b, 0x63, 0x66, 0xbf, 0x88, 0xe8, 0x75, 0x85, + 0xf9, 0x3a, 0xbe, 0x36, 0x89, 0x68, 0x8e, 0x9f, 0x23, 0xc8, 0x0c, 0xb0, 0x85, 0xdf, 0x9f, 0x89, + 0x5c, 0x8d, 0x74, 0x86, 0xb7, 0x2c, 0x01, 0x6e, 0x1c, 0xa1, 0x8a, 0xca, 0x1f, 0x11, 0x2c, 0xc7, + 0x67, 0x1f, 0x9e, 0x82, 0x93, 0x91, 0x3b, 0x3a, 0x7f, 0x45, 0x3b, 0x0e, 0x7c, 0x84, 0x97, 0x3f, + 0xd4, 0x1f, 0xe1, 0xc6, 0x3b, 0x0a, 0xd5, 0x0d, 0x63, 0x32, 0x65, 0x9b, 0xa8, 0x84, 0x7f, 0x46, + 0xb0, 0x1c, 0x1f, 0x41, 0xd3, 0x00, 0x1b, 0xb9, 0x90, 0x26, 0x01, 0xbb, 0xa5, 0x80, 0x99, 0x95, + 0x92, 0x02, 0xd6, 0x0f, 0x77, 0x16, 0x6f, 0x12, 0xe1, 0xb7, 0x08, 0x96, 0xe3, 0x4b, 0x66, 0x1a, + 0x84, 0x23, 0xd7, 0x52, 0x7e, 0x75, 0x68, 0x06, 0xd5, 0xe4, 0xaf, 0x89, 0xae, 0x64, 0x69, 0x8a, + 0x4a, 0x7e, 0x8d, 0x60, 0xa9, 0x41, 0x45, 0x9d, 0xb8, 0x07, 0xea, 0xc7, 0x08, 0x1b, 0x3a, 0xa6, + 0x4d, 0x5c, 0x99, 0x79, 0x50, 0xa9, 0xf3, 0x66, 0x13, 0x36, 0xa1, 0xd6, 0xd8, 0x52, 0x69, 0x37, + 0x8c, 0x8a, 0x4a, 0x1b, 0x50, 0xee, 0xf5, 0x82, 0xf6, 0x78, 0x32, 0xf8, 0x40, 0x64, 0xc9, 0x8c, + 0x84, 0xb2, 0x77, 0x16, 0x94, 0xbd, 0x57, 0x06, 0xa5, 0x9b, 0x80, 0xf2, 0x1b, 0x02, 0xdc, 0xa4, + 0x5c, 0x09, 0x69, 0xe0, 0xda, 0x9c, 0xcb, 0xff, 0x41, 0x5c, 0x4c, 0x24, 0x1b, 0x36, 0xd1, 0xb0, + 0xd6, 0xa7, 0xb0, 0x8c, 0x66, 0x42, 0x55, 0x41, 0xdd, 0x32, 0xee, 0x4c, 0x07, 0x55, 0x0c, 0x45, + 0xda, 0x44, 0xa5, 0xfb, 0xdf, 0x21, 0x78, 0xb3, 0xed, 0xb9, 0x13, 0x1b, 0xe9, 0xfe, 0x6b, 0x8d, + 0x50, 0x15, 0xdb, 0x4a, 0x07, 0xb2, 0x7d, 0x0e, 0xd0, 0xc7, 0x0f, 0x22, 0xf7, 0xae, 0xe7, 0x10, + 0xd6, 0x2d, 0x7b, 0x41, 0xd7, 0xec, 0x52, 0xa6, 0x9a, 0xcb, 0x0c, 0x55, 0xc4, 0xb7, 0xf9, 0xf8, + 0xff, 0xff, 0x7b, 0xfa, 0xf9, 0x70, 0x41, 0x39, 0xbd, 0xf7, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x22, 0x58, 0xb5, 0xfa, 0x33, 0x10, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go new file mode 100644 index 000000000..8544b3174 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go @@ -0,0 +1,438 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/v1/keys.proto +// DO NOT EDIT! + +/* +Package spanner is a generated protocol buffer package. + +It is generated from these files: + google/spanner/v1/keys.proto + google/spanner/v1/mutation.proto + google/spanner/v1/query_plan.proto + google/spanner/v1/result_set.proto + google/spanner/v1/spanner.proto + google/spanner/v1/transaction.proto + google/spanner/v1/type.proto + +It has these top-level messages: + KeyRange + KeySet + Mutation + PlanNode + QueryPlan + ResultSet + PartialResultSet + ResultSetMetadata + ResultSetStats + CreateSessionRequest + Session + GetSessionRequest + DeleteSessionRequest + ExecuteSqlRequest + ReadRequest + BeginTransactionRequest + CommitRequest + CommitResponse + RollbackRequest + TransactionOptions + Transaction + TransactionSelector + Type + StructType +*/ +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// KeyRange represents a range of rows in a table or index. +// +// A range has a start key and an end key. These keys can be open or +// closed, indicating if the range includes rows with that key. +// +// Keys are represented by lists, where the ith value in the list +// corresponds to the ith component of the table or index primary key. +// Individual values are encoded as described [here][google.spanner.v1.TypeCode]. +// +// For example, consider the following table definition: +// +// CREATE TABLE UserEvents ( +// UserName STRING(MAX), +// EventDate STRING(10) +// ) PRIMARY KEY(UserName, EventDate); +// +// The following keys name rows in this table: +// +// ["Bob", "2014-09-23"] +// ["Alfred", "2015-06-12"] +// +// Since the `UserEvents` table's `PRIMARY KEY` clause names two +// columns, each `UserEvents` key has two elements; the first is the +// `UserName`, and the second is the `EventDate`. +// +// Key ranges with multiple components are interpreted +// lexicographically by component using the table or index key's declared +// sort order. For example, the following range returns all events for +// user `"Bob"` that occurred in the year 2015: +// +// "start_closed": ["Bob", "2015-01-01"] +// "end_closed": ["Bob", "2015-12-31"] +// +// Start and end keys can omit trailing key components. This affects the +// inclusion and exclusion of rows that exactly match the provided key +// components: if the key is closed, then rows that exactly match the +// provided components are included; if the key is open, then rows +// that exactly match are not included. +// +// For example, the following range includes all events for `"Bob"` that +// occurred during and after the year 2000: +// +// "start_closed": ["Bob", "2000-01-01"] +// "end_closed": ["Bob"] +// +// The next example retrieves all events for `"Bob"`: +// +// "start_closed": ["Bob"] +// "end_closed": ["Bob"] +// +// To retrieve events before the year 2000: +// +// "start_closed": ["Bob"] +// "end_open": ["Bob", "2000-01-01"] +// +// The following range includes all rows in the table: +// +// "start_closed": [] +// "end_closed": [] +// +// This range returns all users whose `UserName` begins with any +// character from A to C: +// +// "start_closed": ["A"] +// "end_open": ["D"] +// +// This range returns all users whose `UserName` begins with B: +// +// "start_closed": ["B"] +// "end_open": ["C"] +// +// Key ranges honor column sort order. For example, suppose a table is +// defined as follows: +// +// CREATE TABLE DescendingSortedTable { +// Key INT64, +// ... +// ) PRIMARY KEY(Key DESC); +// +// The following range retrieves all rows with key values between 1 +// and 100 inclusive: +// +// "start_closed": ["100"] +// "end_closed": ["1"] +// +// Note that 100 is passed as the start, and 1 is passed as the end, +// because `Key` is a descending column in the schema. +type KeyRange struct { + // The start key must be provided. It can be either closed or open. 
+ // + // Types that are valid to be assigned to StartKeyType: + // *KeyRange_StartClosed + // *KeyRange_StartOpen + StartKeyType isKeyRange_StartKeyType `protobuf_oneof:"start_key_type"` + // The end key must be provided. It can be either closed or open. + // + // Types that are valid to be assigned to EndKeyType: + // *KeyRange_EndClosed + // *KeyRange_EndOpen + EndKeyType isKeyRange_EndKeyType `protobuf_oneof:"end_key_type"` +} + +func (m *KeyRange) Reset() { *m = KeyRange{} } +func (m *KeyRange) String() string { return proto.CompactTextString(m) } +func (*KeyRange) ProtoMessage() {} +func (*KeyRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isKeyRange_StartKeyType interface { + isKeyRange_StartKeyType() +} +type isKeyRange_EndKeyType interface { + isKeyRange_EndKeyType() +} + +type KeyRange_StartClosed struct { + StartClosed *google_protobuf1.ListValue `protobuf:"bytes,1,opt,name=start_closed,json=startClosed,oneof"` +} +type KeyRange_StartOpen struct { + StartOpen *google_protobuf1.ListValue `protobuf:"bytes,2,opt,name=start_open,json=startOpen,oneof"` +} +type KeyRange_EndClosed struct { + EndClosed *google_protobuf1.ListValue `protobuf:"bytes,3,opt,name=end_closed,json=endClosed,oneof"` +} +type KeyRange_EndOpen struct { + EndOpen *google_protobuf1.ListValue `protobuf:"bytes,4,opt,name=end_open,json=endOpen,oneof"` +} + +func (*KeyRange_StartClosed) isKeyRange_StartKeyType() {} +func (*KeyRange_StartOpen) isKeyRange_StartKeyType() {} +func (*KeyRange_EndClosed) isKeyRange_EndKeyType() {} +func (*KeyRange_EndOpen) isKeyRange_EndKeyType() {} + +func (m *KeyRange) GetStartKeyType() isKeyRange_StartKeyType { + if m != nil { + return m.StartKeyType + } + return nil +} +func (m *KeyRange) GetEndKeyType() isKeyRange_EndKeyType { + if m != nil { + return m.EndKeyType + } + return nil +} + +func (m *KeyRange) GetStartClosed() *google_protobuf1.ListValue { + if x, ok := m.GetStartKeyType().(*KeyRange_StartClosed); ok { + return x.StartClosed + } + return nil +} + +func (m *KeyRange) GetStartOpen() *google_protobuf1.ListValue { + if x, ok := m.GetStartKeyType().(*KeyRange_StartOpen); ok { + return x.StartOpen + } + return nil +} + +func (m *KeyRange) GetEndClosed() *google_protobuf1.ListValue { + if x, ok := m.GetEndKeyType().(*KeyRange_EndClosed); ok { + return x.EndClosed + } + return nil +} + +func (m *KeyRange) GetEndOpen() *google_protobuf1.ListValue { + if x, ok := m.GetEndKeyType().(*KeyRange_EndOpen); ok { + return x.EndOpen + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
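Editor's note: the oneof wrappers above are how the ranges described in the KeyRange comment are actually built. Below is a minimal sketch, not part of the generated file, that constructs the closed range over Bob's 2015 events from that comment and places it in a KeySet together with a single key; the strList helper is invented here for brevity.

package main

import (
    "fmt"

    structpb "github.com/golang/protobuf/ptypes/struct"
    spanner "google.golang.org/genproto/googleapis/spanner/v1"
)

// strList wraps plain strings in the ListValue encoding used for key components.
func strList(parts ...string) *structpb.ListValue {
    vals := make([]*structpb.Value, 0, len(parts))
    for _, p := range parts {
        vals = append(vals, &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: p}})
    }
    return &structpb.ListValue{Values: vals}
}

func main() {
    // All of Bob's events in 2015, both endpoints closed, as in the comment above.
    r := &spanner.KeyRange{
        StartKeyType: &spanner.KeyRange_StartClosed{StartClosed: strList("Bob", "2015-01-01")},
        EndKeyType:   &spanner.KeyRange_EndClosed{EndClosed: strList("Bob", "2015-12-31")},
    }
    // A KeySet may mix individual keys and ranges; overlapping keys are only yielded once.
    ks := &spanner.KeySet{
        Keys:   []*structpb.ListValue{strList("Alfred", "2015-06-12")},
        Ranges: []*spanner.KeyRange{r},
    }
    fmt.Println(r.GetStartClosed(), ks.All)
}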
+func (*KeyRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _KeyRange_OneofMarshaler, _KeyRange_OneofUnmarshaler, _KeyRange_OneofSizer, []interface{}{ + (*KeyRange_StartClosed)(nil), + (*KeyRange_StartOpen)(nil), + (*KeyRange_EndClosed)(nil), + (*KeyRange_EndOpen)(nil), + } +} + +func _KeyRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*KeyRange) + // start_key_type + switch x := m.StartKeyType.(type) { + case *KeyRange_StartClosed: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartClosed); err != nil { + return err + } + case *KeyRange_StartOpen: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartOpen); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("KeyRange.StartKeyType has unexpected type %T", x) + } + // end_key_type + switch x := m.EndKeyType.(type) { + case *KeyRange_EndClosed: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndClosed); err != nil { + return err + } + case *KeyRange_EndOpen: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndOpen); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("KeyRange.EndKeyType has unexpected type %T", x) + } + return nil +} + +func _KeyRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*KeyRange) + switch tag { + case 1: // start_key_type.start_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.StartKeyType = &KeyRange_StartClosed{msg} + return true, err + case 2: // start_key_type.start_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.StartKeyType = &KeyRange_StartOpen{msg} + return true, err + case 3: // end_key_type.end_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.EndKeyType = &KeyRange_EndClosed{msg} + return true, err + case 4: // end_key_type.end_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.EndKeyType = &KeyRange_EndOpen{msg} + return true, err + default: + return false, nil + } +} + +func _KeyRange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*KeyRange) + // start_key_type + switch x := m.StartKeyType.(type) { + case *KeyRange_StartClosed: + s := proto.Size(x.StartClosed) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *KeyRange_StartOpen: + s := proto.Size(x.StartOpen) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // end_key_type + switch x := m.EndKeyType.(type) { + case *KeyRange_EndClosed: + s := proto.Size(x.EndClosed) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *KeyRange_EndOpen: + s := proto.Size(x.EndOpen) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in 
oneof", x)) + } + return n +} + +// `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All +// the keys are expected to be in the same table or index. The keys need +// not be sorted in any particular way. +// +// If the same key is specified multiple times in the set (for example +// if two ranges, two keys, or a key and a range overlap), Cloud Spanner +// behaves as if the key were only specified once. +type KeySet struct { + // A list of specific keys. Entries in `keys` should have exactly as + // many elements as there are columns in the primary or index key + // with which this `KeySet` is used. Individual key values are + // encoded as described [here][google.spanner.v1.TypeCode]. + Keys []*google_protobuf1.ListValue `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` + // A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more information about + // key range specifications. + Ranges []*KeyRange `protobuf:"bytes,2,rep,name=ranges" json:"ranges,omitempty"` + // For convenience `all` can be set to `true` to indicate that this + // `KeySet` matches all keys in the table or index. Note that any keys + // specified in `keys` or `ranges` are only yielded once. + All bool `protobuf:"varint,3,opt,name=all" json:"all,omitempty"` +} + +func (m *KeySet) Reset() { *m = KeySet{} } +func (m *KeySet) String() string { return proto.CompactTextString(m) } +func (*KeySet) ProtoMessage() {} +func (*KeySet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *KeySet) GetKeys() []*google_protobuf1.ListValue { + if m != nil { + return m.Keys + } + return nil +} + +func (m *KeySet) GetRanges() []*KeyRange { + if m != nil { + return m.Ranges + } + return nil +} + +func (m *KeySet) GetAll() bool { + if m != nil { + return m.All + } + return false +} + +func init() { + proto.RegisterType((*KeyRange)(nil), "google.spanner.v1.KeyRange") + proto.RegisterType((*KeySet)(nil), "google.spanner.v1.KeySet") +} + +func init() { proto.RegisterFile("google/spanner/v1/keys.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x4b, 0xc3, 0x30, + 0x14, 0xc6, 0xed, 0x36, 0xe6, 0x96, 0x8d, 0x31, 0x0b, 0x42, 0x99, 0x1e, 0xc6, 0x4e, 0x9e, 0x12, + 0xe6, 0x0e, 0x0a, 0x3b, 0x08, 0xf5, 0x22, 0x4c, 0x70, 0x54, 0xf0, 0xe0, 0xc1, 0x91, 0xad, 0xcf, + 0x50, 0x56, 0x5f, 0x42, 0x93, 0x0d, 0x7a, 0xf2, 0x5f, 0xf1, 0x4f, 0x95, 0xa4, 0xa9, 0x08, 0x03, + 0xdd, 0x2d, 0x8f, 0xef, 0xfb, 0xbd, 0xef, 0xf5, 0xbd, 0x92, 0x4b, 0x21, 0xa5, 0xc8, 0x81, 0x69, + 0xc5, 0x11, 0xa1, 0x60, 0xfb, 0x29, 0xdb, 0x42, 0xa9, 0xa9, 0x2a, 0xa4, 0x91, 0xe1, 0x59, 0xa5, + 0x52, 0xaf, 0xd2, 0xfd, 0x74, 0x54, 0x03, 0x5c, 0x65, 0x8c, 0x23, 0x4a, 0xc3, 0x4d, 0x26, 0xd1, + 0x03, 0x3f, 0xaa, 0xab, 0xd6, 0xbb, 0x77, 0xa6, 0x4d, 0xb1, 0xdb, 0x98, 0x4a, 0x9d, 0x7c, 0x35, + 0x48, 0x67, 0x01, 0x65, 0xc2, 0x51, 0x40, 0x78, 0x47, 0xfa, 0xda, 0xf0, 0xc2, 0xac, 0x36, 0xb9, + 0xd4, 0x90, 0x46, 0xc1, 0x38, 0xb8, 0xea, 0x5d, 0x8f, 0xa8, 0x8f, 0xac, 0x3b, 0xd0, 0xc7, 0x4c, + 0x9b, 0x17, 0x9e, 0xef, 0xe0, 0xe1, 0x24, 0xe9, 0x39, 0xe2, 0xde, 0x01, 0xe1, 0x9c, 0x90, 0xaa, + 0x81, 0x54, 0x80, 0x51, 0xe3, 0x08, 0xbc, 0xeb, 0xfc, 0x4f, 0x0a, 0xd0, 0xc2, 0x80, 0x69, 0x9d, + 0xdd, 0xfc, 0x17, 0x0e, 0x92, 0x2e, 0x60, 0xea, 0x93, 0x6f, 0x48, 0xc7, 0xc2, 0x2e, 0xb7, 0x75, + 0x04, 0x7a, 0x0a, 0x98, 0xda, 0xd4, 0x78, 0x48, 0x06, 0xd5, 0xc8, 0x5b, 0x28, 0x57, 0xa6, 0x54, + 0x10, 0x0f, 0x48, 0xdf, 
0xb6, 0xaa, 0xeb, 0xc9, 0x27, 0x69, 0x2f, 0xa0, 0x7c, 0x06, 0x13, 0x52, + 0xd2, 0xb2, 0x97, 0x88, 0x82, 0x71, 0xf3, 0xef, 0x80, 0xc4, 0xf9, 0xc2, 0x19, 0x69, 0x17, 0x76, + 0xb1, 0x3a, 0x6a, 0x38, 0xe2, 0x82, 0x1e, 0x1c, 0x8f, 0xd6, 0xcb, 0x4f, 0xbc, 0x35, 0x1c, 0x92, + 0x26, 0xcf, 0x73, 0xf7, 0xfd, 0x9d, 0xc4, 0x3e, 0xe3, 0x37, 0x72, 0xbe, 0x91, 0x1f, 0x87, 0x6c, + 0xdc, 0x5d, 0x40, 0xa9, 0x97, 0x36, 0x7d, 0x19, 0xbc, 0xde, 0x7a, 0x5d, 0xc8, 0x9c, 0xa3, 0xa0, + 0xb2, 0x10, 0x4c, 0x00, 0xba, 0xd9, 0x58, 0x25, 0x71, 0x95, 0xe9, 0x5f, 0x7f, 0xd5, 0xdc, 0x3f, + 0xd7, 0x6d, 0x67, 0x9a, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0x83, 0x58, 0x82, 0xb7, 0x79, 0x02, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go new file mode 100644 index 000000000..0147934cc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go @@ -0,0 +1,346 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/v1/mutation.proto +// DO NOT EDIT! + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A modification to one or more Cloud Spanner rows. Mutations can be +// applied to a Cloud Spanner database by sending them in a +// [Commit][google.spanner.v1.Spanner.Commit] call. +type Mutation struct { + // Required. The operation to perform. + // + // Types that are valid to be assigned to Operation: + // *Mutation_Insert + // *Mutation_Update + // *Mutation_InsertOrUpdate + // *Mutation_Replace + // *Mutation_Delete_ + Operation isMutation_Operation `protobuf_oneof:"operation"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +type isMutation_Operation interface { + isMutation_Operation() +} + +type Mutation_Insert struct { + Insert *Mutation_Write `protobuf:"bytes,1,opt,name=insert,oneof"` +} +type Mutation_Update struct { + Update *Mutation_Write `protobuf:"bytes,2,opt,name=update,oneof"` +} +type Mutation_InsertOrUpdate struct { + InsertOrUpdate *Mutation_Write `protobuf:"bytes,3,opt,name=insert_or_update,json=insertOrUpdate,oneof"` +} +type Mutation_Replace struct { + Replace *Mutation_Write `protobuf:"bytes,4,opt,name=replace,oneof"` +} +type Mutation_Delete_ struct { + Delete *Mutation_Delete `protobuf:"bytes,5,opt,name=delete,oneof"` +} + +func (*Mutation_Insert) isMutation_Operation() {} +func (*Mutation_Update) isMutation_Operation() {} +func (*Mutation_InsertOrUpdate) isMutation_Operation() {} +func (*Mutation_Replace) isMutation_Operation() {} +func (*Mutation_Delete_) isMutation_Operation() {} + +func (m *Mutation) GetOperation() isMutation_Operation { + if m != nil { + return m.Operation + } + return nil +} + +func (m *Mutation) GetInsert() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_Insert); ok { + return x.Insert + } + return nil +} + +func (m *Mutation) GetUpdate() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_Update); ok { + return x.Update + } + return nil +} + +func (m *Mutation) 
GetInsertOrUpdate() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_InsertOrUpdate); ok { + return x.InsertOrUpdate + } + return nil +} + +func (m *Mutation) GetReplace() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_Replace); ok { + return x.Replace + } + return nil +} + +func (m *Mutation) GetDelete() *Mutation_Delete { + if x, ok := m.GetOperation().(*Mutation_Delete_); ok { + return x.Delete + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, _Mutation_OneofSizer, []interface{}{ + (*Mutation_Insert)(nil), + (*Mutation_Update)(nil), + (*Mutation_InsertOrUpdate)(nil), + (*Mutation_Replace)(nil), + (*Mutation_Delete_)(nil), + } +} + +func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Insert); err != nil { + return err + } + case *Mutation_Update: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *Mutation_InsertOrUpdate: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InsertOrUpdate); err != nil { + return err + } + case *Mutation_Replace: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Replace); err != nil { + return err + } + case *Mutation_Delete_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Delete); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Mutation.Operation has unexpected type %T", x) + } + return nil +} + +func _Mutation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Mutation) + switch tag { + case 1: // operation.insert + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Insert{msg} + return true, err + case 2: // operation.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Update{msg} + return true, err + case 3: // operation.insert_or_update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_InsertOrUpdate{msg} + return true, err + case 4: // operation.replace + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Replace{msg} + return true, err + case 5: // operation.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Delete) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Delete_{msg} + return true, err + default: + return false, nil + } +} + +func _Mutation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + s := proto.Size(x.Insert) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Update: + s 
:= proto.Size(x.Update) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_InsertOrUpdate: + s := proto.Size(x.InsertOrUpdate) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Replace: + s := proto.Size(x.Replace) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Delete_: + s := proto.Size(x.Delete) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Arguments to [insert][google.spanner.v1.Mutation.insert], [update][google.spanner.v1.Mutation.update], [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and +// [replace][google.spanner.v1.Mutation.replace] operations. +type Mutation_Write struct { + // Required. The table whose rows will be written. + Table string `protobuf:"bytes,1,opt,name=table" json:"table,omitempty"` + // The names of the columns in [table][google.spanner.v1.Mutation.Write.table] to be written. + // + // The list of columns must contain enough columns to allow + // Cloud Spanner to derive values for all primary key columns in the + // row(s) to be modified. + Columns []string `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"` + // The values to be written. `values` can contain more than one + // list of values. If it does, then multiple rows are written, one + // for each entry in `values`. Each list in `values` must have + // exactly as many entries as there are entries in [columns][google.spanner.v1.Mutation.Write.columns] + // above. Sending multiple lists is equivalent to sending multiple + // `Mutation`s, each containing one `values` entry and repeating + // [table][google.spanner.v1.Mutation.Write.table] and [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in each list are + // encoded as described [here][google.spanner.v1.TypeCode]. + Values []*google_protobuf1.ListValue `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` +} + +func (m *Mutation_Write) Reset() { *m = Mutation_Write{} } +func (m *Mutation_Write) String() string { return proto.CompactTextString(m) } +func (*Mutation_Write) ProtoMessage() {} +func (*Mutation_Write) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +func (m *Mutation_Write) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *Mutation_Write) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +func (m *Mutation_Write) GetValues() []*google_protobuf1.ListValue { + if m != nil { + return m.Values + } + return nil +} + +// Arguments to [delete][google.spanner.v1.Mutation.delete] operations. +type Mutation_Delete struct { + // Required. The table whose rows will be deleted. + Table string `protobuf:"bytes,1,opt,name=table" json:"table,omitempty"` + // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. 
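Editor's note: a minimal sketch, not part of the generated file, of how the oneof wrappers above combine into insert and delete mutations against the UserEvents table used in the keys.pb.go comments; the row helper is invented for brevity, and a real commit would carry the returned slice in a CommitRequest.

package main

import (
    structpb "github.com/golang/protobuf/ptypes/struct"
    spanner "google.golang.org/genproto/googleapis/spanner/v1"
)

// row wraps string cells in the ListValue encoding that Mutation_Write expects.
func row(cells ...string) *structpb.ListValue {
    vs := make([]*structpb.Value, 0, len(cells))
    for _, c := range cells {
        vs = append(vs, &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: c}})
    }
    return &structpb.ListValue{Values: vs}
}

// buildMutations returns one insert and one delete, typically sent together in a Commit call.
func buildMutations() []*spanner.Mutation {
    insert := &spanner.Mutation{
        Operation: &spanner.Mutation_Insert{Insert: &spanner.Mutation_Write{
            Table:   "UserEvents",
            Columns: []string{"UserName", "EventDate"},
            // Two rows: each ListValue must have exactly one value per listed column.
            Values: []*structpb.ListValue{row("Bob", "2014-09-23"), row("Alfred", "2015-06-12")},
        }},
    }
    del := &spanner.Mutation{
        Operation: &spanner.Mutation_Delete_{Delete: &spanner.Mutation_Delete{
            Table:  "UserEvents",
            KeySet: &spanner.KeySet{Keys: []*structpb.ListValue{row("Bob", "2014-09-23")}},
        }},
    }
    return []*spanner.Mutation{insert, del}
}

func main() { _ = buildMutations() }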
+ KeySet *KeySet `protobuf:"bytes,2,opt,name=key_set,json=keySet" json:"key_set,omitempty"` +} + +func (m *Mutation_Delete) Reset() { *m = Mutation_Delete{} } +func (m *Mutation_Delete) String() string { return proto.CompactTextString(m) } +func (*Mutation_Delete) ProtoMessage() {} +func (*Mutation_Delete) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 1} } + +func (m *Mutation_Delete) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *Mutation_Delete) GetKeySet() *KeySet { + if m != nil { + return m.KeySet + } + return nil +} + +func init() { + proto.RegisterType((*Mutation)(nil), "google.spanner.v1.Mutation") + proto.RegisterType((*Mutation_Write)(nil), "google.spanner.v1.Mutation.Write") + proto.RegisterType((*Mutation_Delete)(nil), "google.spanner.v1.Mutation.Delete") +} + +func init() { proto.RegisterFile("google/spanner/v1/mutation.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 382 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xd1, 0x4b, 0xeb, 0x30, + 0x14, 0xc6, 0xef, 0xd6, 0xad, 0xbb, 0xcb, 0xb8, 0x17, 0x2d, 0x0a, 0xb5, 0xf8, 0x50, 0xf7, 0xb4, + 0xa7, 0x94, 0xd5, 0x17, 0x61, 0xfa, 0x32, 0x7c, 0x10, 0x74, 0x38, 0x2a, 0x2a, 0xf8, 0x32, 0xd2, + 0xee, 0x58, 0x4a, 0xbb, 0xa4, 0x24, 0xe9, 0x60, 0xff, 0x8c, 0x7f, 0xab, 0x34, 0x49, 0x65, 0x38, + 0x95, 0xf9, 0xd4, 0x1e, 0xce, 0xf7, 0xfb, 0xce, 0x77, 0x92, 0x20, 0x3f, 0x65, 0x2c, 0x2d, 0x20, + 0x10, 0x25, 0xa1, 0x14, 0x78, 0xb0, 0x1e, 0x07, 0xab, 0x4a, 0x12, 0x99, 0x31, 0x8a, 0x4b, 0xce, + 0x24, 0x73, 0x0e, 0xb5, 0x02, 0x1b, 0x05, 0x5e, 0x8f, 0xbd, 0x53, 0x03, 0x91, 0x32, 0x0b, 0x08, + 0xa5, 0x4c, 0xeb, 0x85, 0x06, 0x3e, 0xba, 0xaa, 0x8a, 0xab, 0xd7, 0x40, 0x48, 0x5e, 0x25, 0xf2, + 0x53, 0x77, 0x6b, 0x60, 0x0e, 0x1b, 0xc3, 0x0e, 0xdf, 0x3a, 0xe8, 0xef, 0xcc, 0xcc, 0x77, 0x26, + 0xc8, 0xce, 0xa8, 0x00, 0x2e, 0xdd, 0x96, 0xdf, 0x1a, 0x0d, 0xc2, 0x33, 0xbc, 0x13, 0x05, 0x37, + 0x62, 0xfc, 0xcc, 0x33, 0x09, 0x37, 0x7f, 0x22, 0x83, 0xd4, 0x70, 0x55, 0x2e, 0x89, 0x04, 0xb7, + 0xfd, 0x0b, 0x58, 0x23, 0xce, 0x0c, 0x1d, 0x68, 0x9b, 0x05, 0xe3, 0x0b, 0x63, 0x63, 0xed, 0x6f, + 0xf3, 0x5f, 0xc3, 0xf7, 0xfc, 0x51, 0xdb, 0x5d, 0xa1, 0x1e, 0x87, 0xb2, 0x20, 0x09, 0xb8, 0x9d, + 0xfd, 0x5d, 0x1a, 0xc6, 0xb9, 0x44, 0xf6, 0x12, 0x0a, 0x90, 0xe0, 0x76, 0x15, 0x3d, 0xfc, 0x89, + 0xbe, 0x56, 0xca, 0x7a, 0x17, 0xcd, 0x78, 0x39, 0xea, 0x2a, 0x47, 0xe7, 0x08, 0x75, 0x25, 0x89, + 0x0b, 0x50, 0xa7, 0xd9, 0x8f, 0x74, 0xe1, 0xb8, 0xa8, 0x97, 0xb0, 0xa2, 0x5a, 0x51, 0xe1, 0xb6, + 0x7d, 0x6b, 0xd4, 0x8f, 0x9a, 0xd2, 0x09, 0x91, 0xbd, 0x26, 0x45, 0x05, 0xc2, 0xb5, 0x7c, 0x6b, + 0x34, 0x08, 0xbd, 0x66, 0x6c, 0x73, 0xb1, 0xf8, 0x2e, 0x13, 0xf2, 0xa9, 0x96, 0x44, 0x46, 0xe9, + 0x45, 0xc8, 0xd6, 0x01, 0xbe, 0x99, 0x16, 0xa2, 0x5e, 0x0e, 0x9b, 0x85, 0x00, 0x69, 0xae, 0xe5, + 0xe4, 0x8b, 0x5d, 0x6e, 0x61, 0xf3, 0x00, 0x32, 0xb2, 0x73, 0xf5, 0x9d, 0x0e, 0x50, 0x9f, 0x95, + 0xc0, 0xd5, 0x7a, 0xd3, 0x18, 0x1d, 0x27, 0x6c, 0xb5, 0x0b, 0x4d, 0xff, 0x35, 0x27, 0x30, 0xaf, + 0xd3, 0xcd, 0x5b, 0x2f, 0x17, 0x46, 0x93, 0xb2, 0x82, 0xd0, 0x14, 0x33, 0x9e, 0x06, 0x29, 0x50, + 0x95, 0x3d, 0xd0, 0x2d, 0x52, 0x66, 0x62, 0xeb, 0x1d, 0x4e, 0xcc, 0x6f, 0x6c, 0x2b, 0xd1, 0xf9, + 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x7c, 0xdc, 0xc0, 0x1c, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go new file mode 100644 index 
000000000..98e7f8321 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go @@ -0,0 +1,285 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/v1/query_plan.proto +// DO NOT EDIT! + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between the two different kinds of +// nodes that can appear in a query plan. +type PlanNode_Kind int32 + +const ( + // Not specified. + PlanNode_KIND_UNSPECIFIED PlanNode_Kind = 0 + // Denotes a Relational operator node in the expression tree. Relational + // operators represent iterative processing of rows during query execution. + // For example, a `TableScan` operation that reads rows from a table. + PlanNode_RELATIONAL PlanNode_Kind = 1 + // Denotes a Scalar node in the expression tree. Scalar nodes represent + // non-iterable entities in the query plan. For example, constants or + // arithmetic operators appearing inside predicate expressions or references + // to column names. + PlanNode_SCALAR PlanNode_Kind = 2 +) + +var PlanNode_Kind_name = map[int32]string{ + 0: "KIND_UNSPECIFIED", + 1: "RELATIONAL", + 2: "SCALAR", +} +var PlanNode_Kind_value = map[string]int32{ + "KIND_UNSPECIFIED": 0, + "RELATIONAL": 1, + "SCALAR": 2, +} + +func (x PlanNode_Kind) String() string { + return proto.EnumName(PlanNode_Kind_name, int32(x)) +} +func (PlanNode_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +// Node information for nodes appearing in a [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes]. +type PlanNode struct { + // The `PlanNode`'s index in [node list][google.spanner.v1.QueryPlan.plan_nodes]. + Index int32 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"` + // Used to determine the type of node. May be needed for visualizing + // different kinds of nodes differently. For example, If the node is a + // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a condensed representation + // which can be used to directly embed a description of the node in its + // parent. + Kind PlanNode_Kind `protobuf:"varint,2,opt,name=kind,enum=google.spanner.v1.PlanNode_Kind" json:"kind,omitempty"` + // The display name for the node. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // List of child node `index`es and their relationship to this parent. + ChildLinks []*PlanNode_ChildLink `protobuf:"bytes,4,rep,name=child_links,json=childLinks" json:"child_links,omitempty"` + // Condensed representation for [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes. + ShortRepresentation *PlanNode_ShortRepresentation `protobuf:"bytes,5,opt,name=short_representation,json=shortRepresentation" json:"short_representation,omitempty"` + // Attributes relevant to the node contained in a group of key-value pairs. 
+ // For example, a Parameter Reference node could have the following + // information in its metadata: + // + // { + // "parameter_reference": "param1", + // "parameter_type": "array" + // } + Metadata *google_protobuf1.Struct `protobuf:"bytes,6,opt,name=metadata" json:"metadata,omitempty"` + // The execution statistics associated with the node, contained in a group of + // key-value pairs. Only present if the plan was returned as a result of a + // profile query. For example, number of executions, number of rows/time per + // execution etc. + ExecutionStats *google_protobuf1.Struct `protobuf:"bytes,7,opt,name=execution_stats,json=executionStats" json:"execution_stats,omitempty"` +} + +func (m *PlanNode) Reset() { *m = PlanNode{} } +func (m *PlanNode) String() string { return proto.CompactTextString(m) } +func (*PlanNode) ProtoMessage() {} +func (*PlanNode) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *PlanNode) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *PlanNode) GetKind() PlanNode_Kind { + if m != nil { + return m.Kind + } + return PlanNode_KIND_UNSPECIFIED +} + +func (m *PlanNode) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *PlanNode) GetChildLinks() []*PlanNode_ChildLink { + if m != nil { + return m.ChildLinks + } + return nil +} + +func (m *PlanNode) GetShortRepresentation() *PlanNode_ShortRepresentation { + if m != nil { + return m.ShortRepresentation + } + return nil +} + +func (m *PlanNode) GetMetadata() *google_protobuf1.Struct { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PlanNode) GetExecutionStats() *google_protobuf1.Struct { + if m != nil { + return m.ExecutionStats + } + return nil +} + +// Metadata associated with a parent-child relationship appearing in a +// [PlanNode][google.spanner.v1.PlanNode]. +type PlanNode_ChildLink struct { + // The node to which the link points. + ChildIndex int32 `protobuf:"varint,1,opt,name=child_index,json=childIndex" json:"child_index,omitempty"` + // The type of the link. For example, in Hash Joins this could be used to + // distinguish between the build child and the probe child, or in the case + // of the child being an output variable, to represent the tag associated + // with the output variable. + Type string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` + // Only present if the child node is [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds + // to an output variable of the parent node. The field carries the name of + // the output variable. + // For example, a `TableScan` operator that reads rows from a table will + // have child links to the `SCALAR` nodes representing the output variables + // created for each column that is read by the operator. The corresponding + // `variable` fields will be set to the variable names assigned to the + // columns. 
+ Variable string `protobuf:"bytes,3,opt,name=variable" json:"variable,omitempty"` +} + +func (m *PlanNode_ChildLink) Reset() { *m = PlanNode_ChildLink{} } +func (m *PlanNode_ChildLink) String() string { return proto.CompactTextString(m) } +func (*PlanNode_ChildLink) ProtoMessage() {} +func (*PlanNode_ChildLink) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +func (m *PlanNode_ChildLink) GetChildIndex() int32 { + if m != nil { + return m.ChildIndex + } + return 0 +} + +func (m *PlanNode_ChildLink) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PlanNode_ChildLink) GetVariable() string { + if m != nil { + return m.Variable + } + return "" +} + +// Condensed representation of a node and its subtree. Only present for +// `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode]. +type PlanNode_ShortRepresentation struct { + // A string representation of the expression subtree rooted at this node. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // A mapping of (subquery variable name) -> (subquery node id) for cases + // where the `description` string of this node references a `SCALAR` + // subquery contained in the expression subtree rooted at this node. The + // referenced `SCALAR` subquery may not necessarily be a direct child of + // this node. + Subqueries map[string]int32 `protobuf:"bytes,2,rep,name=subqueries" json:"subqueries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *PlanNode_ShortRepresentation) Reset() { *m = PlanNode_ShortRepresentation{} } +func (m *PlanNode_ShortRepresentation) String() string { return proto.CompactTextString(m) } +func (*PlanNode_ShortRepresentation) ProtoMessage() {} +func (*PlanNode_ShortRepresentation) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 1} } + +func (m *PlanNode_ShortRepresentation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PlanNode_ShortRepresentation) GetSubqueries() map[string]int32 { + if m != nil { + return m.Subqueries + } + return nil +} + +// Contains an ordered list of nodes appearing in the query plan. +type QueryPlan struct { + // The nodes in the query plan. Plan nodes are returned in pre-order starting + // with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id` corresponds to its index in + // `plan_nodes`. 
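Editor's note: because plan_nodes is a flat, pre-ordered list with the root first and ChildLink.ChildIndex points back into that list, a plan can be rendered with a short recursive walk. A sketch under that reading of the comments, not part of the generated file; the output format is invented for illustration.

package main

import (
    "fmt"
    "strings"

    spanner "google.golang.org/genproto/googleapis/spanner/v1"
)

// printPlan walks a QueryPlan from the root (index 0 in the pre-ordered
// plan_nodes list), following ChildIndex references into the same list.
func printPlan(qp *spanner.QueryPlan) {
    if len(qp.GetPlanNodes()) == 0 {
        return
    }
    var walk func(idx int32, depth int)
    walk = func(idx int32, depth int) {
        n := qp.PlanNodes[idx]
        fmt.Printf("%s%s (%s)\n", strings.Repeat("  ", depth), n.GetDisplayName(), n.GetKind())
        for _, link := range n.GetChildLinks() {
            walk(link.GetChildIndex(), depth+1)
        }
    }
    walk(0, 0)
}

func main() {
    printPlan(&spanner.QueryPlan{PlanNodes: []*spanner.PlanNode{
        {Index: 0, DisplayName: "Serialize Result", ChildLinks: []*spanner.PlanNode_ChildLink{{ChildIndex: 1}}},
        {Index: 1, DisplayName: "Table Scan", Kind: spanner.PlanNode_RELATIONAL},
    }})
}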
+ PlanNodes []*PlanNode `protobuf:"bytes,1,rep,name=plan_nodes,json=planNodes" json:"plan_nodes,omitempty"` +} + +func (m *QueryPlan) Reset() { *m = QueryPlan{} } +func (m *QueryPlan) String() string { return proto.CompactTextString(m) } +func (*QueryPlan) ProtoMessage() {} +func (*QueryPlan) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *QueryPlan) GetPlanNodes() []*PlanNode { + if m != nil { + return m.PlanNodes + } + return nil +} + +func init() { + proto.RegisterType((*PlanNode)(nil), "google.spanner.v1.PlanNode") + proto.RegisterType((*PlanNode_ChildLink)(nil), "google.spanner.v1.PlanNode.ChildLink") + proto.RegisterType((*PlanNode_ShortRepresentation)(nil), "google.spanner.v1.PlanNode.ShortRepresentation") + proto.RegisterType((*QueryPlan)(nil), "google.spanner.v1.QueryPlan") + proto.RegisterEnum("google.spanner.v1.PlanNode_Kind", PlanNode_Kind_name, PlanNode_Kind_value) +} + +func init() { proto.RegisterFile("google/spanner/v1/query_plan.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 570 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xd3, 0x4c, + 0x10, 0xfd, 0x9c, 0x26, 0xf9, 0x9a, 0x09, 0x4a, 0xc3, 0xb6, 0x08, 0x2b, 0x20, 0x61, 0x22, 0x21, + 0xe5, 0xca, 0x56, 0x5b, 0x2e, 0xaa, 0x22, 0x04, 0x69, 0x9b, 0xa2, 0xa8, 0x51, 0x08, 0x1b, 0xb8, + 0x41, 0x48, 0xd6, 0xc6, 0x5e, 0xdc, 0x55, 0x9c, 0x5d, 0xe3, 0x5d, 0x47, 0xcd, 0x4b, 0xf0, 0x7a, + 0xbc, 0x0e, 0xda, 0xf5, 0x0f, 0x85, 0xa0, 0x48, 0xdc, 0xcd, 0xec, 0x9c, 0x39, 0x9a, 0x39, 0x67, + 0x6c, 0xe8, 0x47, 0x42, 0x44, 0x31, 0xf5, 0x64, 0x42, 0x38, 0xa7, 0xa9, 0xb7, 0x3e, 0xf6, 0xbe, + 0x65, 0x34, 0xdd, 0xf8, 0x49, 0x4c, 0xb8, 0x9b, 0xa4, 0x42, 0x09, 0xf4, 0x30, 0xc7, 0xb8, 0x05, + 0xc6, 0x5d, 0x1f, 0xf7, 0x9e, 0x16, 0x6d, 0x24, 0x61, 0x1e, 0xe1, 0x5c, 0x28, 0xa2, 0x98, 0xe0, + 0x32, 0x6f, 0xa8, 0xaa, 0x26, 0x5b, 0x64, 0x5f, 0x3d, 0xa9, 0xd2, 0x2c, 0x50, 0x79, 0xb5, 0xff, + 0xbd, 0x09, 0xfb, 0xb3, 0x98, 0xf0, 0xa9, 0x08, 0x29, 0x3a, 0x82, 0x06, 0xe3, 0x21, 0xbd, 0xb3, + 0x2d, 0xc7, 0x1a, 0x34, 0x70, 0x9e, 0xa0, 0x97, 0x50, 0x5f, 0x32, 0x1e, 0xda, 0x35, 0xc7, 0x1a, + 0x74, 0x4e, 0x1c, 0x77, 0x6b, 0x00, 0xb7, 0x24, 0x70, 0x6f, 0x18, 0x0f, 0xb1, 0x41, 0xa3, 0xe7, + 0xf0, 0x20, 0x64, 0x32, 0x89, 0xc9, 0xc6, 0xe7, 0x64, 0x45, 0xed, 0x3d, 0xc7, 0x1a, 0xb4, 0x70, + 0xbb, 0x78, 0x9b, 0x92, 0x15, 0x45, 0xd7, 0xd0, 0x0e, 0x6e, 0x59, 0x1c, 0xfa, 0x31, 0xe3, 0x4b, + 0x69, 0xd7, 0x9d, 0xbd, 0x41, 0xfb, 0xe4, 0xc5, 0x2e, 0xfe, 0x4b, 0x0d, 0x9f, 0x30, 0xbe, 0xc4, + 0x10, 0x94, 0xa1, 0x44, 0x0b, 0x38, 0x92, 0xb7, 0x22, 0x55, 0x7e, 0x4a, 0x93, 0x94, 0x4a, 0xca, + 0x73, 0x01, 0xec, 0x86, 0x63, 0x0d, 0xda, 0x27, 0xde, 0x2e, 0xc2, 0xb9, 0xee, 0xc3, 0xbf, 0xb5, + 0xe1, 0x43, 0xb9, 0xfd, 0x88, 0x4e, 0x61, 0x7f, 0x45, 0x15, 0x09, 0x89, 0x22, 0x76, 0xd3, 0xf0, + 0x3e, 0x2e, 0x79, 0x4b, 0x61, 0xdd, 0xb9, 0x11, 0x16, 0x57, 0x40, 0xf4, 0x16, 0x0e, 0xe8, 0x1d, + 0x0d, 0x32, 0xcd, 0xe0, 0x4b, 0x45, 0x94, 0xb4, 0xff, 0xdf, 0xdd, 0xdb, 0xa9, 0xf0, 0x73, 0x0d, + 0xef, 0x7d, 0x81, 0x56, 0xb5, 0x33, 0x7a, 0x56, 0xea, 0x75, 0xdf, 0xa4, 0x5c, 0x88, 0xb1, 0x71, + 0x0a, 0x41, 0x5d, 0x6d, 0x12, 0x6a, 0x9c, 0x6a, 0x61, 0x13, 0xa3, 0x1e, 0xec, 0xaf, 0x49, 0xca, + 0xc8, 0x22, 0x2e, 0x3d, 0xa8, 0xf2, 0xde, 0x0f, 0x0b, 0x0e, 0xff, 0xa2, 0x00, 0x72, 0xa0, 0x1d, + 0x52, 0x19, 0xa4, 0x2c, 0x31, 0x3a, 0x5a, 0x85, 0x75, 0xbf, 0x9e, 0x90, 0x0f, 0x20, 0xb3, 0x85, + 0x3e, 0x4e, 0x46, 0xa5, 0x5d, 0x33, 0xce, 0xbd, 0xf9, 0x47, 0xa1, 0xdd, 0x79, 0xc5, 0x30, 0xe2, + 
0x2a, 0xdd, 0xe0, 0x7b, 0x94, 0xbd, 0xd7, 0x70, 0xf0, 0x47, 0x19, 0x75, 0x61, 0x6f, 0x49, 0x37, + 0xc5, 0x34, 0x3a, 0xd4, 0xf7, 0xba, 0x26, 0x71, 0x96, 0x2f, 0xdc, 0xc0, 0x79, 0x72, 0x5e, 0x3b, + 0xb3, 0xfa, 0x67, 0x50, 0xd7, 0xb7, 0x88, 0x8e, 0xa0, 0x7b, 0x33, 0x9e, 0x5e, 0xf9, 0x9f, 0xa6, + 0xf3, 0xd9, 0xe8, 0x72, 0x7c, 0x3d, 0x1e, 0x5d, 0x75, 0xff, 0x43, 0x1d, 0x00, 0x3c, 0x9a, 0x0c, + 0x3f, 0x8e, 0xdf, 0x4f, 0x87, 0x93, 0xae, 0x85, 0x00, 0x9a, 0xf3, 0xcb, 0xe1, 0x64, 0x88, 0xbb, + 0xb5, 0xfe, 0x3b, 0x68, 0x7d, 0xd0, 0xdf, 0x9c, 0x9e, 0x1c, 0x9d, 0x03, 0xe8, 0x4f, 0xcf, 0xe7, + 0x22, 0xa4, 0xd2, 0xb6, 0xcc, 0x9a, 0x4f, 0x76, 0xac, 0x89, 0x5b, 0x49, 0x11, 0xc9, 0x8b, 0x00, + 0x1e, 0x05, 0x62, 0xb5, 0x0d, 0xbe, 0xe8, 0x54, 0xfc, 0x33, 0xed, 0xfe, 0xcc, 0xfa, 0x7c, 0x56, + 0x80, 0x22, 0x11, 0x13, 0x1e, 0xb9, 0x22, 0x8d, 0xbc, 0x88, 0x72, 0x73, 0x1b, 0x5e, 0x5e, 0x22, + 0x09, 0x93, 0xf7, 0x7e, 0x0b, 0xaf, 0x8a, 0x70, 0xd1, 0x34, 0xa0, 0xd3, 0x9f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x31, 0xcf, 0x6d, 0x19, 0x3a, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go new file mode 100644 index 000000000..c93717dcc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go @@ -0,0 +1,310 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/v1/result_set.proto +// DO NOT EDIT! + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Results from [Read][google.spanner.v1.Spanner.Read] or +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. +type ResultSet struct { + // Metadata about the result set, such as row type information. + Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Each element in `rows` is a row whose format is defined by + // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith element + // in each row matches the ith field in + // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements are + // encoded based on type as described + // [here][google.spanner.v1.TypeCode]. + Rows []*google_protobuf1.ListValue `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + // Query plan and execution statistics for the query that produced this + // result set. These can be requested by setting + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. + Stats *ResultSetStats `protobuf:"bytes,3,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ResultSet) Reset() { *m = ResultSet{} } +func (m *ResultSet) String() string { return proto.CompactTextString(m) } +func (*ResultSet) ProtoMessage() {} +func (*ResultSet) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *ResultSet) GetMetadata() *ResultSetMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ResultSet) GetRows() []*google_protobuf1.ListValue { + if m != nil { + return m.Rows + } + return nil +} + +func (m *ResultSet) GetStats() *ResultSetStats { + if m != nil { + return m.Stats + } + return nil +} + +// Partial results from a streaming read or SQL query. 
Streaming reads and +// SQL queries better tolerate large result sets, large rows, and large +// values, but are a little trickier to consume. +type PartialResultSet struct { + // Metadata about the result set, such as row type information. + // Only present in the first response. + Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // A streamed result set consists of a stream of values, which might + // be split into many `PartialResultSet` messages to accommodate + // large rows and/or large values. Every N complete values defines a + // row, where N is equal to the number of entries in + // [metadata.row_type.fields][google.spanner.v1.StructType.fields]. + // + // Most values are encoded based on type as described + // [here][google.spanner.v1.TypeCode]. + // + // It is possible that the last value in values is "chunked", + // meaning that the rest of the value is sent in subsequent + // `PartialResultSet`(s). This is denoted by the [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] + // field. Two or more chunked values can be merged to form a + // complete value as follows: + // + // * `bool/number/null`: cannot be chunked + // * `string`: concatenate the strings + // * `list`: concatenate the lists. If the last element in a list is a + // `string`, `list`, or `object`, merge it with the first element in + // the next list by applying these rules recursively. + // * `object`: concatenate the (field name, field value) pairs. If a + // field name is duplicated, then apply these rules recursively + // to merge the field values. + // + // Some examples of merging: + // + // # Strings are concatenated. + // "foo", "bar" => "foobar" + // + // # Lists of non-strings are concatenated. + // [2, 3], [4] => [2, 3, 4] + // + // # Lists are concatenated, but the last and first elements are merged + // # because they are strings. + // ["a", "b"], ["c", "d"] => ["a", "bc", "d"] + // + // # Lists are concatenated, but the last and first elements are merged + // # because they are lists. Recursively, the last and first elements + // # of the inner lists are merged because they are strings. + // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"] + // + // # Non-overlapping object fields are combined. + // {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"} + // + // # Overlapping object fields are merged. + // {"a": "1"}, {"a": "2"} => {"a": "12"} + // + // # Examples of merging objects containing lists of strings. + // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]} + // + // For a more complete example, suppose a streaming SQL query is + // yielding a result set whose rows contain a single string + // field. The following `PartialResultSet`s might be yielded: + // + // { + // "metadata": { ... } + // "values": ["Hello", "W"] + // "chunked_value": true + // "resume_token": "Af65..." + // } + // { + // "values": ["orl"] + // "chunked_value": true + // "resume_token": "Bqp2..." + // } + // { + // "values": ["d"] + // "resume_token": "Zx1B..." + // } + // + // This sequence of `PartialResultSet`s encodes two rows, one + // containing the field value `"Hello"`, and a second containing the + // field value `"World" = "W" + "orl" + "d"`. + Values []*google_protobuf1.Value `protobuf:"bytes,2,rep,name=values" json:"values,omitempty"` + // If true, then the final value in [values][google.spanner.v1.PartialResultSet.values] is chunked, and must + // be combined with more values from subsequent `PartialResultSet`s + // to obtain a complete field value. 
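Editor's note: a minimal sketch, not part of the generated file, of the merge rule for the simplest case only — chunked string values, as in the "Hello"/"W"/"orl"/"d" example above. Lists and objects need the full recursive rules, and the official Cloud Spanner client libraries handle all of this for you.

package main

import (
    "fmt"

    structpb "github.com/golang/protobuf/ptypes/struct"
    spanner "google.golang.org/genproto/googleapis/spanner/v1"
)

// appendValues accumulates values from a stream of PartialResultSets,
// concatenating a chunked string value with the first value of the next message.
func appendValues(buf []*structpb.Value, prs []*spanner.PartialResultSet) []*structpb.Value {
    chunked := false
    for _, p := range prs {
        vals := p.GetValues()
        if chunked && len(vals) > 0 && len(buf) > 0 {
            prev := buf[len(buf)-1].GetStringValue()
            next := vals[0].GetStringValue()
            buf[len(buf)-1] = &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: prev + next}}
            vals = vals[1:]
        }
        buf = append(buf, vals...)
        chunked = p.GetChunkedValue()
    }
    return buf
}

func main() {
    str := func(s string) *structpb.Value {
        return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: s}}
    }
    // Reproduces the streaming example from the comment: yields "Hello", then "World".
    out := appendValues(nil, []*spanner.PartialResultSet{
        {Values: []*structpb.Value{str("Hello"), str("W")}, ChunkedValue: true},
        {Values: []*structpb.Value{str("orl")}, ChunkedValue: true},
        {Values: []*structpb.Value{str("d")}},
    })
    for _, v := range out {
        fmt.Println(v.GetStringValue())
    }
}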
+ ChunkedValue bool `protobuf:"varint,3,opt,name=chunked_value,json=chunkedValue" json:"chunked_value,omitempty"` + // Streaming calls might be interrupted for a variety of reasons, such + // as TCP connection loss. If this occurs, the stream of results can + // be resumed by re-sending the original request and including + // `resume_token`. Note that executing any other transaction in the + // same session invalidates the token. + ResumeToken []byte `protobuf:"bytes,4,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` + // Query plan and execution statistics for the query that produced this + // streaming result set. These can be requested by setting + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] and are sent + // only once with the last response in the stream. + Stats *ResultSetStats `protobuf:"bytes,5,opt,name=stats" json:"stats,omitempty"` +} + +func (m *PartialResultSet) Reset() { *m = PartialResultSet{} } +func (m *PartialResultSet) String() string { return proto.CompactTextString(m) } +func (*PartialResultSet) ProtoMessage() {} +func (*PartialResultSet) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *PartialResultSet) GetMetadata() *ResultSetMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PartialResultSet) GetValues() []*google_protobuf1.Value { + if m != nil { + return m.Values + } + return nil +} + +func (m *PartialResultSet) GetChunkedValue() bool { + if m != nil { + return m.ChunkedValue + } + return false +} + +func (m *PartialResultSet) GetResumeToken() []byte { + if m != nil { + return m.ResumeToken + } + return nil +} + +func (m *PartialResultSet) GetStats() *ResultSetStats { + if m != nil { + return m.Stats + } + return nil +} + +// Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet]. +type ResultSetMetadata struct { + // Indicates the field names and types for the rows in the result + // set. For example, a SQL query like `"SELECT UserId, UserName FROM + // Users"` could return a `row_type` value like: + // + // "fields": [ + // { "name": "UserId", "type": { "code": "INT64" } }, + // { "name": "UserName", "type": { "code": "STRING" } }, + // ] + RowType *StructType `protobuf:"bytes,1,opt,name=row_type,json=rowType" json:"row_type,omitempty"` + // If the read or SQL query began a transaction as a side-effect, the + // information about the new transaction is yielded here. + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` +} + +func (m *ResultSetMetadata) Reset() { *m = ResultSetMetadata{} } +func (m *ResultSetMetadata) String() string { return proto.CompactTextString(m) } +func (*ResultSetMetadata) ProtoMessage() {} +func (*ResultSetMetadata) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } + +func (m *ResultSetMetadata) GetRowType() *StructType { + if m != nil { + return m.RowType + } + return nil +} + +func (m *ResultSetMetadata) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +// Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet]. +type ResultSetStats struct { + // [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this result. 
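Editor's note: a small sketch of the positional contract between rows and metadata.row_type described above, not part of the generated file; it assumes the StructType and StructType_Field getters from the sibling type.pb.go file, which is vendored in the same package.

package main

import (
    "fmt"

    spanner "google.golang.org/genproto/googleapis/spanner/v1"
)

// printRows pairs each value in a row with the column name declared in
// metadata.row_type, relying on the positional correspondence: the ith
// value in a row matches the ith field of row_type.
func printRows(rs *spanner.ResultSet) {
    fields := rs.GetMetadata().GetRowType().GetFields()
    for i, row := range rs.GetRows() {
        for j, v := range row.GetValues() {
            fmt.Printf("row %d: %s = %v\n", i, fields[j].GetName(), v)
        }
    }
}

func main() { printRows(&spanner.ResultSet{}) }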
+ QueryPlan *QueryPlan `protobuf:"bytes,1,opt,name=query_plan,json=queryPlan" json:"query_plan,omitempty"` + // Aggregated statistics from the execution of the query. Only present when + // the query is profiled. For example, a query could return the statistics as + // follows: + // + // { + // "rows_returned": "3", + // "elapsed_time": "1.22 secs", + // "cpu_time": "1.19 secs" + // } + QueryStats *google_protobuf1.Struct `protobuf:"bytes,2,opt,name=query_stats,json=queryStats" json:"query_stats,omitempty"` +} + +func (m *ResultSetStats) Reset() { *m = ResultSetStats{} } +func (m *ResultSetStats) String() string { return proto.CompactTextString(m) } +func (*ResultSetStats) ProtoMessage() {} +func (*ResultSetStats) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} } + +func (m *ResultSetStats) GetQueryPlan() *QueryPlan { + if m != nil { + return m.QueryPlan + } + return nil +} + +func (m *ResultSetStats) GetQueryStats() *google_protobuf1.Struct { + if m != nil { + return m.QueryStats + } + return nil +} + +func init() { + proto.RegisterType((*ResultSet)(nil), "google.spanner.v1.ResultSet") + proto.RegisterType((*PartialResultSet)(nil), "google.spanner.v1.PartialResultSet") + proto.RegisterType((*ResultSetMetadata)(nil), "google.spanner.v1.ResultSetMetadata") + proto.RegisterType((*ResultSetStats)(nil), "google.spanner.v1.ResultSetStats") +} + +func init() { proto.RegisterFile("google/spanner/v1/result_set.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 463 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x86, 0xe5, 0xf4, 0x83, 0x74, 0x1c, 0x10, 0x5d, 0x09, 0xb0, 0xa2, 0x82, 0xd2, 0x94, 0x43, + 0x4e, 0xb6, 0x5a, 0x0e, 0x44, 0xea, 0xa5, 0xea, 0x19, 0xa4, 0xb0, 0x89, 0x38, 0x70, 0xb1, 0xa6, + 0xee, 0x62, 0xac, 0x3a, 0xbb, 0xce, 0xee, 0x3a, 0x51, 0x7e, 0x00, 0xe2, 0xc8, 0xbf, 0xe1, 0xf7, + 0xa1, 0xfd, 0xc8, 0x07, 0xd8, 0x42, 0x42, 0xea, 0x6d, 0xb3, 0xfb, 0xbc, 0xf3, 0xce, 0x3b, 0x9e, + 0xc0, 0x30, 0x17, 0x22, 0x2f, 0x59, 0xa2, 0x2a, 0xe4, 0x9c, 0xc9, 0x64, 0x79, 0x99, 0x48, 0xa6, + 0xea, 0x52, 0xa7, 0x8a, 0xe9, 0xb8, 0x92, 0x42, 0x0b, 0x72, 0xea, 0x98, 0xd8, 0x33, 0xf1, 0xf2, + 0xb2, 0x7f, 0xe6, 0x65, 0x58, 0x15, 0x09, 0x72, 0x2e, 0x34, 0xea, 0x42, 0x70, 0xe5, 0x04, 0xdb, + 0x57, 0xfb, 0xeb, 0xae, 0xfe, 0x9a, 0x28, 0x2d, 0xeb, 0xcc, 0x97, 0xeb, 0xb7, 0x58, 0x2e, 0x6a, + 0x26, 0xd7, 0x69, 0x55, 0x22, 0xf7, 0xcc, 0x45, 0x93, 0xd1, 0x12, 0xb9, 0xc2, 0xcc, 0xf8, 0xfc, + 0x65, 0xb3, 0x0f, 0xad, 0x2b, 0xe6, 0x5e, 0x87, 0xbf, 0x02, 0x38, 0xa1, 0x36, 0xca, 0x94, 0x69, + 0x72, 0x03, 0xdd, 0x39, 0xd3, 0x78, 0x8f, 0x1a, 0xa3, 0x60, 0x10, 0x8c, 0xc2, 0xab, 0xb7, 0x71, + 0x23, 0x56, 0xbc, 0xe5, 0x3f, 0x7a, 0x96, 0x6e, 0x55, 0x24, 0x86, 0x43, 0x29, 0x56, 0x2a, 0xea, + 0x0c, 0x0e, 0x46, 0xe1, 0x55, 0x7f, 0xa3, 0xde, 0x64, 0x8c, 0x3f, 0x14, 0x4a, 0x7f, 0xc6, 0xb2, + 0x66, 0xd4, 0x72, 0xe4, 0x3d, 0x1c, 0x29, 0x8d, 0x5a, 0x45, 0x07, 0xd6, 0xee, 0xfc, 0x5f, 0x76, + 0x53, 0x03, 0x52, 0xc7, 0x0f, 0xbf, 0x77, 0xe0, 0xf9, 0x04, 0xa5, 0x2e, 0xb0, 0x7c, 0xdc, 0xfe, + 0x8f, 0x97, 0xa6, 0xbd, 0x4d, 0x82, 0x97, 0x8d, 0x04, 0xae, 0x7b, 0x4f, 0x91, 0x0b, 0x78, 0x9a, + 0x7d, 0xab, 0xf9, 0x03, 0xbb, 0x4f, 0xed, 0x8d, 0xcd, 0xd1, 0xa5, 0x3d, 0x7f, 0x69, 0x61, 0x72, + 0x0e, 0x3d, 0xb3, 0x2e, 0x73, 0x96, 0x6a, 0xf1, 0xc0, 0x78, 0x74, 0x38, 0x08, 0x46, 0x3d, 0x1a, + 0xba, 0xbb, 0x99, 0xb9, 0xda, 0xcd, 0xe1, 0xe8, 0x3f, 0xe7, 0xf0, 0x33, 0x80, 0xd3, 0x46, 0x20, + 0x32, 0x86, 0xae, 0x14, 0xab, 0xd4, 0x7c, 
0x68, 0x3f, 0x88, 0xd7, 0x2d, 0x15, 0xa7, 0x76, 0xe1, + 0x66, 0xeb, 0x8a, 0xd1, 0x27, 0x52, 0xac, 0xcc, 0x81, 0xdc, 0x40, 0xb8, 0xb7, 0x43, 0x51, 0xc7, + 0x8a, 0xdf, 0xb4, 0x88, 0x67, 0x3b, 0x8a, 0xee, 0x4b, 0x86, 0x3f, 0x02, 0x78, 0xf6, 0x67, 0xaf, + 0xe4, 0x1a, 0x60, 0xb7, 0xbc, 0xbe, 0xa1, 0xb3, 0x96, 0x9a, 0x9f, 0x0c, 0x34, 0x29, 0x91, 0xd3, + 0x93, 0xc5, 0xe6, 0x48, 0xc6, 0x10, 0x3a, 0xb1, 0x1b, 0x90, 0xeb, 0xe8, 0x55, 0xe3, 0xbb, 0xb8, + 0x30, 0xd4, 0x19, 0x59, 0xdb, 0xdb, 0x0c, 0x5e, 0x64, 0x62, 0xde, 0xf4, 0xb9, 0xdd, 0xf5, 0x37, + 0x31, 0xf2, 0x49, 0xf0, 0x65, 0xec, 0xa1, 0x5c, 0x94, 0xc8, 0xf3, 0x58, 0xc8, 0x3c, 0xc9, 0x19, + 0xb7, 0xc5, 0x13, 0xf7, 0x84, 0x55, 0xa1, 0xf6, 0xfe, 0x44, 0xd7, 0xfe, 0x78, 0x77, 0x6c, 0xa1, + 0x77, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xa5, 0x95, 0xcc, 0x24, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go new file mode 100644 index 000000000..e08f1603d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go @@ -0,0 +1,1217 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/v1/spanner.proto +// DO NOT EDIT! + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Mode in which the query must be processed. +type ExecuteSqlRequest_QueryMode int32 + +const ( + // The default mode where only the query result, without any information + // about the query plan is returned. + ExecuteSqlRequest_NORMAL ExecuteSqlRequest_QueryMode = 0 + // This mode returns only the query plan, without any result rows or + // execution statistics information. + ExecuteSqlRequest_PLAN ExecuteSqlRequest_QueryMode = 1 + // This mode returns both the query plan and the execution statistics along + // with the result rows. + ExecuteSqlRequest_PROFILE ExecuteSqlRequest_QueryMode = 2 +) + +var ExecuteSqlRequest_QueryMode_name = map[int32]string{ + 0: "NORMAL", + 1: "PLAN", + 2: "PROFILE", +} +var ExecuteSqlRequest_QueryMode_value = map[string]int32{ + "NORMAL": 0, + "PLAN": 1, + "PROFILE": 2, +} + +func (x ExecuteSqlRequest_QueryMode) String() string { + return proto.EnumName(ExecuteSqlRequest_QueryMode_name, int32(x)) +} +func (ExecuteSqlRequest_QueryMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor4, []int{4, 0} +} + +// The request for [CreateSession][google.spanner.v1.Spanner.CreateSession]. +type CreateSessionRequest struct { + // Required. The database in which the new session is created. 
+ Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *CreateSessionRequest) Reset() { *m = CreateSessionRequest{} } +func (m *CreateSessionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSessionRequest) ProtoMessage() {} +func (*CreateSessionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *CreateSessionRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// A session in the Cloud Spanner API. +type Session struct { + // Required. The name of the session. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *Session) Reset() { *m = Session{} } +func (m *Session) String() string { return proto.CompactTextString(m) } +func (*Session) ProtoMessage() {} +func (*Session) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *Session) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [GetSession][google.spanner.v1.Spanner.GetSession]. +type GetSessionRequest struct { + // Required. The name of the session to retrieve. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetSessionRequest) Reset() { *m = GetSessionRequest{} } +func (m *GetSessionRequest) String() string { return proto.CompactTextString(m) } +func (*GetSessionRequest) ProtoMessage() {} +func (*GetSessionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +func (m *GetSessionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. +type DeleteSessionRequest struct { + // Required. The name of the session to delete. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteSessionRequest) Reset() { *m = DeleteSessionRequest{} } +func (m *DeleteSessionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSessionRequest) ProtoMessage() {} +func (*DeleteSessionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *DeleteSessionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and +// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. +type ExecuteSqlRequest struct { + // Required. The session in which the SQL query should be performed. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // The transaction to use. If none is provided, the default is a + // temporary read-only transaction with strong concurrency. + Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + // Required. The SQL query string. + Sql string `protobuf:"bytes,3,opt,name=sql" json:"sql,omitempty"` + // The SQL query string can contain parameter placeholders. A parameter + // placeholder consists of `'@'` followed by the parameter + // name. Parameter names consist of any combination of letters, + // numbers, and underscores. + // + // Parameters can appear anywhere that a literal value is expected. The same + // parameter name can be used more than once, for example: + // `"WHERE id > @msg_id AND id < @msg_id + 100"` + // + // It is an error to execute an SQL query with unbound parameters. 
+ // + // Parameter values are specified using `params`, which is a JSON + // object whose keys are parameter names, and whose values are the + // corresponding parameter values. + Params *google_protobuf1.Struct `protobuf:"bytes,4,opt,name=params" json:"params,omitempty"` + // It is not always possible for Cloud Spanner to infer the right SQL type + // from a JSON value. For example, values of type `BYTES` and values + // of type `STRING` both appear in [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. + // + // In these cases, `param_types` can be used to specify the exact + // SQL type for some or all of the SQL query parameters. See the + // definition of [Type][google.spanner.v1.Type] for more information + // about SQL types. + ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If this request is resuming a previously interrupted SQL query + // execution, `resume_token` should be copied from the last + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this + // enables the new SQL query execution to resume where the last one left + // off. The rest of the request parameters must exactly match the + // request that yielded this token. + ResumeToken []byte `protobuf:"bytes,6,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` + // Used to control the amount of debugging information returned in + // [ResultSetStats][google.spanner.v1.ResultSetStats]. + QueryMode ExecuteSqlRequest_QueryMode `protobuf:"varint,7,opt,name=query_mode,json=queryMode,enum=google.spanner.v1.ExecuteSqlRequest_QueryMode" json:"query_mode,omitempty"` +} + +func (m *ExecuteSqlRequest) Reset() { *m = ExecuteSqlRequest{} } +func (m *ExecuteSqlRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteSqlRequest) ProtoMessage() {} +func (*ExecuteSqlRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } + +func (m *ExecuteSqlRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *ExecuteSqlRequest) GetTransaction() *TransactionSelector { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *ExecuteSqlRequest) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +func (m *ExecuteSqlRequest) GetParams() *google_protobuf1.Struct { + if m != nil { + return m.Params + } + return nil +} + +func (m *ExecuteSqlRequest) GetParamTypes() map[string]*Type { + if m != nil { + return m.ParamTypes + } + return nil +} + +func (m *ExecuteSqlRequest) GetResumeToken() []byte { + if m != nil { + return m.ResumeToken + } + return nil +} + +func (m *ExecuteSqlRequest) GetQueryMode() ExecuteSqlRequest_QueryMode { + if m != nil { + return m.QueryMode + } + return ExecuteSqlRequest_NORMAL +} + +// The request for [Read][google.spanner.v1.Spanner.Read] and +// [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. +type ReadRequest struct { + // Required. The session in which the read should be performed. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // The transaction to use. If none is provided, the default is a + // temporary read-only transaction with strong concurrency. + Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + // Required. The name of the table in the database to be read. 
+ Table string `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + // If non-empty, the name of an index on [table][google.spanner.v1.ReadRequest.table]. This index is + // used instead of the table primary key when interpreting [key_set][google.spanner.v1.ReadRequest.key_set] + // and sorting result rows. See [key_set][google.spanner.v1.ReadRequest.key_set] for further information. + Index string `protobuf:"bytes,4,opt,name=index" json:"index,omitempty"` + // The columns of [table][google.spanner.v1.ReadRequest.table] to be returned for each row matching + // this request. + Columns []string `protobuf:"bytes,5,rep,name=columns" json:"columns,omitempty"` + // Required. `key_set` identifies the rows to be yielded. `key_set` names the + // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to be yielded, unless [index][google.spanner.v1.ReadRequest.index] + // is present. If [index][google.spanner.v1.ReadRequest.index] is present, then [key_set][google.spanner.v1.ReadRequest.key_set] instead names + // index keys in [index][google.spanner.v1.ReadRequest.index]. + // + // Rows are yielded in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty) + // or index key order (if [index][google.spanner.v1.ReadRequest.index] is non-empty). + // + // It is not an error for the `key_set` to name rows that do not + // exist in the database. Read yields nothing for nonexistent rows. + KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet" json:"key_set,omitempty"` + // If greater than zero, only the first `limit` rows are yielded. If `limit` + // is zero, the default is no limit. + Limit int64 `protobuf:"varint,8,opt,name=limit" json:"limit,omitempty"` + // If this request is resuming a previously interrupted read, + // `resume_token` should be copied from the last + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this + // enables the new read to resume where the last read left off. The + // rest of the request parameters must exactly match the request + // that yielded this token. + ResumeToken []byte `protobuf:"bytes,9,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } + +func (m *ReadRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *ReadRequest) GetTransaction() *TransactionSelector { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *ReadRequest) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *ReadRequest) GetIndex() string { + if m != nil { + return m.Index + } + return "" +} + +func (m *ReadRequest) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +func (m *ReadRequest) GetKeySet() *KeySet { + if m != nil { + return m.KeySet + } + return nil +} + +func (m *ReadRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ReadRequest) GetResumeToken() []byte { + if m != nil { + return m.ResumeToken + } + return nil +} + +// The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. +type BeginTransactionRequest struct { + // Required. The session in which the transaction runs. 
+ Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Required. Options for the new transaction. + Options *TransactionOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } + +func (m *BeginTransactionRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *BeginTransactionRequest) GetOptions() *TransactionOptions { + if m != nil { + return m.Options + } + return nil +} + +// The request for [Commit][google.spanner.v1.Spanner.Commit]. +type CommitRequest struct { + // Required. The session in which the transaction to be committed is running. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Required. The transaction in which to commit. + // + // Types that are valid to be assigned to Transaction: + // *CommitRequest_TransactionId + // *CommitRequest_SingleUseTransaction + Transaction isCommitRequest_Transaction `protobuf_oneof:"transaction"` + // The mutations to be executed when this transaction commits. All + // mutations are applied atomically, in the order they appear in + // this list. + Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } + +type isCommitRequest_Transaction interface { + isCommitRequest_Transaction() +} + +type CommitRequest_TransactionId struct { + TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3,oneof"` +} +type CommitRequest_SingleUseTransaction struct { + SingleUseTransaction *TransactionOptions `protobuf:"bytes,3,opt,name=single_use_transaction,json=singleUseTransaction,oneof"` +} + +func (*CommitRequest_TransactionId) isCommitRequest_Transaction() {} +func (*CommitRequest_SingleUseTransaction) isCommitRequest_Transaction() {} + +func (m *CommitRequest) GetTransaction() isCommitRequest_Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *CommitRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *CommitRequest) GetTransactionId() []byte { + if x, ok := m.GetTransaction().(*CommitRequest_TransactionId); ok { + return x.TransactionId + } + return nil +} + +func (m *CommitRequest) GetSingleUseTransaction() *TransactionOptions { + if x, ok := m.GetTransaction().(*CommitRequest_SingleUseTransaction); ok { + return x.SingleUseTransaction + } + return nil +} + +func (m *CommitRequest) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CommitRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CommitRequest_OneofMarshaler, _CommitRequest_OneofUnmarshaler, _CommitRequest_OneofSizer, []interface{}{ + (*CommitRequest_TransactionId)(nil), + (*CommitRequest_SingleUseTransaction)(nil), + } +} + +func _CommitRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CommitRequest) + // transaction + switch x := m.Transaction.(type) { + case *CommitRequest_TransactionId: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.TransactionId) + case *CommitRequest_SingleUseTransaction: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SingleUseTransaction); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CommitRequest.Transaction has unexpected type %T", x) + } + return nil +} + +func _CommitRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CommitRequest) + switch tag { + case 2: // transaction.transaction_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Transaction = &CommitRequest_TransactionId{x} + return true, err + case 3: // transaction.single_use_transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.Transaction = &CommitRequest_SingleUseTransaction{msg} + return true, err + default: + return false, nil + } +} + +func _CommitRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CommitRequest) + // transaction + switch x := m.Transaction.(type) { + case *CommitRequest_TransactionId: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TransactionId))) + n += len(x.TransactionId) + case *CommitRequest_SingleUseTransaction: + s := proto.Size(x.SingleUseTransaction) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Commit][google.spanner.v1.Spanner.Commit]. +type CommitResponse struct { + // The Cloud Spanner timestamp at which the transaction committed. + CommitTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp" json:"commit_timestamp,omitempty"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } + +func (m *CommitResponse) GetCommitTimestamp() *google_protobuf3.Timestamp { + if m != nil { + return m.CommitTimestamp + } + return nil +} + +// The request for [Rollback][google.spanner.v1.Spanner.Rollback]. +type RollbackRequest struct { + // Required. The session in which the transaction to roll back is running. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Required. The transaction to roll back. 
+ TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} +func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } + +func (m *RollbackRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *RollbackRequest) GetTransactionId() []byte { + if m != nil { + return m.TransactionId + } + return nil +} + +func init() { + proto.RegisterType((*CreateSessionRequest)(nil), "google.spanner.v1.CreateSessionRequest") + proto.RegisterType((*Session)(nil), "google.spanner.v1.Session") + proto.RegisterType((*GetSessionRequest)(nil), "google.spanner.v1.GetSessionRequest") + proto.RegisterType((*DeleteSessionRequest)(nil), "google.spanner.v1.DeleteSessionRequest") + proto.RegisterType((*ExecuteSqlRequest)(nil), "google.spanner.v1.ExecuteSqlRequest") + proto.RegisterType((*ReadRequest)(nil), "google.spanner.v1.ReadRequest") + proto.RegisterType((*BeginTransactionRequest)(nil), "google.spanner.v1.BeginTransactionRequest") + proto.RegisterType((*CommitRequest)(nil), "google.spanner.v1.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "google.spanner.v1.CommitResponse") + proto.RegisterType((*RollbackRequest)(nil), "google.spanner.v1.RollbackRequest") + proto.RegisterEnum("google.spanner.v1.ExecuteSqlRequest_QueryMode", ExecuteSqlRequest_QueryMode_name, ExecuteSqlRequest_QueryMode_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Spanner service + +type SpannerClient interface { + // Creates a new session. A session can be used to perform + // transactions that read and/or modify data in a Cloud Spanner database. + // Sessions are meant to be reused for many consecutive + // transactions. + // + // Sessions can only execute one transaction at a time. To execute + // multiple concurrent read-write/write-only transactions, create + // multiple sessions. Note that standalone reads and queries use a + // transaction internally, and count toward the one transaction + // limit. + // + // Cloud Spanner limits the number of sessions that can exist at any given + // time; thus, it is a good idea to delete idle and/or unneeded sessions. + // Aside from explicit deletes, Cloud Spanner can delete sessions for + // which no operations are sent for more than an hour, or due to + // internal errors. If a session is deleted, requests to it + // return `NOT_FOUND`. + // + // Idle sessions can be kept alive by sending a trivial SQL query + // periodically, e.g., `"SELECT 1"`. + CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error) + // Gets a session. Returns `NOT_FOUND` if the session does not exist. + // This is mainly useful for determining whether a session is still + // alive. + GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error) + // Ends a session, releasing server resources associated with it. 
+ DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Executes an SQL query, returning all rows in a single reply. This + // method cannot be used to return a result set larger than 10 MiB; + // if the query yields more data than that, the query fails with + // a `FAILED_PRECONDITION` error. + // + // Queries inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be fetched in streaming fashion by calling + // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error) + // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result + // set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there + // is no limit on the size of the returned result set. However, no + // individual row in the result set can exceed 100 MiB, and no + // column value can exceed 10 MiB. + ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error) + // Reads rows from the database using key lookups and scans, as a + // simple key/value style alternative to + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to + // return a result set larger than 10 MiB; if the read matches more + // data than that, the read fails with a `FAILED_PRECONDITION` + // error. + // + // Reads inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be yielded in streaming fashion by calling + // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error) + // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a + // stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the + // size of the returned result set. However, no individual row in + // the result set can exceed 100 MiB, and no column value can exceed + // 10 MiB. + StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error) + // Begins a new transaction. This step can often be skipped: + // [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a + // side-effect. + BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error) + // Commits a transaction. The request includes the mutations to be + // applied to rows in the database. + // + // `Commit` might return an `ABORTED` error. This can occur at any time; + // commonly, the cause is conflicts with concurrent + // transactions. However, it can also happen for a variety of other + // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt + // the transaction from the beginning, re-using the same session. 
+ Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // Rolls back a transaction, releasing any locks it holds. It is a good + // idea to call this for any transaction that includes one or more + // [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + // ultimately decides not to commit. + // + // `Rollback` returns `OK` if it successfully aborts the transaction, the + // transaction was already aborted, or the transaction is not + // found. `Rollback` never returns `ABORTED`. + Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) +} + +type spannerClient struct { + cc *grpc.ClientConn +} + +func NewSpannerClient(cc *grpc.ClientConn) SpannerClient { + return &spannerClient{cc} +} + +func (c *spannerClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error) { + out := new(Session) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/CreateSession", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error) { + out := new(Session) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/GetSession", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/DeleteSession", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error) { + out := new(ResultSet) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/ExecuteSql", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Spanner_serviceDesc.Streams[0], c.cc, "/google.spanner.v1.Spanner/ExecuteStreamingSql", opts...) + if err != nil { + return nil, err + } + x := &spannerExecuteStreamingSqlClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Spanner_ExecuteStreamingSqlClient interface { + Recv() (*PartialResultSet, error) + grpc.ClientStream +} + +type spannerExecuteStreamingSqlClient struct { + grpc.ClientStream +} + +func (x *spannerExecuteStreamingSqlClient) Recv() (*PartialResultSet, error) { + m := new(PartialResultSet) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *spannerClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error) { + out := new(ResultSet) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/Read", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Spanner_serviceDesc.Streams[1], c.cc, "/google.spanner.v1.Spanner/StreamingRead", opts...) + if err != nil { + return nil, err + } + x := &spannerStreamingReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Spanner_StreamingReadClient interface { + Recv() (*PartialResultSet, error) + grpc.ClientStream +} + +type spannerStreamingReadClient struct { + grpc.ClientStream +} + +func (x *spannerStreamingReadClient) Recv() (*PartialResultSet, error) { + m := new(PartialResultSet) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *spannerClient) BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error) { + out := new(Transaction) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/BeginTransaction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/Commit", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/Rollback", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Spanner service + +type SpannerServer interface { + // Creates a new session. A session can be used to perform + // transactions that read and/or modify data in a Cloud Spanner database. + // Sessions are meant to be reused for many consecutive + // transactions. + // + // Sessions can only execute one transaction at a time. To execute + // multiple concurrent read-write/write-only transactions, create + // multiple sessions. Note that standalone reads and queries use a + // transaction internally, and count toward the one transaction + // limit. + // + // Cloud Spanner limits the number of sessions that can exist at any given + // time; thus, it is a good idea to delete idle and/or unneeded sessions. + // Aside from explicit deletes, Cloud Spanner can delete sessions for + // which no operations are sent for more than an hour, or due to + // internal errors. If a session is deleted, requests to it + // return `NOT_FOUND`. + // + // Idle sessions can be kept alive by sending a trivial SQL query + // periodically, e.g., `"SELECT 1"`. + CreateSession(context.Context, *CreateSessionRequest) (*Session, error) + // Gets a session. Returns `NOT_FOUND` if the session does not exist. + // This is mainly useful for determining whether a session is still + // alive. + GetSession(context.Context, *GetSessionRequest) (*Session, error) + // Ends a session, releasing server resources associated with it. + DeleteSession(context.Context, *DeleteSessionRequest) (*google_protobuf4.Empty, error) + // Executes an SQL query, returning all rows in a single reply. 
This + // method cannot be used to return a result set larger than 10 MiB; + // if the query yields more data than that, the query fails with + // a `FAILED_PRECONDITION` error. + // + // Queries inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be fetched in streaming fashion by calling + // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error) + // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result + // set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there + // is no limit on the size of the returned result set. However, no + // individual row in the result set can exceed 100 MiB, and no + // column value can exceed 10 MiB. + ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error + // Reads rows from the database using key lookups and scans, as a + // simple key/value style alternative to + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to + // return a result set larger than 10 MiB; if the read matches more + // data than that, the read fails with a `FAILED_PRECONDITION` + // error. + // + // Reads inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be yielded in streaming fashion by calling + // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + Read(context.Context, *ReadRequest) (*ResultSet, error) + // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a + // stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the + // size of the returned result set. However, no individual row in + // the result set can exceed 100 MiB, and no column value can exceed + // 10 MiB. + StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error + // Begins a new transaction. This step can often be skipped: + // [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a + // side-effect. + BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error) + // Commits a transaction. The request includes the mutations to be + // applied to rows in the database. + // + // `Commit` might return an `ABORTED` error. This can occur at any time; + // commonly, the cause is conflicts with concurrent + // transactions. However, it can also happen for a variety of other + // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt + // the transaction from the beginning, re-using the same session. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // Rolls back a transaction, releasing any locks it holds. It is a good + // idea to call this for any transaction that includes one or more + // [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + // ultimately decides not to commit. + // + // `Rollback` returns `OK` if it successfully aborts the transaction, the + // transaction was already aborted, or the transaction is not + // found. 
`Rollback` never returns `ABORTED`. + Rollback(context.Context, *RollbackRequest) (*google_protobuf4.Empty, error) +} + +func RegisterSpannerServer(s *grpc.Server, srv SpannerServer) { + s.RegisterService(&_Spanner_serviceDesc, srv) +} + +func _Spanner_CreateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).CreateSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/CreateSession", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).CreateSession(ctx, req.(*CreateSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_GetSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).GetSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/GetSession", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).GetSession(ctx, req.(*GetSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_DeleteSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).DeleteSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/DeleteSession", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).DeleteSession(ctx, req.(*DeleteSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_ExecuteSql_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecuteSqlRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).ExecuteSql(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/ExecuteSql", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).ExecuteSql(ctx, req.(*ExecuteSqlRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_ExecuteStreamingSql_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ExecuteSqlRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SpannerServer).ExecuteStreamingSql(m, &spannerExecuteStreamingSqlServer{stream}) +} + +type Spanner_ExecuteStreamingSqlServer interface { + Send(*PartialResultSet) error + grpc.ServerStream +} + +type spannerExecuteStreamingSqlServer struct { + grpc.ServerStream +} + +func (x *spannerExecuteStreamingSqlServer) Send(m *PartialResultSet) error { + return x.ServerStream.SendMsg(m) +} + +func _Spanner_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).Read(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/Read", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).Read(ctx, req.(*ReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_StreamingRead_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SpannerServer).StreamingRead(m, &spannerStreamingReadServer{stream}) +} + +type Spanner_StreamingReadServer interface { + Send(*PartialResultSet) error + grpc.ServerStream +} + +type spannerStreamingReadServer struct { + grpc.ServerStream +} + +func (x *spannerStreamingReadServer) Send(m *PartialResultSet) error { + return x.ServerStream.SendMsg(m) +} + +func _Spanner_BeginTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BeginTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).BeginTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/BeginTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).BeginTransaction(ctx, req.(*BeginTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).Rollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/Rollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).Rollback(ctx, req.(*RollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Spanner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.spanner.v1.Spanner", + HandlerType: (*SpannerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSession", + Handler: _Spanner_CreateSession_Handler, + }, + { + MethodName: "GetSession", + Handler: _Spanner_GetSession_Handler, + }, + { + MethodName: "DeleteSession", + Handler: _Spanner_DeleteSession_Handler, + }, + { + MethodName: "ExecuteSql", + Handler: _Spanner_ExecuteSql_Handler, + }, + { + MethodName: "Read", + Handler: _Spanner_Read_Handler, + }, + { + MethodName: "BeginTransaction", + 
Handler: _Spanner_BeginTransaction_Handler, + }, + { + MethodName: "Commit", + Handler: _Spanner_Commit_Handler, + }, + { + MethodName: "Rollback", + Handler: _Spanner_Rollback_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ExecuteStreamingSql", + Handler: _Spanner_ExecuteStreamingSql_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingRead", + Handler: _Spanner_StreamingRead_Handler, + ServerStreams: true, + }, + }, + Metadata: "google/spanner/v1/spanner.proto", +} + +func init() { proto.RegisterFile("google/spanner/v1/spanner.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 1188 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x97, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xc7, 0xbb, 0x49, 0x6a, 0xc7, 0x8f, 0x93, 0xc6, 0x19, 0xd2, 0xc6, 0xb8, 0xa1, 0x75, 0xb7, + 0x94, 0x58, 0x96, 0xf0, 0x52, 0xc3, 0x21, 0x18, 0x10, 0xad, 0x5b, 0xb7, 0x89, 0xf2, 0xe6, 0xac, + 0x5d, 0x2a, 0x55, 0x42, 0xd6, 0xd8, 0x7e, 0x30, 0x8b, 0xf7, 0x2d, 0x3b, 0xe3, 0xa8, 0x16, 0xea, + 0x85, 0x2b, 0x17, 0x5e, 0x0e, 0x70, 0xe0, 0x06, 0x47, 0xee, 0xdc, 0xf8, 0x12, 0x7c, 0x05, 0x2e, + 0x7c, 0x06, 0x2e, 0x68, 0x66, 0x77, 0x9d, 0x8d, 0x77, 0x9b, 0xa4, 0x04, 0x71, 0xca, 0xbc, 0xfc, + 0x67, 0x9e, 0xdf, 0xfc, 0xe7, 0xd9, 0x79, 0x1c, 0xb8, 0x39, 0x70, 0x9c, 0x81, 0x89, 0x1a, 0x73, + 0xa9, 0x6d, 0xa3, 0xa7, 0x1d, 0xdd, 0x0d, 0x9b, 0x15, 0xd7, 0x73, 0xb8, 0x43, 0x96, 0x7d, 0x41, + 0x25, 0x1c, 0x3d, 0xba, 0x5b, 0x58, 0x0b, 0xd6, 0x50, 0xd7, 0xd0, 0xa8, 0x6d, 0x3b, 0x9c, 0x72, + 0xc3, 0xb1, 0x99, 0xbf, 0xa0, 0x70, 0x35, 0x3a, 0x3b, 0xe2, 0x9f, 0x07, 0xc3, 0xd7, 0x83, 0x61, + 0xd9, 0xeb, 0x8e, 0x3e, 0xd3, 0xd0, 0x72, 0xf9, 0x38, 0x98, 0x5c, 0x9b, 0x9e, 0x64, 0xdc, 0x1b, + 0xf5, 0x78, 0x30, 0x7b, 0x73, 0x7a, 0x96, 0x1b, 0x16, 0x32, 0x4e, 0x2d, 0x77, 0x6a, 0x79, 0xe4, + 0x10, 0x43, 0x1c, 0x87, 0x40, 0xc5, 0xf8, 0xac, 0x35, 0xf2, 0x99, 0x03, 0x85, 0x1a, 0x57, 0x1c, + 0x8e, 0xd0, 0x1b, 0x77, 0x5c, 0x93, 0x9e, 0xa2, 0xf1, 0x90, 0x8d, 0x4c, 0xde, 0x61, 0x18, 0x82, + 0xde, 0x8e, 0x6b, 0xb8, 0x47, 0x6d, 0x46, 0x7b, 0x91, 0x60, 0x09, 0xb0, 0x7c, 0xec, 0xa2, 0x3f, + 0xab, 0x56, 0x61, 0xe5, 0x81, 0x87, 0x94, 0x63, 0x0b, 0x19, 0x33, 0x1c, 0x5b, 0xc7, 0xc3, 0x11, + 0x32, 0x4e, 0x0a, 0x30, 0xdf, 0xa7, 0x9c, 0x76, 0x29, 0xc3, 0xbc, 0x52, 0x54, 0x4a, 0x19, 0x7d, + 0xd2, 0x57, 0xdf, 0x80, 0x74, 0xa0, 0x26, 0x04, 0xe6, 0x6c, 0x6a, 0x85, 0x12, 0xd9, 0x56, 0xd7, + 0x61, 0xf9, 0x31, 0xf2, 0xa9, 0xfd, 0x92, 0x84, 0x65, 0x58, 0x79, 0x88, 0x26, 0xc6, 0x62, 0x27, + 0x69, 0xbf, 0x9e, 0x83, 0xe5, 0xc6, 0x73, 0xec, 0x8d, 0x38, 0xb6, 0x0e, 0xcd, 0x50, 0x99, 0x87, + 0x34, 0xf3, 0xd7, 0x06, 0xe2, 0xb0, 0x4b, 0x36, 0x21, 0x1b, 0xb1, 0x22, 0x3f, 0x53, 0x54, 0x4a, + 0xd9, 0xea, 0x5b, 0x95, 0x58, 0x72, 0x55, 0xda, 0xc7, 0xaa, 0x16, 0x9a, 0xd8, 0xe3, 0x8e, 0xa7, + 0x47, 0x97, 0x92, 0x1c, 0xcc, 0xb2, 0x43, 0x33, 0x3f, 0x2b, 0xf7, 0x17, 0x4d, 0xa2, 0x41, 0xca, + 0xa5, 0x1e, 0xb5, 0x58, 0x7e, 0x4e, 0x6e, 0xbb, 0x1a, 0x6e, 0x1b, 0x26, 0x4c, 0xa5, 0x25, 0xd3, + 0x49, 0x0f, 0x64, 0xe4, 0x09, 0x64, 0x65, 0xab, 0x23, 0x8c, 0x67, 0xf9, 0xcb, 0xc5, 0xd9, 0x52, + 0xb6, 0xfa, 0x5e, 0x02, 0x4c, 0xec, 0x84, 0x95, 0xa6, 0x58, 0xd7, 0x16, 0xcb, 0x1a, 0x36, 0xf7, + 0xc6, 0x3a, 0xb8, 0x93, 0x01, 0x72, 0x0b, 0x16, 0x44, 0x4a, 0x58, 0xd8, 0xe1, 0xce, 0x10, 0xed, + 0x7c, 0xaa, 0xa8, 0x94, 0x16, 0xf4, 0xac, 0x3f, 0xd6, 0x16, 0x43, 0x64, 0x17, 0xc0, 0xcf, 0x2c, + 0xcb, 0xe9, 0x63, 0x3e, 0x5d, 0x54, 0x4a, 0x57, 0xaa, 0x95, 0x73, 0x05, 0x3e, 0x10, 0xcb, 0x76, + 0x9d, 0x3e, 0xea, 0x99, 0xc3, 
0xb0, 0x59, 0xf8, 0x04, 0x96, 0xa6, 0x80, 0x84, 0x3d, 0x43, 0x1c, + 0x07, 0xf6, 0x8b, 0x26, 0x79, 0x1b, 0x2e, 0x1f, 0x51, 0x73, 0x84, 0x81, 0xe9, 0xab, 0x49, 0xa6, + 0x8f, 0x5d, 0xd4, 0x7d, 0x55, 0x6d, 0x66, 0x43, 0x51, 0x2b, 0x90, 0x99, 0xc4, 0x23, 0x00, 0xa9, + 0xbd, 0x7d, 0x7d, 0xf7, 0xfe, 0x4e, 0xee, 0x12, 0x99, 0x87, 0xb9, 0xe6, 0xce, 0xfd, 0xbd, 0x9c, + 0x42, 0xb2, 0x90, 0x6e, 0xea, 0xfb, 0x8f, 0xb6, 0x76, 0x1a, 0xb9, 0x19, 0xf5, 0x97, 0x19, 0xc8, + 0xea, 0x48, 0xfb, 0xff, 0x67, 0x1e, 0xac, 0xc0, 0x65, 0x4e, 0xbb, 0x26, 0x06, 0x99, 0xe0, 0x77, + 0xc4, 0xa8, 0x61, 0xf7, 0xf1, 0xb9, 0x4c, 0x85, 0x8c, 0xee, 0x77, 0x04, 0x4f, 0xcf, 0x31, 0x47, + 0x96, 0xed, 0x5f, 0x76, 0x46, 0x0f, 0xbb, 0xa4, 0x0a, 0xe9, 0x21, 0x8e, 0xc5, 0x37, 0x2c, 0xaf, + 0x2b, 0x5b, 0x7d, 0x3d, 0x81, 0x65, 0x1b, 0xc7, 0x2d, 0xe4, 0x7a, 0x6a, 0x28, 0xff, 0x8a, 0x18, + 0xa6, 0x61, 0x19, 0x3c, 0x3f, 0x5f, 0x54, 0x4a, 0xb3, 0xba, 0xdf, 0x89, 0xdd, 0x7e, 0x26, 0x76, + 0xfb, 0x2a, 0x87, 0xd5, 0x3a, 0x0e, 0x0c, 0x3b, 0x72, 0xb6, 0xb3, 0x1d, 0xfb, 0x18, 0xd2, 0x8e, + 0x2b, 0x1f, 0xd8, 0xc0, 0xad, 0x3b, 0xa7, 0xbb, 0xb5, 0xef, 0x8b, 0xf5, 0x70, 0x95, 0xfa, 0xb7, + 0x02, 0x8b, 0x0f, 0x1c, 0xcb, 0x32, 0xf8, 0xd9, 0xc1, 0xd6, 0xe1, 0x4a, 0xc4, 0xe3, 0x8e, 0xd1, + 0x97, 0x31, 0x17, 0x36, 0x2f, 0xe9, 0x8b, 0x91, 0xf1, 0xad, 0x3e, 0xf9, 0x14, 0xae, 0x31, 0xc3, + 0x1e, 0x98, 0xd8, 0x19, 0x31, 0xec, 0x44, 0xaf, 0x74, 0xf6, 0x15, 0x20, 0x37, 0x2f, 0xe9, 0x2b, + 0xfe, 0x36, 0x4f, 0x18, 0x46, 0xa6, 0xc9, 0xfb, 0x90, 0x09, 0xdf, 0x68, 0xf1, 0x55, 0x8b, 0xef, + 0xf3, 0x7a, 0xc2, 0x8e, 0xbb, 0x81, 0x46, 0x3f, 0x56, 0xd7, 0x17, 0x4f, 0x64, 0x98, 0xfa, 0x14, + 0xae, 0x84, 0x87, 0x67, 0xae, 0x63, 0x33, 0x24, 0x0d, 0xc8, 0xf5, 0xe4, 0x48, 0x67, 0x52, 0x47, + 0xa4, 0x0d, 0xd9, 0x6a, 0x21, 0xf6, 0x70, 0xb4, 0x43, 0x85, 0xbe, 0xe4, 0xaf, 0x99, 0x0c, 0xa8, + 0x3a, 0x2c, 0xe9, 0x8e, 0x69, 0x76, 0x69, 0x6f, 0x78, 0xb6, 0xaf, 0x77, 0x92, 0x7d, 0x9d, 0x72, + 0xb5, 0xfa, 0xd7, 0x02, 0xa4, 0x5b, 0xfe, 0xf1, 0xc8, 0x8f, 0xe2, 0xda, 0xa2, 0xa5, 0x80, 0xac, + 0x27, 0x38, 0x90, 0x54, 0x2c, 0x0a, 0x85, 0x04, 0x61, 0x20, 0x51, 0xeb, 0x5f, 0xfd, 0xf1, 0xe7, + 0xf7, 0x33, 0x1f, 0xaa, 0x35, 0x51, 0x78, 0xbe, 0x0c, 0x6b, 0xc8, 0x47, 0xae, 0xe7, 0x7c, 0x81, + 0x3d, 0xce, 0xb4, 0xb2, 0x66, 0xd8, 0x8c, 0x53, 0xbb, 0x87, 0xa2, 0x1d, 0xce, 0x33, 0xad, 0xfc, + 0x42, 0x0b, 0x0e, 0xc3, 0xc8, 0x37, 0x0a, 0xc0, 0x71, 0x49, 0x21, 0x6f, 0x26, 0x84, 0x8b, 0x55, + 0x9c, 0x53, 0xa1, 0xee, 0x49, 0xa8, 0x1a, 0xd9, 0x90, 0x50, 0xa2, 0xc0, 0x9c, 0x03, 0x68, 0xc2, + 0xa3, 0x95, 0x5f, 0x90, 0xef, 0x14, 0x58, 0x3c, 0x51, 0xbc, 0x12, 0xdd, 0x4a, 0x2a, 0x6f, 0x85, + 0x6b, 0xb1, 0x5b, 0x6f, 0x88, 0x9f, 0x26, 0x21, 0x54, 0xf9, 0xdf, 0x43, 0xfd, 0xac, 0x00, 0x1c, + 0xbf, 0xe4, 0x89, 0x3e, 0xc5, 0x1e, 0xfa, 0xc2, 0x5a, 0x82, 0x4a, 0x97, 0xbf, 0x34, 0x5a, 0xc8, + 0xd5, 0x03, 0x09, 0xb5, 0xad, 0x3e, 0x92, 0x50, 0x41, 0xb0, 0x57, 0xe4, 0xaa, 0xe1, 0x24, 0x68, + 0x4d, 0x29, 0x93, 0xdf, 0x15, 0x78, 0x2d, 0xc4, 0xe0, 0x1e, 0x52, 0xcb, 0xb0, 0x07, 0xe7, 0xc7, + 0xbd, 0x9d, 0xa0, 0x6a, 0x52, 0x8f, 0x1b, 0xd4, 0x3c, 0xa6, 0x7e, 0x26, 0xa9, 0xdb, 0xea, 0xfe, + 0x7f, 0x41, 0x1d, 0x61, 0xac, 0x29, 0xe5, 0x77, 0x14, 0xf2, 0xad, 0x02, 0x73, 0xa2, 0xfa, 0x90, + 0x1b, 0x89, 0xd6, 0x4d, 0xca, 0xd2, 0x19, 0xd6, 0x6e, 0x4b, 0xc8, 0x86, 0x7a, 0xef, 0x22, 0x90, + 0x1e, 0xd2, 0xbe, 0x30, 0xf5, 0x57, 0x05, 0x16, 0x27, 0xa4, 0xe7, 0x82, 0x3b, 0x97, 0x91, 0x6d, + 0xc9, 0xb8, 0xa7, 0x6e, 0x5d, 0x84, 0x91, 0x45, 0xb9, 0x7c, 0x0b, 0x7f, 0x53, 0x20, 0x37, 0x5d, + 0x9a, 0x48, 0x39, 0x81, 0xe8, 0x25, 0xf5, 0xab, 0x70, 
0xe3, 0xf4, 0xf7, 0x5e, 0x7d, 0x2a, 0xc1, + 0x0f, 0xd4, 0x9d, 0x8b, 0x80, 0x77, 0xa7, 0x82, 0x0b, 0xa3, 0x7f, 0x52, 0x20, 0xe5, 0x3f, 0xf0, + 0xa4, 0x98, 0xf4, 0x3e, 0x46, 0x0b, 0x5f, 0xe1, 0xd6, 0x29, 0x0a, 0xbf, 0x3a, 0xa8, 0xbb, 0x12, + 0xf4, 0xb1, 0x5a, 0xbf, 0x08, 0xa8, 0x5f, 0x2b, 0x04, 0xde, 0x0f, 0x0a, 0xcc, 0x87, 0x65, 0x82, + 0xa8, 0x49, 0x29, 0x70, 0xb2, 0x86, 0xbc, 0xf4, 0x35, 0xda, 0x97, 0x5c, 0x5b, 0xea, 0xc3, 0x0b, + 0x65, 0x67, 0x10, 0xac, 0xa6, 0x94, 0xeb, 0x14, 0xae, 0xf6, 0x1c, 0x2b, 0x4e, 0x54, 0x5f, 0x08, + 0x2a, 0x50, 0x53, 0x00, 0x34, 0x95, 0x67, 0x1b, 0x81, 0x64, 0xe0, 0x98, 0xd4, 0x1e, 0x54, 0x1c, + 0x6f, 0xa0, 0x0d, 0xd0, 0x96, 0x78, 0x9a, 0x3f, 0x45, 0x5d, 0x83, 0x45, 0xfe, 0x9f, 0xf9, 0x20, + 0x68, 0x76, 0x53, 0x52, 0xf4, 0xee, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x5d, 0xc1, 0xa8, + 0x65, 0x0e, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go new file mode 100644 index 000000000..59aedb85d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go @@ -0,0 +1,830 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/v1/transaction.proto +// DO NOT EDIT! + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// # Transactions +// +// +// Each session can have at most one active transaction at a time. After the +// active transaction is completed, the session can immediately be +// re-used for the next transaction. It is not necessary to create a +// new session for each transaction. +// +// # Transaction Modes +// +// Cloud Spanner supports two transaction modes: +// +// 1. Locking read-write. This type of transaction is the only way +// to write data into Cloud Spanner. These transactions rely on +// pessimistic locking and, if necessary, two-phase commit. +// Locking read-write transactions may abort, requiring the +// application to retry. +// +// 2. Snapshot read-only. This transaction type provides guaranteed +// consistency across several reads, but does not allow +// writes. Snapshot read-only transactions can be configured to +// read at timestamps in the past. Snapshot read-only +// transactions do not need to be committed. +// +// For transactions that only read, snapshot read-only transactions +// provide simpler semantics and are almost always faster. In +// particular, read-only transactions do not take locks, so they do +// not conflict with read-write transactions. As a consequence of not +// taking locks, they also do not abort, so retry loops are not needed. +// +// Transactions may only read/write data in a single database. They +// may, however, read/write data in different tables within that +// database. +// +// ## Locking Read-Write Transactions +// +// Locking transactions may be used to atomically read-modify-write +// data anywhere in a database. This type of transaction is externally +// consistent. +// +// Clients should attempt to minimize the amount of time a transaction +// is active. 
Faster transactions commit with higher probability +// and cause less contention. Cloud Spanner attempts to keep read locks +// active as long as the transaction continues to do reads, and the +// transaction has not been terminated by +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of +// inactivity at the client may cause Cloud Spanner to release a +// transaction's locks and abort it. +// +// Reads performed within a transaction acquire locks on the data +// being read. Writes can only be done at commit time, after all reads +// have been completed. +// Conceptually, a read-write transaction consists of zero or more +// reads or SQL queries followed by +// [Commit][google.spanner.v1.Spanner.Commit]. At any time before +// [Commit][google.spanner.v1.Spanner.Commit], the client can send a +// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the +// transaction. +// +// ### Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired +// are still valid at commit time, and it is able to acquire write +// locks for all writes. Cloud Spanner can abort the transaction for any +// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees +// that the transaction has not modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about +// how long the transaction's locks were held for. It is an error to +// use Cloud Spanner locks for any sort of mutual exclusion other than +// between Cloud Spanner transactions themselves. +// +// ### Retrying Aborted Transactions +// +// When a transaction aborts, the application can choose to retry the +// whole transaction again. To maximize the chances of successfully +// committing the retry, the client should execute the retry in the +// same session as the original attempt. The original session's lock +// priority increases with each consecutive abort, meaning that each +// attempt has a slightly better chance of success than the previous. +// +// Under some circumstances (e.g., many transactions attempting to +// modify the same row(s)), a transaction can abort many times in a +// short period before successfully committing. Thus, it is not a good +// idea to cap the number of retries a transaction can attempt; +// instead, it is better to limit the total amount of wall time spent +// retrying. +// +// ### Idle Transactions +// +// A transaction is considered idle if it has no outstanding reads or +// SQL queries and has not started a read or SQL query within the last 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that they +// don't hold on to locks indefinitely. In that case, the commit will +// fail with error `ABORTED`. +// +// If this behavior is undesirable, periodically executing a simple +// SQL query in the transaction (e.g., `SELECT 1`) prevents the +// transaction from becoming idle. +// +// ## Snapshot Read-Only Transactions +// +// Snapshot read-only transactions provides a simpler method than +// locking read-write transactions for doing several consistent +// reads. However, this type of transaction does not support writes. +// +// Snapshot transactions do not take locks. Instead, they work by +// choosing a Cloud Spanner timestamp, then executing all reads at that +// timestamp. Since they do not acquire locks, they do not block +// concurrent read-write transactions. 
+// +// Unlike locking read-write transactions, snapshot read-only +// transactions never abort. They can fail if the chosen read +// timestamp is garbage collected; however, the default garbage +// collection policy is generous enough that most applications do not +// need to worry about this in practice. +// +// Snapshot read-only transactions do not need to call +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not +// permitted to do so). +// +// To execute a snapshot transaction, the client specifies a timestamp +// bound, which tells Cloud Spanner how to choose a read timestamp. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, +// stale read-only transactions can execute more quickly than strong +// or read-write transaction, because they are able to execute far +// from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. +// +// ### Strong +// +// Strong reads are guaranteed to see the effects of all transactions +// that have committed before the start of the read. Furthermore, all +// rows yielded by a single read are consistent with each other -- if +// any part of the read observes a transaction, all parts of the read +// see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are +// concurrent writes. If consistency across reads is required, the +// reads should be executed within a transaction or at an exact read +// timestamp. +// +// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. +// +// ### Exact Staleness +// +// These timestamp bounds execute reads at a user-specified +// timestamp. Reads at a timestamp are guaranteed to see a consistent +// prefix of the global transaction history: they observe +// modifications done by all transactions with a commit timestamp <= +// the read timestamp, and observe none of the modifications done by +// transactions with a larger commit timestamp. They will block until +// all conflicting transactions that may be assigned commit timestamps +// <= the read timestamp have finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a +// timestamp. As a result, they execute slightly faster than the +// equivalent boundedly stale concurrency modes. On the other hand, +// boundedly stale reads usually return fresher results. +// +// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and +// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. +// +// ### Bounded Staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, +// subject to a user-provided staleness bound. Cloud Spanner chooses the +// newest timestamp within the staleness bound that allows execution +// of the reads at the closest available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of +// the read observes a transaction, all parts of the read see the +// transaction. 
Boundedly stale reads are not repeatable: two stale +// reads, even if they use the same staleness bound, can execute at +// different timestamps and thus return inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase +// negotiates a timestamp among all replicas needed to serve the +// read. In the second phase, reads are executed at the negotiated +// timestamp. +// +// As a result of the two phase execution, bounded staleness reads are +// usually a little slower than comparable exact staleness +// reads. However, they are typically able to return fresher +// results, and are more likely to execute at the closest replica. +// +// Because the timestamp negotiation requires up-front knowledge of +// which rows will be read, it can only be used with single-use +// read-only transactions. +// +// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and +// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. +// +// ### Old Read Timestamps and Garbage Collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten data +// in the background to reclaim storage space. This process is known +// as "version GC". By default, version GC reclaims versions after they +// are one hour old. Because of this, Cloud Spanner cannot perform reads +// at read timestamps more than one hour in the past. This +// restriction also applies to in-progress reads and/or SQL queries whose +// timestamp become too old while executing. Reads and SQL queries with +// too-old read timestamps fail with the error `FAILED_PRECONDITION`. +type TransactionOptions struct { + // Required. The type of transaction. + // + // Types that are valid to be assigned to Mode: + // *TransactionOptions_ReadWrite_ + // *TransactionOptions_ReadOnly_ + Mode isTransactionOptions_Mode `protobuf_oneof:"mode"` +} + +func (m *TransactionOptions) Reset() { *m = TransactionOptions{} } +func (m *TransactionOptions) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions) ProtoMessage() {} +func (*TransactionOptions) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +type isTransactionOptions_Mode interface { + isTransactionOptions_Mode() +} + +type TransactionOptions_ReadWrite_ struct { + ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,1,opt,name=read_write,json=readWrite,oneof"` +} +type TransactionOptions_ReadOnly_ struct { + ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,oneof"` +} + +func (*TransactionOptions_ReadWrite_) isTransactionOptions_Mode() {} +func (*TransactionOptions_ReadOnly_) isTransactionOptions_Mode() {} + +func (m *TransactionOptions) GetMode() isTransactionOptions_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite { + if x, ok := m.GetMode().(*TransactionOptions_ReadWrite_); ok { + return x.ReadWrite + } + return nil +} + +func (m *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly { + if x, ok := m.GetMode().(*TransactionOptions_ReadOnly_); ok { + return x.ReadOnly + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_OneofMarshaler, _TransactionOptions_OneofUnmarshaler, _TransactionOptions_OneofSizer, []interface{}{ + (*TransactionOptions_ReadWrite_)(nil), + (*TransactionOptions_ReadOnly_)(nil), + } +} + +func _TransactionOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadWrite); err != nil { + return err + } + case *TransactionOptions_ReadOnly_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadOnly); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions.Mode has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions) + switch tag { + case 1: // mode.read_write + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadWrite) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadWrite_{msg} + return true, err + case 2: // mode.read_only + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadOnly) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadOnly_{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + s := proto.Size(x.ReadWrite) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_: + s := proto.Size(x.ReadOnly) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options for read-write transactions. +type TransactionOptions_ReadWrite struct { +} + +func (m *TransactionOptions_ReadWrite) Reset() { *m = TransactionOptions_ReadWrite{} } +func (m *TransactionOptions_ReadWrite) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadWrite) ProtoMessage() {} +func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 0} } + +// Options for read-only transactions. +type TransactionOptions_ReadOnly struct { + // How to choose the timestamp for the read-only transaction. + // + // Types that are valid to be assigned to TimestampBound: + // *TransactionOptions_ReadOnly_Strong + // *TransactionOptions_ReadOnly_MinReadTimestamp + // *TransactionOptions_ReadOnly_MaxStaleness + // *TransactionOptions_ReadOnly_ReadTimestamp + // *TransactionOptions_ReadOnly_ExactStaleness + TimestampBound isTransactionOptions_ReadOnly_TimestampBound `protobuf_oneof:"timestamp_bound"` + // If true, the Cloud Spanner-selected read timestamp is included in + // the [Transaction][google.spanner.v1.Transaction] message that describes the transaction. 
+ ReturnReadTimestamp bool `protobuf:"varint,6,opt,name=return_read_timestamp,json=returnReadTimestamp" json:"return_read_timestamp,omitempty"` +} + +func (m *TransactionOptions_ReadOnly) Reset() { *m = TransactionOptions_ReadOnly{} } +func (m *TransactionOptions_ReadOnly) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadOnly) ProtoMessage() {} +func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 1} } + +type isTransactionOptions_ReadOnly_TimestampBound interface { + isTransactionOptions_ReadOnly_TimestampBound() +} + +type TransactionOptions_ReadOnly_Strong struct { + Strong bool `protobuf:"varint,1,opt,name=strong,oneof"` +} +type TransactionOptions_ReadOnly_MinReadTimestamp struct { + MinReadTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=min_read_timestamp,json=minReadTimestamp,oneof"` +} +type TransactionOptions_ReadOnly_MaxStaleness struct { + MaxStaleness *google_protobuf2.Duration `protobuf:"bytes,3,opt,name=max_staleness,json=maxStaleness,oneof"` +} +type TransactionOptions_ReadOnly_ReadTimestamp struct { + ReadTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=read_timestamp,json=readTimestamp,oneof"` +} +type TransactionOptions_ReadOnly_ExactStaleness struct { + ExactStaleness *google_protobuf2.Duration `protobuf:"bytes,5,opt,name=exact_staleness,json=exactStaleness,oneof"` +} + +func (*TransactionOptions_ReadOnly_Strong) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_MinReadTimestamp) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_MaxStaleness) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_ReadTimestamp) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_ExactStaleness) isTransactionOptions_ReadOnly_TimestampBound() {} + +func (m *TransactionOptions_ReadOnly) GetTimestampBound() isTransactionOptions_ReadOnly_TimestampBound { + if m != nil { + return m.TimestampBound + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetStrong() bool { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_Strong); ok { + return x.Strong + } + return false +} + +func (m *TransactionOptions_ReadOnly) GetMinReadTimestamp() *google_protobuf3.Timestamp { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_MinReadTimestamp); ok { + return x.MinReadTimestamp + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetMaxStaleness() *google_protobuf2.Duration { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_MaxStaleness); ok { + return x.MaxStaleness + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetReadTimestamp() *google_protobuf3.Timestamp { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_ReadTimestamp); ok { + return x.ReadTimestamp + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetExactStaleness() *google_protobuf2.Duration { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_ExactStaleness); ok { + return x.ExactStaleness + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetReturnReadTimestamp() bool { + if m != nil { + return m.ReturnReadTimestamp + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionOptions_ReadOnly) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_ReadOnly_OneofMarshaler, _TransactionOptions_ReadOnly_OneofUnmarshaler, _TransactionOptions_ReadOnly_OneofSizer, []interface{}{ + (*TransactionOptions_ReadOnly_Strong)(nil), + (*TransactionOptions_ReadOnly_MinReadTimestamp)(nil), + (*TransactionOptions_ReadOnly_MaxStaleness)(nil), + (*TransactionOptions_ReadOnly_ReadTimestamp)(nil), + (*TransactionOptions_ReadOnly_ExactStaleness)(nil), + } +} + +func _TransactionOptions_ReadOnly_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions_ReadOnly) + // timestamp_bound + switch x := m.TimestampBound.(type) { + case *TransactionOptions_ReadOnly_Strong: + t := uint64(0) + if x.Strong { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *TransactionOptions_ReadOnly_MinReadTimestamp: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MinReadTimestamp); err != nil { + return err + } + case *TransactionOptions_ReadOnly_MaxStaleness: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MaxStaleness); err != nil { + return err + } + case *TransactionOptions_ReadOnly_ReadTimestamp: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTimestamp); err != nil { + return err + } + case *TransactionOptions_ReadOnly_ExactStaleness: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExactStaleness); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions_ReadOnly.TimestampBound has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_ReadOnly_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions_ReadOnly) + switch tag { + case 1: // timestamp_bound.strong + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TimestampBound = &TransactionOptions_ReadOnly_Strong{x != 0} + return true, err + case 2: // timestamp_bound.min_read_timestamp + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Timestamp) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_MinReadTimestamp{msg} + return true, err + case 3: // timestamp_bound.max_staleness + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Duration) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_MaxStaleness{msg} + return true, err + case 4: // timestamp_bound.read_timestamp + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Timestamp) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_ReadTimestamp{msg} + return true, err + case 5: // timestamp_bound.exact_staleness + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Duration) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_ExactStaleness{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_ReadOnly_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions_ReadOnly) + // timestamp_bound + switch x := 
m.TimestampBound.(type) { + case *TransactionOptions_ReadOnly_Strong: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *TransactionOptions_ReadOnly_MinReadTimestamp: + s := proto.Size(x.MinReadTimestamp) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_MaxStaleness: + s := proto.Size(x.MaxStaleness) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_ReadTimestamp: + s := proto.Size(x.ReadTimestamp) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_ExactStaleness: + s := proto.Size(x.ExactStaleness) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A transaction. +type Transaction struct { + // `id` may be used to identify the transaction in subsequent + // [Read][google.spanner.v1.Spanner.Read], + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], + // [Commit][google.spanner.v1.Spanner.Commit], or + // [Rollback][google.spanner.v1.Spanner.Rollback] calls. + // + // Single-use read-only transactions do not have IDs, because + // single-use transactions do not support multiple requests. + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // For snapshot read-only transactions, the read timestamp chosen + // for the transaction. Not returned by default: see + // [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp]. + ReadTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=read_timestamp,json=readTimestamp" json:"read_timestamp,omitempty"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} +func (*Transaction) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } + +func (m *Transaction) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +func (m *Transaction) GetReadTimestamp() *google_protobuf3.Timestamp { + if m != nil { + return m.ReadTimestamp + } + return nil +} + +// This message is used to select the transaction in which a +// [Read][google.spanner.v1.Spanner.Read] or +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs. +// +// See [TransactionOptions][google.spanner.v1.TransactionOptions] for more information about transactions. +type TransactionSelector struct { + // If no fields are set, the default is a single use transaction + // with strong concurrency. 
+ // + // Types that are valid to be assigned to Selector: + // *TransactionSelector_SingleUse + // *TransactionSelector_Id + // *TransactionSelector_Begin + Selector isTransactionSelector_Selector `protobuf_oneof:"selector"` +} + +func (m *TransactionSelector) Reset() { *m = TransactionSelector{} } +func (m *TransactionSelector) String() string { return proto.CompactTextString(m) } +func (*TransactionSelector) ProtoMessage() {} +func (*TransactionSelector) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{2} } + +type isTransactionSelector_Selector interface { + isTransactionSelector_Selector() +} + +type TransactionSelector_SingleUse struct { + SingleUse *TransactionOptions `protobuf:"bytes,1,opt,name=single_use,json=singleUse,oneof"` +} +type TransactionSelector_Id struct { + Id []byte `protobuf:"bytes,2,opt,name=id,proto3,oneof"` +} +type TransactionSelector_Begin struct { + Begin *TransactionOptions `protobuf:"bytes,3,opt,name=begin,oneof"` +} + +func (*TransactionSelector_SingleUse) isTransactionSelector_Selector() {} +func (*TransactionSelector_Id) isTransactionSelector_Selector() {} +func (*TransactionSelector_Begin) isTransactionSelector_Selector() {} + +func (m *TransactionSelector) GetSelector() isTransactionSelector_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *TransactionSelector) GetSingleUse() *TransactionOptions { + if x, ok := m.GetSelector().(*TransactionSelector_SingleUse); ok { + return x.SingleUse + } + return nil +} + +func (m *TransactionSelector) GetId() []byte { + if x, ok := m.GetSelector().(*TransactionSelector_Id); ok { + return x.Id + } + return nil +} + +func (m *TransactionSelector) GetBegin() *TransactionOptions { + if x, ok := m.GetSelector().(*TransactionSelector_Begin); ok { + return x.Begin + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionSelector) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionSelector_OneofMarshaler, _TransactionSelector_OneofUnmarshaler, _TransactionSelector_OneofSizer, []interface{}{ + (*TransactionSelector_SingleUse)(nil), + (*TransactionSelector_Id)(nil), + (*TransactionSelector_Begin)(nil), + } +} + +func _TransactionSelector_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionSelector) + // selector + switch x := m.Selector.(type) { + case *TransactionSelector_SingleUse: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SingleUse); err != nil { + return err + } + case *TransactionSelector_Id: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Id) + case *TransactionSelector_Begin: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Begin); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionSelector.Selector has unexpected type %T", x) + } + return nil +} + +func _TransactionSelector_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionSelector) + switch tag { + case 1: // selector.single_use + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.Selector = &TransactionSelector_SingleUse{msg} + return true, err + case 2: // selector.id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Selector = &TransactionSelector_Id{x} + return true, err + case 3: // selector.begin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.Selector = &TransactionSelector_Begin{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionSelector_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionSelector) + // selector + switch x := m.Selector.(type) { + case *TransactionSelector_SingleUse: + s := proto.Size(x.SingleUse) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionSelector_Id: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Id))) + n += len(x.Id) + case *TransactionSelector_Begin: + s := proto.Size(x.Begin) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*TransactionOptions)(nil), "google.spanner.v1.TransactionOptions") + proto.RegisterType((*TransactionOptions_ReadWrite)(nil), "google.spanner.v1.TransactionOptions.ReadWrite") + proto.RegisterType((*TransactionOptions_ReadOnly)(nil), "google.spanner.v1.TransactionOptions.ReadOnly") + proto.RegisterType((*Transaction)(nil), "google.spanner.v1.Transaction") + proto.RegisterType((*TransactionSelector)(nil), "google.spanner.v1.TransactionSelector") +} + +func init() { proto.RegisterFile("google/spanner/v1/transaction.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 503 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xd1, 0x6a, 0xdb, 0x30, + 0x14, 0x86, 0x9d, 0x34, 0x0d, 0xe9, 0x49, 
0x9a, 0xa6, 0x2a, 0x85, 0xcc, 0x8c, 0x6d, 0x64, 0x0c, + 0x76, 0x65, 0xd3, 0xee, 0x66, 0x30, 0x06, 0x5b, 0x56, 0x86, 0x19, 0x8c, 0x06, 0xb7, 0x63, 0xb0, + 0x1b, 0x4f, 0x89, 0xcf, 0x8c, 0xc0, 0x96, 0x8c, 0x24, 0x77, 0xe9, 0xfd, 0x9e, 0x6a, 0x6f, 0xb1, + 0x37, 0x1a, 0x96, 0xe5, 0xc4, 0x8d, 0x2f, 0x9a, 0x3b, 0x9f, 0x9c, 0xff, 0xff, 0xf5, 0xe9, 0x1c, + 0x11, 0x78, 0x99, 0x08, 0x91, 0xa4, 0xe8, 0xab, 0x9c, 0x72, 0x8e, 0xd2, 0xbf, 0xbb, 0xf0, 0xb5, + 0xa4, 0x5c, 0xd1, 0x95, 0x66, 0x82, 0x7b, 0xb9, 0x14, 0x5a, 0x90, 0xd3, 0x4a, 0xe4, 0x59, 0x91, + 0x77, 0x77, 0xe1, 0x3e, 0xb5, 0x3e, 0x9a, 0x33, 0x9f, 0x72, 0x2e, 0x34, 0x2d, 0xf5, 0xaa, 0x32, + 0xb8, 0xcf, 0x6c, 0xd7, 0x54, 0xcb, 0xe2, 0x97, 0x1f, 0x17, 0x92, 0x6e, 0x03, 0xdd, 0xe7, 0xbb, + 0x7d, 0xcd, 0x32, 0x54, 0x9a, 0x66, 0x79, 0x25, 0x98, 0xfd, 0xeb, 0x01, 0xb9, 0xdd, 0x72, 0x5c, + 0xe7, 0x26, 0x9d, 0x2c, 0x00, 0x24, 0xd2, 0x38, 0xfa, 0x2d, 0x99, 0xc6, 0x69, 0xe7, 0x45, 0xe7, + 0xf5, 0xf0, 0xd2, 0xf7, 0x5a, 0x74, 0x5e, 0xdb, 0xea, 0x85, 0x48, 0xe3, 0xef, 0xa5, 0x2d, 0x70, + 0xc2, 0x23, 0x59, 0x17, 0xe4, 0x2b, 0x98, 0x22, 0x12, 0x3c, 0xbd, 0x9f, 0x76, 0x4d, 0xa0, 0xb7, + 0x7f, 0xe0, 0x35, 0x4f, 0xef, 0x03, 0x27, 0x1c, 0x48, 0xfb, 0xed, 0x0e, 0xe1, 0x68, 0x73, 0x90, + 0xfb, 0xe7, 0x00, 0x06, 0xb5, 0x8a, 0x4c, 0xa1, 0xaf, 0xb4, 0x14, 0x3c, 0x31, 0xd8, 0x83, 0xc0, + 0x09, 0x6d, 0x4d, 0xbe, 0x00, 0xc9, 0x18, 0x8f, 0x0c, 0xc6, 0x66, 0x0e, 0x96, 0xc5, 0xad, 0x59, + 0xea, 0x49, 0x79, 0xb7, 0xb5, 0x22, 0x70, 0xc2, 0x49, 0xc6, 0x78, 0x79, 0xc0, 0xe6, 0x37, 0xf2, + 0x01, 0x8e, 0x33, 0xba, 0x8e, 0x94, 0xa6, 0x29, 0x72, 0x54, 0x6a, 0x7a, 0x60, 0x62, 0x9e, 0xb4, + 0x62, 0xae, 0xec, 0x42, 0x02, 0x27, 0x1c, 0x65, 0x74, 0x7d, 0x53, 0x1b, 0xc8, 0x27, 0x18, 0xef, + 0x90, 0xf4, 0xf6, 0x20, 0x39, 0x96, 0x0f, 0x30, 0xae, 0xe0, 0x04, 0xd7, 0x74, 0xa5, 0x1b, 0x20, + 0x87, 0x8f, 0x83, 0x8c, 0x8d, 0x67, 0x8b, 0x72, 0x09, 0xe7, 0x12, 0x75, 0x21, 0x5b, 0xb3, 0xe9, + 0x97, 0x13, 0x0c, 0xcf, 0xaa, 0xe6, 0x83, 0x01, 0xcc, 0x4f, 0xe1, 0x64, 0xa3, 0x8b, 0x96, 0xa2, + 0xe0, 0xf1, 0xbc, 0x0f, 0xbd, 0x4c, 0xc4, 0x38, 0xfb, 0x09, 0xc3, 0xc6, 0x1a, 0xc9, 0x18, 0xba, + 0x2c, 0x36, 0xcb, 0x18, 0x85, 0x5d, 0x16, 0x93, 0x8f, 0xad, 0x8b, 0x3f, 0xba, 0x82, 0x9d, 0x6b, + 0xcf, 0xfe, 0x76, 0xe0, 0xac, 0x71, 0xc4, 0x0d, 0xa6, 0xb8, 0xd2, 0x42, 0x92, 0xcf, 0x00, 0x8a, + 0xf1, 0x24, 0xc5, 0xa8, 0x50, 0xf5, 0xb3, 0x7d, 0xb5, 0xd7, 0x2b, 0x2b, 0x1f, 0x6b, 0x65, 0xfd, + 0xa6, 0x90, 0x4c, 0x0c, 0x72, 0x89, 0x35, 0x0a, 0x1c, 0x03, 0xfd, 0x1e, 0x0e, 0x97, 0x98, 0x30, + 0x6e, 0xf7, 0xbc, 0x77, 0x68, 0xe5, 0x9a, 0x03, 0x0c, 0x94, 0x85, 0x9c, 0x23, 0x9c, 0xaf, 0x44, + 0xd6, 0x0e, 0x98, 0x4f, 0x1a, 0x09, 0x8b, 0x72, 0x06, 0x8b, 0xce, 0x8f, 0xb7, 0x56, 0x96, 0x88, + 0x94, 0xf2, 0xc4, 0x13, 0x32, 0xf1, 0x13, 0xe4, 0x66, 0x42, 0x7e, 0xd5, 0xa2, 0x39, 0x53, 0x8d, + 0x7f, 0x95, 0x77, 0xf6, 0x73, 0xd9, 0x37, 0xa2, 0x37, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd6, + 0x9c, 0xa4, 0x92, 0x79, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go new file mode 100644 index 000000000..ff1939415 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go @@ -0,0 +1,216 @@ +// Code generated by protoc-gen-go. +// source: google/spanner/v1/type.proto +// DO NOT EDIT! 
+ +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `TypeCode` is used as part of [Type][google.spanner.v1.Type] to +// indicate the type of a Cloud Spanner value. +// +// Each legal value of a type can be encoded to or decoded from a JSON +// value, using the encodings described below. All Cloud Spanner values can +// be `null`, regardless of type; `null`s are always encoded as a JSON +// `null`. +type TypeCode int32 + +const ( + // Not specified. + TypeCode_TYPE_CODE_UNSPECIFIED TypeCode = 0 + // Encoded as JSON `true` or `false`. + TypeCode_BOOL TypeCode = 1 + // Encoded as `string`, in decimal format. + TypeCode_INT64 TypeCode = 2 + // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or + // `"-Infinity"`. + TypeCode_FLOAT64 TypeCode = 3 + // Encoded as `string` in RFC 3339 timestamp format. The time zone + // must be present, and must be `"Z"`. + TypeCode_TIMESTAMP TypeCode = 4 + // Encoded as `string` in RFC 3339 date format. + TypeCode_DATE TypeCode = 5 + // Encoded as `string`. + TypeCode_STRING TypeCode = 6 + // Encoded as a base64-encoded `string`, as described in RFC 4648, + // section 4. + TypeCode_BYTES TypeCode = 7 + // Encoded as `list`, where the list elements are represented + // according to [array_element_type][google.spanner.v1.Type.array_element_type]. + TypeCode_ARRAY TypeCode = 8 + // Encoded as `list`, where list element `i` is represented according + // to [struct_type.fields[i]][google.spanner.v1.StructType.fields]. + TypeCode_STRUCT TypeCode = 9 +) + +var TypeCode_name = map[int32]string{ + 0: "TYPE_CODE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "FLOAT64", + 4: "TIMESTAMP", + 5: "DATE", + 6: "STRING", + 7: "BYTES", + 8: "ARRAY", + 9: "STRUCT", +} +var TypeCode_value = map[string]int32{ + "TYPE_CODE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "FLOAT64": 3, + "TIMESTAMP": 4, + "DATE": 5, + "STRING": 6, + "BYTES": 7, + "ARRAY": 8, + "STRUCT": 9, +} + +func (x TypeCode) String() string { + return proto.EnumName(TypeCode_name, int32(x)) +} +func (TypeCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +// `Type` indicates the type of a Cloud Spanner value, as might be stored in a +// table cell or returned from an SQL query. +type Type struct { + // Required. The [TypeCode][google.spanner.v1.TypeCode] for this type. + Code TypeCode `protobuf:"varint,1,opt,name=code,enum=google.spanner.v1.TypeCode" json:"code,omitempty"` + // If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` + // is the type of the array elements. + ArrayElementType *Type `protobuf:"bytes,2,opt,name=array_element_type,json=arrayElementType" json:"array_element_type,omitempty"` + // If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` + // provides type information for the struct's fields. 
+ StructType *StructType `protobuf:"bytes,3,opt,name=struct_type,json=structType" json:"struct_type,omitempty"` +} + +func (m *Type) Reset() { *m = Type{} } +func (m *Type) String() string { return proto.CompactTextString(m) } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +func (m *Type) GetCode() TypeCode { + if m != nil { + return m.Code + } + return TypeCode_TYPE_CODE_UNSPECIFIED +} + +func (m *Type) GetArrayElementType() *Type { + if m != nil { + return m.ArrayElementType + } + return nil +} + +func (m *Type) GetStructType() *StructType { + if m != nil { + return m.StructType + } + return nil +} + +// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. +type StructType struct { + // The list of fields that make up this struct. Order is + // significant, because values of this struct type are represented as + // lists, where the order of field values matches the order of + // fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields + // matches the order of columns in a read request, or the order of + // fields in the `SELECT` clause of a query. + Fields []*StructType_Field `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"` +} + +func (m *StructType) Reset() { *m = StructType{} } +func (m *StructType) String() string { return proto.CompactTextString(m) } +func (*StructType) ProtoMessage() {} +func (*StructType) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } + +func (m *StructType) GetFields() []*StructType_Field { + if m != nil { + return m.Fields + } + return nil +} + +// Message representing a single field of a struct. +type StructType_Field struct { + // The name of the field. For reads, this is the column name. For + // SQL queries, it is the column alias (e.g., `"Word"` in the + // query `"SELECT 'hello' AS Word"`), or the column name (e.g., + // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some + // columns might have an empty name (e.g., !"SELECT + // UPPER(ColName)"`). Note that a query result can contain + // multiple fields with the same name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The type of the field. 
+ Type *Type `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` +} + +func (m *StructType_Field) Reset() { *m = StructType_Field{} } +func (m *StructType_Field) String() string { return proto.CompactTextString(m) } +func (*StructType_Field) ProtoMessage() {} +func (*StructType_Field) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1, 0} } + +func (m *StructType_Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *StructType_Field) GetType() *Type { + if m != nil { + return m.Type + } + return nil +} + +func init() { + proto.RegisterType((*Type)(nil), "google.spanner.v1.Type") + proto.RegisterType((*StructType)(nil), "google.spanner.v1.StructType") + proto.RegisterType((*StructType_Field)(nil), "google.spanner.v1.StructType.Field") + proto.RegisterEnum("google.spanner.v1.TypeCode", TypeCode_name, TypeCode_value) +} + +func init() { proto.RegisterFile("google/spanner/v1/type.proto", fileDescriptor6) } + +var fileDescriptor6 = []byte{ + // 413 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xd1, 0x8a, 0xd3, 0x40, + 0x14, 0x86, 0x9d, 0x6d, 0xda, 0x6d, 0x4e, 0x51, 0xc6, 0x81, 0xc5, 0xba, 0x2a, 0x94, 0xf5, 0xa6, + 0x28, 0x24, 0x6c, 0x15, 0x11, 0x16, 0x84, 0x34, 0x9d, 0x6a, 0x60, 0xb7, 0x09, 0xc9, 0xec, 0x45, + 0xbd, 0x30, 0x8c, 0xed, 0x18, 0x02, 0xe9, 0x4c, 0x48, 0xe2, 0x42, 0x5f, 0xc2, 0x1b, 0xdf, 0xc7, + 0x67, 0x93, 0x99, 0x64, 0x55, 0xa8, 0xca, 0xde, 0xfd, 0x33, 0xff, 0xff, 0x9d, 0x39, 0x73, 0x38, + 0xf0, 0x34, 0x53, 0x2a, 0x2b, 0x84, 0x5b, 0x97, 0x5c, 0x4a, 0x51, 0xb9, 0x37, 0xe7, 0x6e, 0xb3, + 0x2f, 0x85, 0x53, 0x56, 0xaa, 0x51, 0xe4, 0x61, 0xeb, 0x3a, 0x9d, 0xeb, 0xdc, 0x9c, 0x9f, 0xde, + 0x02, 0xbc, 0xcc, 0x5d, 0x2e, 0xa5, 0x6a, 0x78, 0x93, 0x2b, 0x59, 0xb7, 0xc0, 0xd9, 0x0f, 0x04, + 0x16, 0xdb, 0x97, 0x82, 0xb8, 0x60, 0x6d, 0xd4, 0x56, 0x8c, 0xd1, 0x04, 0x4d, 0x1f, 0xcc, 0x9e, + 0x38, 0x07, 0x85, 0x1c, 0x1d, 0xf3, 0xd5, 0x56, 0xc4, 0x26, 0x48, 0x28, 0x10, 0x5e, 0x55, 0x7c, + 0x9f, 0x8a, 0x42, 0xec, 0x84, 0x6c, 0x52, 0xdd, 0xc6, 0xf8, 0x68, 0x82, 0xa6, 0xa3, 0xd9, 0xa3, + 0x7f, 0xe0, 0x31, 0x36, 0x08, 0x6d, 0x09, 0xf3, 0xee, 0x3b, 0x18, 0xd5, 0x4d, 0xf5, 0x75, 0xd3, + 0xf1, 0x3d, 0xc3, 0x3f, 0xfb, 0x0b, 0x9f, 0x98, 0x94, 0xa9, 0x02, 0xf5, 0x2f, 0x7d, 0xf6, 0x1d, + 0x01, 0xfc, 0xb6, 0xc8, 0x05, 0x0c, 0xbe, 0xe4, 0xa2, 0xd8, 0xd6, 0x63, 0x34, 0xe9, 0x4d, 0x47, + 0xb3, 0xe7, 0xff, 0xad, 0xe4, 0x2c, 0x75, 0x36, 0xee, 0x90, 0xd3, 0x0f, 0xd0, 0x37, 0x17, 0x84, + 0x80, 0x25, 0xf9, 0xae, 0x1d, 0x86, 0x1d, 0x1b, 0x4d, 0x5e, 0x82, 0x75, 0x97, 0x1f, 0x9a, 0xd0, + 0x8b, 0x6f, 0x08, 0x86, 0xb7, 0xf3, 0x22, 0x8f, 0xe1, 0x84, 0xad, 0x23, 0x9a, 0xfa, 0xe1, 0x82, + 0xa6, 0xd7, 0xab, 0x24, 0xa2, 0x7e, 0xb0, 0x0c, 0xe8, 0x02, 0xdf, 0x23, 0x43, 0xb0, 0xe6, 0x61, + 0x78, 0x89, 0x11, 0xb1, 0xa1, 0x1f, 0xac, 0xd8, 0x9b, 0xd7, 0xf8, 0x88, 0x8c, 0xe0, 0x78, 0x79, + 0x19, 0x7a, 0xfa, 0xd0, 0x23, 0xf7, 0xc1, 0x66, 0xc1, 0x15, 0x4d, 0x98, 0x77, 0x15, 0x61, 0x4b, + 0x03, 0x0b, 0x8f, 0x51, 0xdc, 0x27, 0x00, 0x83, 0x84, 0xc5, 0xc1, 0xea, 0x3d, 0x1e, 0x68, 0x78, + 0xbe, 0x66, 0x34, 0xc1, 0xc7, 0x5a, 0x7a, 0x71, 0xec, 0xad, 0xf1, 0xb0, 0x4b, 0x5c, 0xfb, 0x0c, + 0xdb, 0xf3, 0x4f, 0x70, 0xb2, 0x51, 0xbb, 0xc3, 0xa6, 0xe7, 0xb6, 0x6e, 0x33, 0xd2, 0xbb, 0x10, + 0xa1, 0x8f, 0x6f, 0x3b, 0x3f, 0x53, 0x05, 0x97, 0x99, 0xa3, 0xaa, 0xcc, 0xcd, 0x84, 0x34, 0x9b, + 0xe2, 0xb6, 0x16, 0x2f, 0xf3, 0xfa, 0x8f, 0xdd, 0xbb, 0xe8, 0xe4, 0xe7, 0x81, 0x09, 0xbd, 0xfa, + 0x19, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x01, 0x17, 0xda, 
0x9f, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 0c1a96c68..6ec5732cf 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -4,6 +4,7 @@ go: - 1.5.4 - 1.6.3 - 1.7 + - 1.8 go_import_path: google.golang.org/grpc @@ -14,6 +15,6 @@ before_install: script: - '! gofmt -s -d -l . 2>&1 | read' - '! goimports -l . | read' - - 'if [[ $TRAVIS_GO_VERSION != 1.5* ]]; then ! golint ./... | grep -vE "(_string|\.pb)\.go:"; fi' + - 'if [[ $TRAVIS_GO_VERSION != 1.5* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' - '! go tool vet -all . 2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214 - make test testrace diff --git a/vendor/google.golang.org/grpc/Documentation/gomock-example.md b/vendor/google.golang.org/grpc/Documentation/gomock-example.md new file mode 100644 index 000000000..9f7e023d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/Documentation/gomock-example.md @@ -0,0 +1,121 @@ +# Mocking Service for gRPC + +[Example code](https://github.com/grpc/grpc-go/tree/master/examples/helloworld/mock) + +## Why? + +To test client-side logic without the overhead of connecting to a real server. Mocking enables users to write light-weight unit tests to check functionalities on client-side without invoking RPC calls to a server. + +## Idea: Mock the client stub that connects to the server. + +We use Gomock to mock the client interface (in the generated code) and programmatically set its methods to expect and return pre-determined values. This enables users to write tests around the client logic and use this mocked stub while making RPC calls. + +## How to use Gomock? + +Documentation on Gomock can be found [here](https://github.com/golang/mock). +A quick reading of the documentation should enable users to follow the code below. + +Consider a gRPC service based on following proto file: + +```proto +//helloworld.proto + +package helloworld; + +message HelloRequest { + string name = 1; +} + +message HelloReply { + string name = 1; +} + +service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} +} +``` + +The generated file helloworld.pb.go will have a client interface for each service defined in the proto file. This interface will have methods corresponding to each rpc inside that service. + +```Go +type GreeterClient interface { + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} +``` + +The generated code also contains a struct that implements this interface. + +```Go +type greeterClient struct { + cc *grpc.ClientConn +} +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error){ + // ... + // gRPC specific code here + // ... +} +``` + +Along with this the generated code has a method to create an instance of this struct. +```Go +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient +``` + +The user code uses this function to create an instance of the struct greeterClient which then can be used to make rpc calls to the server. +We will mock this interface GreeterClient and use an instance of that mock to make rpc calls. These calls instead of going to server will return pre-determined values. + +To create a mock we’ll use [mockgen](https://github.com/golang/mock#running-mockgen). 
+From the directory ``` examples/helloworld/mock/ ``` run ``` mockgen google.golang.org/grpc/examples/helloworld/helloworld GreeterClient > mock_helloworld/hw_mock.go ``` + +Notice that in the above command we specify GreeterClient as the interface to be mocked. + +The user test code can import the package generated by mockgen along with library package gomock to write unit tests around client-side logic. +```Go +import "github.com/golang/mock/gomock" +import hwmock "google.golang.org/grpc/examples/helloworld/mock/mock_helloworld" +``` + +An instance of the mocked interface can be created as: +```Go +mockGreeterClient := hwmock.NewMockGreeterClient(ctrl) +``` +This mocked object can be programmed to expect calls to its methods and return pre-determined values. For instance, we can program mockGreeterClient to expect a call to its method SayHello and return a HelloReply with message "Mocked RPC". + +```Go +mockGreeterClient.EXPECT().SayHello( + gomock.Any(), // expect any value for first parameter + gomock.Any(), // expect any value for second parameter +).Return(&helloworld.HelloReply{Message: "Mocked RPC"}, nil) +``` + +gomock.Any() indicates that the parameter can have any value or type. We can indicate specific values for built-in types with gomock.Eq(). +However, if the test code needs to specify the parameter to have a proto message type, we can replace gomock.Any() with an instance of a struct that implements the gomock.Matcher interface. + +```Go +type rpcMsg struct { + msg proto.Message +} + +func (r *rpcMsg) Matches(msg interface{}) bool { + m, ok := msg.(proto.Message) + if !ok { + return false + } + return proto.Equal(m, r.msg) +} + +func (r *rpcMsg) String() string { + return fmt.Sprintf("is %s", r.msg) +} + +... + +req := &helloworld.HelloRequest{Name: "unit_test"} +mockGreeterClient.EXPECT().SayHello( + gomock.Any(), + &rpcMsg{msg: req}, +).Return(&helloworld.HelloReply{Message: "Mocked Interface"}, nil) +``` + + + diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark.go b/vendor/google.golang.org/grpc/benchmark/benchmark.go index d11432708..fc3304acc 100644 --- a/vendor/google.golang.org/grpc/benchmark/benchmark.go +++ b/vendor/google.golang.org/grpc/benchmark/benchmark.go @@ -47,7 +47,8 @@ import ( "google.golang.org/grpc/grpclog" ) -func newPayload(t testpb.PayloadType, size int) *testpb.Payload { +// Allows reuse of the same testpb.Payload object. +func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) { if size < 0 { grpclog.Fatalf("Requested a response with invalid length %d", size) } @@ -59,10 +60,15 @@ func newPayload(t testpb.PayloadType, size int) *testpb.Payload { default: grpclog.Fatalf("Unsupported payload type: %d", t) } - return &testpb.Payload{ - Type: t, - Body: body, - } + p.Type = t + p.Body = body + return +} + +func newPayload(t testpb.PayloadType, size int) *testpb.Payload { + p := new(testpb.Payload) + setPayload(p, t, size) + return p } type testServer struct { @@ -75,8 +81,13 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* } func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { + response := &testpb.SimpleResponse{ + Payload: new(testpb.Payload), + } + in := new(testpb.SimpleRequest) for { - in, err := stream.Recv() + // use ServerStream directly to reuse the same testpb.SimpleRequest object + err := stream.(grpc.ServerStream).RecvMsg(in) if err == io.EOF { // read done.
return nil @@ -84,9 +95,8 @@ func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallS if err != nil { return err } - if err := stream.Send(&testpb.SimpleResponse{ - Payload: newPayload(in.ResponseType, int(in.ResponseSize)), - }); err != nil { + setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) + if err := stream.Send(response); err != nil { return err } } diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index ba1772190..81b52be29 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -42,6 +42,7 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -85,6 +86,9 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) } c.trailerMD = stream.Trailer() + if peer, ok := peer.FromContext(stream.Context()); ok { + c.peer = peer + } return nil } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 146166a73..459ce0b64 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -263,6 +263,15 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { } } +// WithAuthority returns a DialOption that specifies the value to be used as +// the :authority pseudo-header. This value only works with WithInsecure and +// has no effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return func(o *dialOptions) { + o.copts.Authority = a + } +} + // Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) 
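// Illustrative sketch only (not part of this change): one way the new
// WithAuthority option might be combined with WithInsecure, mirroring the
// tests added below. The target and authority strings are made-up placeholders.
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial(
		"backend.test:80",
		grpc.WithInsecure(),
		// Overrides the :authority pseudo-header; it has no effect if
		// TransportCredentials are also supplied.
		grpc.WithAuthority("backend.internal.test"),
	)
	if err != nil {
		log.Fatalf("Dial failed: %v", err)
	}
	defer conn.Close()
}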
@@ -321,6 +330,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { + cc.authority = cc.dopts.copts.Authority } else { colonPos := strings.LastIndex(target, ":") if colonPos == -1 { diff --git a/vendor/google.golang.org/grpc/clientconn_test.go b/vendor/google.golang.org/grpc/clientconn_test.go index 9b759a1f0..93e78a5a8 100644 --- a/vendor/google.golang.org/grpc/clientconn_test.go +++ b/vendor/google.golang.org/grpc/clientconn_test.go @@ -85,6 +85,34 @@ func TestTLSServerNameOverwrite(t *testing.T) { } } +func TestWithAuthority(t *testing.T) { + overwriteServerName := "over.write.server.name" + conn, err := Dial("Non-Existent.Server:80", WithInsecure(), WithAuthority(overwriteServerName)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v, want _, ", err) + } + conn.Close() + if conn.authority != overwriteServerName { + t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) + } +} + +func TestWithAuthorityAndTLS(t *testing.T) { + overwriteServerName := "over.write.server.name" + creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", overwriteServerName) + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + conn, err := Dial("Non-Existent.Server:80", WithTransportCredentials(creds), WithAuthority("no.effect.authority")) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v, want _, ", err) + } + conn.Close() + if conn.authority != overwriteServerName { + t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) + } +} + func TestDialContextCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go index 9647b9ec8..7597b09e3 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -1,4 +1,5 @@ // +build go1.7 +// +build !go1.8 /* * @@ -44,8 +45,6 @@ import ( // contains a mutex and must not be copied. // // If cfg is nil, a new zero tls.Config is returned. -// -// TODO replace this function with official clone function. func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { return &tls.Config{} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go new file mode 100644 index 000000000..0ecf342da --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -0,0 +1,53 @@ +// +build go1.8 + +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go index 09b8d12c7..cfd40dfa3 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -44,8 +44,6 @@ import ( // contains a mutex and must not be copied. // // If cfg is nil, a new zero tls.Config is returned. -// -// TODO replace this function with official clone function. 
func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { return &tls.Config{} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock/hw_test.go b/vendor/google.golang.org/grpc/examples/helloworld/mock/hw_test.go new file mode 100644 index 000000000..6f7b4f860 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/mock/hw_test.go @@ -0,0 +1,49 @@ +package mock + +import ( + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + helloworld "google.golang.org/grpc/examples/helloworld/helloworld" + hwmock "google.golang.org/grpc/examples/helloworld/mock/mock_helloworld" +) + +// rpcMsg implements the gomock.Matcher interface +type rpcMsg struct { + msg proto.Message +} + +func (r *rpcMsg) Matches(msg interface{}) bool { + m, ok := msg.(proto.Message) + if !ok { + return false + } + return proto.Equal(m, r.msg) +} + +func (r *rpcMsg) String() string { + return fmt.Sprintf("is %s", r.msg) +} + +func TestSayHello(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockGreeterClient := hwmock.NewMockGreeterClient(ctrl) + req := &helloworld.HelloRequest{Name: "unit_test"} + mockGreeterClient.EXPECT().SayHello( + gomock.Any(), + &rpcMsg{msg: req}, + ).Return(&helloworld.HelloReply{Message: "Mocked Interface"}, nil) + testSayHello(t, mockGreeterClient) +} + +func testSayHello(t *testing.T, client helloworld.GreeterClient) { + r, err := client.SayHello(context.Background(), &helloworld.HelloRequest{Name: "unit_test"}) + if err != nil || r.Message != "Mocked Interface" { + t.Errorf("mocking failed") + } + t.Log("Reply : ", r.Message) +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock/mock_helloworld/hw_mock.go b/vendor/google.golang.org/grpc/examples/helloworld/mock/mock_helloworld/hw_mock.go new file mode 100644 index 000000000..14957ed5f --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/mock/mock_helloworld/hw_mock.go @@ -0,0 +1,48 @@ +// Automatically generated by MockGen. DO NOT EDIT! +// Source: google.golang.org/grpc/examples/helloworld/helloworld (interfaces: GreeterClient) + +package mock_helloworld + +import ( + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + helloworld "google.golang.org/grpc/examples/helloworld/helloworld" +) + +// Mock of GreeterClient interface +type MockGreeterClient struct { + ctrl *gomock.Controller + recorder *_MockGreeterClientRecorder +} + +// Recorder for MockGreeterClient (not exported) +type _MockGreeterClientRecorder struct { + mock *MockGreeterClient +} + +func NewMockGreeterClient(ctrl *gomock.Controller) *MockGreeterClient { + mock := &MockGreeterClient{ctrl: ctrl} + mock.recorder = &_MockGreeterClientRecorder{mock} + return mock +} + +func (_m *MockGreeterClient) EXPECT() *_MockGreeterClientRecorder { + return _m.recorder +} + +func (_m *MockGreeterClient) SayHello(_param0 context.Context, _param1 *helloworld.HelloRequest, _param2 ...grpc.CallOption) (*helloworld.HelloReply, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "SayHello", _s...) + ret0, _ := ret[0].(*helloworld.HelloReply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockGreeterClientRecorder) SayHello(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) 
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "SayHello", _s...) +} diff --git a/vendor/google.golang.org/grpc/interop/client/client.go b/vendor/google.golang.org/grpc/interop/client/client.go index 7961752b6..38bad3f66 100644 --- a/vendor/google.golang.org/grpc/interop/client/client.go +++ b/vendor/google.golang.org/grpc/interop/client/client.go @@ -52,7 +52,7 @@ var ( serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") - serverHost = flag.String("server_host", "127.0.0.1", "The server host name") + serverHost = flag.String("server_host", "localhost", "The server host name") serverPort = flag.Int("server_port", 10000, "The server port number") tlsServerName = flag.String("server_host_override", "", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") testCase = flag.String("test_case", "large_unary", diff --git a/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go b/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go new file mode 100644 index 000000000..9c09ad7b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go @@ -0,0 +1,174 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Client used to test http2 error edge cases like GOAWAYs and RST_STREAMs + * + * Documentation: + * https://github.com/grpc/grpc/blob/master/doc/negative-http2-interop-test-descriptions.md + */ + +package main + +import ( + "flag" + "net" + "strconv" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + serverHost = flag.String("server_host", "127.0.0.1", "The server host name") + serverPort = flag.Int("server_port", 8080, "The server port number") + testCase = flag.String("test_case", "goaway", + `Configure different test cases. Valid options are: + goaway : client sends two requests, the server will send a goaway in between; + rst_after_header : server will send rst_stream after it sends headers; + rst_during_data : server will send rst_stream while sending data; + rst_after_data : server will send rst_stream after sending data; + ping : server will send pings between each http2 frame; + max_streams : server will ensure that the max_concurrent_streams limit is upheld;`) + largeReqSize = 271828 + largeRespSize = 314159 +) + +func largeSimpleRequest() *testpb.SimpleRequest { + pl := interop.ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + return &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + } +} + +// sends two unary calls. The server asserts that the calls use different connections. +func goaway(tc testpb.TestServiceClient) { + interop.DoLargeUnaryCall(tc) + // sleep to ensure that the client has time to recv the GOAWAY. + // TODO(ncteisen): make this less hacky. + time.Sleep(1 * time.Second) + interop.DoLargeUnaryCall(tc) +} + +func rstAfterHeader(tc testpb.TestServiceClient) { + req := largeSimpleRequest() + reply, err := tc.UnaryCall(context.Background(), req) + if reply != nil { + grpclog.Fatalf("Client received reply despite server sending rst stream after header") + } + if grpc.Code(err) != codes.Internal { + grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, grpc.Code(err), codes.Internal) + } +} + +func rstDuringData(tc testpb.TestServiceClient) { + req := largeSimpleRequest() + reply, err := tc.UnaryCall(context.Background(), req) + if reply != nil { + grpclog.Fatalf("Client received reply despite server sending rst stream during data") + } + if grpc.Code(err) != codes.Unknown { + grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, grpc.Code(err), codes.Unknown) + } +} + +func rstAfterData(tc testpb.TestServiceClient) { + req := largeSimpleRequest() + reply, err := tc.UnaryCall(context.Background(), req) + if reply != nil { + grpclog.Fatalf("Client received reply despite server sending rst stream after data") + } + if grpc.Code(err) != codes.Internal { + grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, grpc.Code(err), codes.Internal) + } +} + +func ping(tc testpb.TestServiceClient) { + // The server will assert that every ping it sends was ACK-ed by the client. 
+ interop.DoLargeUnaryCall(tc) +} + +func maxStreams(tc testpb.TestServiceClient) { + interop.DoLargeUnaryCall(tc) + var wg sync.WaitGroup + for i := 0; i < 15; i++ { + wg.Add(1) + go func() { + defer wg.Done() + interop.DoLargeUnaryCall(tc) + }() + } + wg.Wait() +} + +func main() { + flag.Parse() + serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) + var opts []grpc.DialOption + opts = append(opts, grpc.WithInsecure()) + conn, err := grpc.Dial(serverAddr, opts...) + if err != nil { + grpclog.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + tc := testpb.NewTestServiceClient(conn) + switch *testCase { + case "goaway": + goaway(tc) + grpclog.Println("goaway done") + case "rst_after_header": + rstAfterHeader(tc) + grpclog.Println("rst_after_header done") + case "rst_during_data": + rstDuringData(tc) + grpclog.Println("rst_during_data done") + case "rst_after_data": + rstAfterData(tc) + grpclog.Println("rst_after_data done") + case "ping": + ping(tc) + grpclog.Println("ping done") + case "max_streams": + maxStreams(tc) + grpclog.Println("max_streams done") + default: + grpclog.Fatal("Unsupported test case: ", *testCase) + } +} diff --git a/vendor/google.golang.org/grpc/interop/test_utils.go b/vendor/google.golang.org/grpc/interop/test_utils.go index 051787483..e4e427c75 100644 --- a/vendor/google.golang.org/grpc/interop/test_utils.go +++ b/vendor/google.golang.org/grpc/interop/test_utils.go @@ -60,7 +60,8 @@ var ( trailingMetadataKey = "x-grpc-test-echo-trailing-bin" ) -func clientNewPayload(t testpb.PayloadType, size int) *testpb.Payload { +// ClientNewPayload returns a payload of the given type and size. +func ClientNewPayload(t testpb.PayloadType, size int) *testpb.Payload { if size < 0 { grpclog.Fatalf("Requested a response with invalid length %d", size) } @@ -91,7 +92,7 @@ func DoEmptyUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) { // DoLargeUnaryCall performs a unary RPC with large payload in the request and response. 
func DoLargeUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) { - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), @@ -116,16 +117,14 @@ func DoClientStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) { } var sum int for _, s := range reqSizes { - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, s) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, s) req := &testpb.StreamingInputCallRequest{ Payload: pl, } if err := stream.Send(req); err != nil { - grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) } sum += s - grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum) - } reply, err := stream.CloseAndRecv() if err != nil { @@ -173,7 +172,7 @@ func DoServerStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) { respCnt++ } if rpcStatus != io.EOF { - grpclog.Fatalf("Failed to finish the server streaming rpc: %v", err) + grpclog.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus) } if respCnt != len(respSizes) { grpclog.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) @@ -193,14 +192,14 @@ func DoPingPong(tc testpb.TestServiceClient, args ...grpc.CallOption) { Size: proto.Int32(int32(respSizes[index])), }, } - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index]) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index]) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseParameters: respParam, Payload: pl, } if err := stream.Send(req); err != nil { - grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) } reply, err := stream.Recv() if err != nil { @@ -249,7 +248,7 @@ func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient, args ...grpc.CallOpt } grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), Payload: pl, @@ -266,7 +265,7 @@ func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient, args ...grpc.CallOpt // DoComputeEngineCreds performs a unary RPC with compute engine auth. func DoComputeEngineCreds(tc testpb.TestServiceClient, serviceAccount, oauthScope string) { - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), @@ -298,7 +297,7 @@ func getServiceAccountJSONKey(keyFile string) []byte { // DoServiceAccountCreds performs a unary RPC with service account auth. 
func DoServiceAccountCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), @@ -323,7 +322,7 @@ func DoServiceAccountCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, o // DoJWTTokenCreds performs a unary RPC with JWT token auth. func DoJWTTokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile string) { - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), @@ -357,7 +356,7 @@ func GetToken(serviceAccountKeyFile string, oauthScope string) *oauth2.Token { // DoOauth2TokenCreds performs a unary RPC with OAUTH2 token auth. func DoOauth2TokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), @@ -383,7 +382,7 @@ func DoOauth2TokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oaut // DoPerRPCCreds performs a unary RPC with per RPC OAUTH2 token. func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), @@ -441,14 +440,14 @@ func DoCancelAfterFirstResponse(tc testpb.TestServiceClient, args ...grpc.CallOp Size: proto.Int32(31415), }, } - pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseParameters: respParam, Payload: pl, } if err := stream.Send(req); err != nil { - grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) } if _, err := stream.Recv(); err != nil { grpclog.Fatalf("%v.Recv() = %v", stream, err) @@ -486,7 +485,7 @@ func validateMetadata(header, trailer metadata.MD) { // DoCustomMetadata checks that metadata is echoed back to the client. func DoCustomMetadata(tc testpb.TestServiceClient, args ...grpc.CallOption) { // Testing with UnaryCall. 
- pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 1) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 1) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(1)), @@ -526,7 +525,7 @@ func DoCustomMetadata(tc testpb.TestServiceClient, args ...grpc.CallOption) { Payload: pl, } if err := stream.Send(streamReq); err != nil { - grpclog.Fatalf("%v.Send(%v) = %v", stream, streamReq, err) + grpclog.Fatalf("%v has error %v while sending %v", stream, err, streamReq) } streamHeader, err := stream.Header() if err != nil { @@ -570,7 +569,7 @@ func DoStatusCodeAndMessage(tc testpb.TestServiceClient, args ...grpc.CallOption ResponseStatus: respStatus, } if err := stream.Send(streamReq); err != nil { - grpclog.Fatalf("%v.Send(%v) = %v, want ", stream, streamReq, err) + grpclog.Fatalf("%v has error %v while sending %v, want ", stream, err, streamReq) } if err := stream.CloseSend(); err != nil { grpclog.Fatalf("%v.CloseSend() = %v, want ", stream, err) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 65dc5af57..733239502 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -32,6 +32,7 @@ */ // Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to http://www.grpc.io/docs/guides/wire.html for more information about custom-metadata. package metadata // import "google.golang.org/grpc/metadata" import ( @@ -82,6 +83,7 @@ func DecodeKeyValue(k, v string) (string, string, error) { type MD map[string][]string // New creates a MD from given key-value map. +// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be applied Base64 encoding. func New(m map[string]string) MD { md := MD{} for k, v := range m { @@ -93,6 +95,7 @@ func New(m map[string]string) MD { // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. +// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be appplied Base64 encoding. func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go index ac49de4aa..0b503d665 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go @@ -2,6 +2,22 @@ // source: proto2.proto // DO NOT EDIT! +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + proto2.proto + proto2_ext.proto + proto2_ext2.proto + test.proto + +It has these top-level messages: + ToBeExtended + Extension + AnotherExtension + SearchResponse + SearchRequest +*/ package grpc_testing import proto "github.com/golang/protobuf/proto" @@ -13,26 +29,32 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -type ToBeExtened struct { +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ToBeExtended struct { Foo *int32 `protobuf:"varint,1,req,name=foo" json:"foo,omitempty"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` } -func (m *ToBeExtened) Reset() { *m = ToBeExtened{} } -func (m *ToBeExtened) String() string { return proto.CompactTextString(m) } -func (*ToBeExtened) ProtoMessage() {} -func (*ToBeExtened) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } +func (m *ToBeExtended) Reset() { *m = ToBeExtended{} } +func (m *ToBeExtended) String() string { return proto.CompactTextString(m) } +func (*ToBeExtended) ProtoMessage() {} +func (*ToBeExtended) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -var extRange_ToBeExtened = []proto.ExtensionRange{ - {10, 20}, +var extRange_ToBeExtended = []proto.ExtensionRange{ + {10, 30}, } -func (*ToBeExtened) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ToBeExtened +func (*ToBeExtended) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ToBeExtended } -func (m *ToBeExtened) GetFoo() int32 { +func (m *ToBeExtended) GetFoo() int32 { if m != nil && m.Foo != nil { return *m.Foo } @@ -40,17 +62,17 @@ func (m *ToBeExtened) GetFoo() int32 { } func init() { - proto.RegisterType((*ToBeExtened)(nil), "grpc.testing.ToBeExtened") + proto.RegisterType((*ToBeExtended)(nil), "grpc.testing.ToBeExtended") } -func init() { proto.RegisterFile("proto2.proto", fileDescriptor1) } +func init() { proto.RegisterFile("proto2.proto", fileDescriptor0) } -var fileDescriptor1 = []byte{ - // 85 bytes of a gzipped FileDescriptorProto +var fileDescriptor0 = []byte{ + // 86 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0xd2, 0x03, 0x53, 0x42, 0x3c, 0xe9, 0x45, 0x05, 0xc9, 0x7a, 0x25, 0xa9, 0xc5, 0x25, - 0x99, 0x79, 0xe9, 0x4a, 0xaa, 0x5c, 0xdc, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 0xa9, 0x79, - 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, 0x41, - 0x20, 0xa6, 0x16, 0x0b, 0x07, 0x97, 0x80, 0x28, 0x20, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xed, 0xbc, - 0xc2, 0x43, 0x00, 0x00, 0x00, + 0x99, 0x79, 0xe9, 0x4a, 0x6a, 0x5c, 0x3c, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 0xa9, 0x79, + 0x29, 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, + 0x41, 0x20, 0xa6, 0x16, 0x0b, 0x07, 0x97, 0x80, 0x3c, 0x20, 0x00, 0x00, 0xff, 0xff, 0x74, 0x86, + 0x9c, 0x08, 0x44, 0x00, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto index f79adc49d..6b120f320 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto @@ -2,7 +2,7 @@ syntax = "proto2"; package grpc.testing; -message ToBeExtened { +message ToBeExtended { required int32 foo = 1; - extensions 10 to 20; + extensions 10 to 30; } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go index 0120ca972..dbd09425f 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go @@ -2,20 +2,6 @@ // source: proto2_ext.proto // DO NOT EDIT! 
-/* -Package grpc_testing is a generated protocol buffer package. - -It is generated from these files: - proto2_ext.proto - proto2.proto - test.proto - -It has these top-level messages: - Extension - ToBeExtened - SearchResponse - SearchRequest -*/ package grpc_testing import proto "github.com/golang/protobuf/proto" @@ -27,62 +13,71 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - type Extension struct { - Baz *int32 `protobuf:"varint,1,opt,name=baz" json:"baz,omitempty"` + Whatzit *int32 `protobuf:"varint,1,opt,name=whatzit" json:"whatzit,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Extension) Reset() { *m = Extension{} } func (m *Extension) String() string { return proto.CompactTextString(m) } func (*Extension) ProtoMessage() {} -func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } -func (m *Extension) GetBaz() int32 { - if m != nil && m.Baz != nil { - return *m.Baz +func (m *Extension) GetWhatzit() int32 { + if m != nil && m.Whatzit != nil { + return *m.Whatzit } return 0 } -var E_Bar = &proto.ExtensionDesc{ - ExtendedType: (*ToBeExtened)(nil), +var E_Foo = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), ExtensionType: (*int32)(nil), Field: 13, - Name: "grpc.testing.bar", - Tag: "varint,13,opt,name=bar", + Name: "grpc.testing.foo", + Tag: "varint,13,opt,name=foo", + Filename: "proto2_ext.proto", } -var E_Baz = &proto.ExtensionDesc{ - ExtendedType: (*ToBeExtened)(nil), +var E_Bar = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), ExtensionType: (*Extension)(nil), Field: 17, + Name: "grpc.testing.bar", + Tag: "bytes,17,opt,name=bar", + Filename: "proto2_ext.proto", +} + +var E_Baz = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*SearchRequest)(nil), + Field: 19, Name: "grpc.testing.baz", - Tag: "bytes,17,opt,name=baz", + Tag: "bytes,19,opt,name=baz", + Filename: "proto2_ext.proto", } func init() { proto.RegisterType((*Extension)(nil), "grpc.testing.Extension") + proto.RegisterExtension(E_Foo) proto.RegisterExtension(E_Bar) proto.RegisterExtension(E_Baz) } -func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor0) } +func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor1) } -var fileDescriptor0 = []byte{ - // 130 bytes of a gzipped FileDescriptorProto +var fileDescriptor1 = []byte{ + // 179 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0xd1, 0x03, 0x33, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, - 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x25, 0x59, 0x2e, - 0x4e, 0xd7, 0x8a, 0x92, 0xd4, 0xbc, 0xe2, 0xcc, 0xfc, 0x3c, 0x21, 0x01, 0x2e, 0xe6, 0xa4, 0xc4, - 0x2a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xd6, 0x20, 0x10, 0xd3, 0x4a, 0x1b, 0x24, 0x52, 0x24, 0x24, - 0xa9, 0x87, 0x6c, 0x84, 0x5e, 0x48, 0xbe, 0x53, 0x2a, 0x58, 0x57, 0x6a, 0x8a, 0x04, 0x2f, 0x4c, - 0x71, 0x91, 0x95, 0x0b, 0x58, 0x3b, 0x3e, 0xc5, 0x82, 0x40, 0xc5, 0xdc, 0x46, 0xe2, 0xa8, 0x0a, - 0xe0, 0xf6, 0x83, 0xad, 0x04, 0x04, 0x00, 0x00, 
0xff, 0xff, 0x59, 0xfa, 0x16, 0xbc, 0xc0, 0x00, - 0x00, 0x00, + 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x29, 0x2e, 0x90, + 0x30, 0x84, 0xad, 0xa4, 0xca, 0xc5, 0xe9, 0x5a, 0x51, 0x92, 0x9a, 0x57, 0x9c, 0x99, 0x9f, 0x27, + 0x24, 0xc1, 0xc5, 0x5e, 0x9e, 0x91, 0x58, 0x52, 0x95, 0x59, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, + 0x1a, 0x04, 0xe3, 0x5a, 0xe9, 0x70, 0x31, 0xa7, 0xe5, 0xe7, 0x0b, 0x49, 0xe9, 0x21, 0x1b, 0xab, + 0x17, 0x92, 0xef, 0x94, 0x0a, 0xd6, 0x9d, 0x92, 0x9a, 0x22, 0xc1, 0x0b, 0xd6, 0x01, 0x52, 0x66, + 0xe5, 0xca, 0xc5, 0x9c, 0x94, 0x58, 0x84, 0x57, 0xb5, 0xa0, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x38, + 0xaa, 0x0a, 0xb8, 0x4b, 0x82, 0x40, 0xfa, 0xad, 0x3c, 0x41, 0xc6, 0x54, 0xe1, 0x35, 0x46, 0x18, + 0x6c, 0x8c, 0x34, 0xaa, 0x8a, 0xe0, 0xd4, 0xc4, 0xa2, 0xe4, 0x8c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, + 0xe2, 0x12, 0x90, 0x51, 0x55, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x71, 0x6b, 0x94, 0x9f, 0x21, + 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto index c2dd737ac..b669141f4 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto @@ -3,11 +3,14 @@ syntax = "proto2"; package grpc.testing; import "proto2.proto"; -extend ToBeExtened { - optional int32 bar = 13; - optional Extension baz = 17; +import "test.proto"; + +extend ToBeExtended { + optional int32 foo = 13; + optional Extension bar = 17; + optional SearchRequest baz = 19; } message Extension { - optional int32 baz = 1; + optional int32 whatzit = 1; } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go new file mode 100644 index 000000000..0aaec7c9a --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go @@ -0,0 +1,72 @@ +// Code generated by protoc-gen-go. +// source: proto2_ext2.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AnotherExtension struct { + Whatchamacallit *int32 `protobuf:"varint,1,opt,name=whatchamacallit" json:"whatchamacallit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AnotherExtension) Reset() { *m = AnotherExtension{} } +func (m *AnotherExtension) String() string { return proto.CompactTextString(m) } +func (*AnotherExtension) ProtoMessage() {} +func (*AnotherExtension) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *AnotherExtension) GetWhatchamacallit() int32 { + if m != nil && m.Whatchamacallit != nil { + return *m.Whatchamacallit + } + return 0 +} + +var E_Frob = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*string)(nil), + Field: 23, + Name: "grpc.testing.frob", + Tag: "bytes,23,opt,name=frob", + Filename: "proto2_ext2.proto", +} + +var E_Nitz = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*AnotherExtension)(nil), + Field: 29, + Name: "grpc.testing.nitz", + Tag: "bytes,29,opt,name=nitz", + Filename: "proto2_ext2.proto", +} + +func init() { + proto.RegisterType((*AnotherExtension)(nil), "grpc.testing.AnotherExtension") + proto.RegisterExtension(E_Frob) + proto.RegisterExtension(E_Nitz) +} + +func init() { proto.RegisterFile("proto2_ext2.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 165 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0x31, 0xd2, 0x03, 0xb3, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, + 0xf5, 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0x0a, 0x20, 0x72, 0x4a, 0x36, + 0x5c, 0x02, 0x8e, 0x79, 0xf9, 0x25, 0x19, 0xa9, 0x45, 0xae, 0x15, 0x25, 0xa9, 0x79, 0xc5, 0x99, + 0xf9, 0x79, 0x42, 0x1a, 0x5c, 0xfc, 0xe5, 0x19, 0x89, 0x25, 0xc9, 0x19, 0x89, 0xb9, 0x89, 0xc9, + 0x89, 0x39, 0x39, 0x99, 0x25, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0xe8, 0xc2, 0x56, 0x7a, + 0x5c, 0x2c, 0x69, 0x45, 0xf9, 0x49, 0x42, 0x52, 0x7a, 0xc8, 0x56, 0xe8, 0x85, 0xe4, 0x3b, 0xa5, + 0x82, 0x8d, 0x4b, 0x49, 0x4d, 0x91, 0x10, 0x57, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xab, 0xb3, 0xf2, + 0xe3, 0x62, 0xc9, 0xcb, 0x2c, 0xa9, 0xc2, 0xab, 0x5e, 0x56, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x0e, + 0x55, 0x05, 0xba, 0x1b, 0x83, 0xc0, 0xe6, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x7e, 0x0d, + 0x26, 0xed, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto new file mode 100644 index 000000000..16fa69e62 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto @@ -0,0 +1,14 @@ +syntax = "proto2"; + +package grpc.testing; + +import "proto2.proto"; + +extend ToBeExtended { + optional string frob = 23; + optional AnotherExtension nitz = 29; +} + +message AnotherExtension { + optional int32 whatchamacallit = 1; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go index 607dfd3b9..27d71fc89 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go @@ -25,7 +25,7 @@ type SearchResponse struct { func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) 
ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } +func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } func (m *SearchResponse) GetResults() []*SearchResponse_Result { if m != nil { @@ -43,7 +43,28 @@ type SearchResponse_Result struct { func (m *SearchResponse_Result) Reset() { *m = SearchResponse_Result{} } func (m *SearchResponse_Result) String() string { return proto.CompactTextString(m) } func (*SearchResponse_Result) ProtoMessage() {} -func (*SearchResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } +func (*SearchResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +func (m *SearchResponse_Result) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *SearchResponse_Result) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *SearchResponse_Result) GetSnippets() []string { + if m != nil { + return m.Snippets + } + return nil +} type SearchRequest struct { Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` @@ -52,7 +73,14 @@ type SearchRequest struct { func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } +func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *SearchRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} func init() { proto.RegisterType((*SearchResponse)(nil), "grpc.testing.SearchResponse") @@ -198,23 +226,23 @@ var _SearchService_serviceDesc = grpc.ServiceDesc{ Metadata: "test.proto", } -func init() { proto.RegisterFile("test.proto", fileDescriptor2) } - -var fileDescriptor2 = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, - 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, 0x03, 0x09, 0x64, - 0xe6, 0xa5, 0x2b, 0xcd, 0x65, 0xe4, 0xe2, 0x0b, 0x4e, 0x4d, 0x2c, 0x4a, 0xce, 0x08, 0x4a, 0x2d, - 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0xb2, 0xe5, 0x62, 0x2f, 0x4a, 0x2d, 0x2e, 0xcd, 0x29, 0x29, - 0x96, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd6, 0x43, 0xd6, 0xa2, 0x87, 0xaa, 0x5c, 0x2f, - 0x08, 0xac, 0x36, 0x08, 0xa6, 0x47, 0xca, 0x87, 0x8b, 0x0d, 0x22, 0x24, 0x24, 0xc0, 0xc5, 0x5c, - 0x5a, 0x94, 0x03, 0x34, 0x84, 0x51, 0x83, 0x33, 0x08, 0xc4, 0x14, 0x12, 0xe1, 0x62, 0x2d, 0xc9, - 0x2c, 0xc9, 0x49, 0x95, 0x60, 0x02, 0x8b, 0x41, 0x38, 0x42, 0x52, 0x5c, 0x1c, 0xc5, 0x79, 0x99, - 0x05, 0x05, 0xa9, 0x40, 0x1b, 0x99, 0x81, 0x36, 0x72, 0x06, 0xc1, 0xf9, 0x4a, 0xaa, 0x5c, 0xbc, - 0x30, 0xfb, 0x0a, 0x4b, 0x81, 0x0e, 0x00, 0x19, 0x01, 0x64, 0x14, 0x55, 0x42, 0x8d, 0x85, 0x70, - 0x8c, 0x96, 0x31, 0xc2, 0xd4, 0x05, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x0a, 0x39, 0x73, 0xb1, - 0x41, 0x04, 0x84, 0xa4, 0xb1, 0x3b, 0x1f, 0x6c, 0x9c, 0x94, 0x0c, 0x3e, 0xbf, 0x09, 0x05, 0x70, - 0xf1, 0x07, 0x97, 0x14, 0xa5, 0x26, 0xe6, 0x02, 0xe5, 0x28, 0x36, 0x4d, 0x83, 0xd1, 0x80, 0x31, - 0x89, 0x0d, 0x1c, 0x09, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0xd6, 0x09, 0xb8, 0x92, - 0x01, 0x00, 0x00, +func init() { proto.RegisterFile("test.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 231 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x91, 0xbd, 0x4a, 0xc5, 0x40, + 0x10, 0x85, 0x59, 0x83, 0xd1, 0x3b, 0xfe, 0x32, 0x58, 0x84, 0x68, 0x11, 0xae, 0x08, 0xa9, 0x16, + 0xb9, 0xd6, 0x56, 0xb6, 0x16, 0xb2, 0x79, 0x82, 0x6b, 0x18, 0xe2, 0x42, 0x4c, 0x36, 0x33, 0x13, + 0xc1, 0x87, 0xb1, 0xf5, 0x39, 0x25, 0x59, 0x23, 0x0a, 0x62, 0x63, 0xb7, 0xe7, 0xe3, 0xcc, 0xb7, + 0xbb, 0x0c, 0x80, 0x92, 0xa8, 0x0d, 0xdc, 0x6b, 0x8f, 0x87, 0x0d, 0x87, 0xda, 0x4e, 0xc0, 0x77, + 0xcd, 0xfa, 0xcd, 0xc0, 0x71, 0x45, 0x5b, 0xae, 0x9f, 0x1c, 0x49, 0xe8, 0x3b, 0x21, 0xbc, 0x85, + 0x3d, 0x26, 0x19, 0x5b, 0x95, 0xcc, 0x14, 0x49, 0x79, 0xb0, 0xb9, 0xb4, 0xdf, 0x47, 0xec, 0xcf, + 0xba, 0x75, 0x73, 0xd7, 0x2d, 0x33, 0xf9, 0x3d, 0xa4, 0x11, 0xe1, 0x29, 0x24, 0x23, 0xb7, 0x99, + 0x29, 0x4c, 0xb9, 0x72, 0xd3, 0x11, 0xcf, 0x60, 0x57, 0xbd, 0xb6, 0x94, 0xed, 0xcc, 0x2c, 0x06, + 0xcc, 0x61, 0x5f, 0x3a, 0x1f, 0x02, 0xa9, 0x64, 0x49, 0x91, 0x94, 0x2b, 0xf7, 0x95, 0xd7, 0x57, + 0x70, 0xb4, 0xdc, 0x37, 0x8c, 0x24, 0x3a, 0x29, 0x86, 0x91, 0xf8, 0xf5, 0x53, 0x1b, 0xc3, 0xe6, + 0xdd, 0x2c, 0xbd, 0x8a, 0xf8, 0xc5, 0xd7, 0x84, 0x77, 0x90, 0x46, 0x80, 0xe7, 0xbf, 0x3f, 0x7f, + 0xd6, 0xe5, 0x17, 0x7f, 0xfd, 0x0d, 0x1f, 0xe0, 0xa4, 0x52, 0xa6, 0xed, 0xb3, 0xef, 0x9a, 0x7f, + 0xdb, 0x4a, 0x73, 0x6d, 0x1e, 0xd3, 0x79, 0x09, 0x37, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x20, + 0xd6, 0x09, 0xb8, 0x92, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index d26eac3e5..f28304d1b 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -156,9 +156,7 @@ func (s *serverReflectionServer) fileDescContainingExtension(st reflect.Type, ex return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) } - extT := reflect.TypeOf(extDesc.ExtensionType).Elem() - - return s.fileDescForType(extT) + return s.decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) } func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection_test.go b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go index 1759e66ae..daef8ff69 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection_test.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go @@ -51,13 +51,15 @@ import ( var ( s = &serverReflectionServer{} // fileDescriptor of each test proto file. - fdTest *dpb.FileDescriptorProto - fdProto2 *dpb.FileDescriptorProto - fdProto2Ext *dpb.FileDescriptorProto + fdTest *dpb.FileDescriptorProto + fdProto2 *dpb.FileDescriptorProto + fdProto2Ext *dpb.FileDescriptorProto + fdProto2Ext2 *dpb.FileDescriptorProto // fileDescriptor marshalled. 
- fdTestByte []byte - fdProto2Byte []byte - fdProto2ExtByte []byte + fdTestByte []byte + fdProto2Byte []byte + fdProto2ExtByte []byte + fdProto2Ext2Byte []byte ) func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { @@ -80,6 +82,7 @@ func init() { fdTest, fdTestByte = loadFileDesc("test.proto") fdProto2, fdProto2Byte = loadFileDesc("proto2.proto") fdProto2Ext, fdProto2ExtByte = loadFileDesc("proto2_ext.proto") + fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("proto2_ext2.proto") } func TestFileDescForType(t *testing.T) { @@ -88,7 +91,7 @@ func TestFileDescForType(t *testing.T) { wantFd *dpb.FileDescriptorProto }{ {reflect.TypeOf(pb.SearchResponse_Result{}), fdTest}, - {reflect.TypeOf(pb.ToBeExtened{}), fdProto2}, + {reflect.TypeOf(pb.ToBeExtended{}), fdProto2}, } { fd, err := s.fileDescForType(test.st) if err != nil || !reflect.DeepEqual(fd, test.wantFd) { @@ -128,7 +131,11 @@ func TestFileDescContainingExtension(t *testing.T) { extNum int32 want *dpb.FileDescriptorProto }{ - {reflect.TypeOf(pb.ToBeExtened{}), 17, fdProto2Ext}, + {reflect.TypeOf(pb.ToBeExtended{}), 13, fdProto2Ext}, + {reflect.TypeOf(pb.ToBeExtended{}), 17, fdProto2Ext}, + {reflect.TypeOf(pb.ToBeExtended{}), 19, fdProto2Ext}, + {reflect.TypeOf(pb.ToBeExtended{}), 23, fdProto2Ext2}, + {reflect.TypeOf(pb.ToBeExtended{}), 29, fdProto2Ext2}, } { fd, err := s.fileDescContainingExtension(test.st, test.extNum) if err != nil || !reflect.DeepEqual(fd, test.want) { @@ -149,7 +156,7 @@ func TestAllExtensionNumbersForType(t *testing.T) { st reflect.Type want []int32 }{ - {reflect.TypeOf(pb.ToBeExtened{}), []int32{13, 17}}, + {reflect.TypeOf(pb.ToBeExtended{}), []int32{13, 17, 19, 23, 29}}, } { r, err := s.allExtensionNumbersForType(test.st) sort.Sort(intArray(r)) @@ -278,7 +285,7 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe {"grpc.testing.SearchService.Search", fdTestByte}, {"grpc.testing.SearchService.StreamingSearch", fdTestByte}, {"grpc.testing.SearchResponse", fdTestByte}, - {"grpc.testing.ToBeExtened", fdProto2Byte}, + {"grpc.testing.ToBeExtended", fdProto2Byte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ @@ -309,7 +316,7 @@ func testFileContainingSymbolError(t *testing.T, stream rpb.ServerReflection_Ser "grpc.testing.SerchService", "grpc.testing.SearchService.SearchE", "grpc.tesing.SearchResponse", - "gpc.testing.ToBeExtened", + "gpc.testing.ToBeExtended", } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ @@ -338,7 +345,11 @@ func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_Serve extNum int32 want []byte }{ - {"grpc.testing.ToBeExtened", 17, fdProto2ExtByte}, + {"grpc.testing.ToBeExtended", 13, fdProto2ExtByte}, + {"grpc.testing.ToBeExtended", 17, fdProto2ExtByte}, + {"grpc.testing.ToBeExtended", 19, fdProto2ExtByte}, + {"grpc.testing.ToBeExtended", 23, fdProto2Ext2Byte}, + {"grpc.testing.ToBeExtended", 29, fdProto2Ext2Byte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ @@ -372,8 +383,8 @@ func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ typeName string extNum int32 }{ - {"grpc.testing.ToBExtened", 17}, - {"grpc.testing.ToBeExtened", 15}, + {"grpc.testing.ToBExtended", 17}, + {"grpc.testing.ToBeExtended", 15}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: 
&rpb.ServerReflectionRequest_FileContainingExtension{ @@ -404,7 +415,7 @@ func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_Ser typeName string want []int32 }{ - {"grpc.testing.ToBeExtened", []int32{13, 17}}, + {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ @@ -435,7 +446,7 @@ func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_Ser func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ - "grpc.testing.ToBeExtenedE", + "grpc.testing.ToBeExtendedE", } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fa69d58cb..73c3a9665 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -48,6 +48,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -140,6 +141,7 @@ type callInfo struct { failFast bool headerMD metadata.MD trailerMD metadata.MD + peer *peer.Peer traceInfo traceInfo // in trace.go } @@ -183,12 +185,20 @@ func Trailer(md *metadata.MD) CallOption { }) } +// Peer returns a CallOption that retrieves peer information for a +// unary RPC. +func Peer(peer *peer.Peer) CallOption { + return afterCall(func(c *callInfo) { + *peer = *c.peer + }) +} + // FailFast configures the action to take when an RPC is attempted on broken // connections or unreachable servers. If failfast is true, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will retry // the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md +// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true. func FailFast(failFast bool) CallOption { return beforeCall(func(c *callInfo) error { c.failFast = failFast @@ -367,7 +377,7 @@ type rpcError struct { } func (e *rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) + return fmt.Sprintf("rpc error: code = %s desc = %s", e.code, e.desc) } // Code returns the error code for err if it was produced by the rpc system. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 985226d60..157f35eea 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -116,6 +116,7 @@ type options struct { statsHandler stats.Handler maxConcurrentStreams uint32 useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc } var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit @@ -208,6 +209,24 @@ func StatsHandler(h stats.Handler) ServerOption { } } +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. 
The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation passes through interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return func(o *options) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -815,15 +834,19 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } var appErr error + var server interface{} + if srv != nil { + server = srv.server + } if s.opts.streamInt == nil { - appErr = sd.Handler(srv.server, ss) + appErr = sd.Handler(server, ss) } else { info := &StreamServerInfo{ FullMethod: stream.Method(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) + appErr = s.opts.streamInt(server, ss, info, sd.Handler) } if appErr != nil { if err, ok := appErr.(*rpcError); ok { @@ -883,6 +906,10 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str method := sm[pos+1:] srv, ok := s.m[service] if !ok { + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) trInfo.tr.SetError() @@ -913,6 +940,10 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) trInfo.tr.SetError() } + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } errDesc := fmt.Sprintf("unknown method %v", method) if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil { if trInfo != nil { diff --git a/vendor/google.golang.org/grpc/server_test.go b/vendor/google.golang.org/grpc/server_test.go index 23838806d..53968cc27 100644 --- a/vendor/google.golang.org/grpc/server_test.go +++ b/vendor/google.golang.org/grpc/server_test.go @@ -60,7 +60,7 @@ func TestStopBeforeServe(t *testing.T) { // server.Serve is responsible for closing the listener, even if the // server was already stopped. 
err = lis.Close() - if got, want := ErrorDesc(err), "use of closed network connection"; !strings.Contains(got, want) { + if got, want := ErrorDesc(err), "use of closed"; !strings.Contains(got, want) { t.Errorf("Close() error = %q, want %q", got, want) } } diff --git a/vendor/google.golang.org/grpc/test/end2end_test.go b/vendor/google.golang.org/grpc/test/end2end_test.go index 6fe45f211..d743623f9 100644 --- a/vendor/google.golang.org/grpc/test/end2end_test.go +++ b/vendor/google.golang.org/grpc/test/end2end_test.go @@ -428,6 +428,7 @@ type test struct { streamClientInt grpc.StreamClientInterceptor unaryServerInt grpc.UnaryServerInterceptor streamServerInt grpc.StreamServerInterceptor + unknownHandler grpc.StreamHandler sc <-chan grpc.ServiceConfig // srv and srvAddr are set once startServer is called. @@ -493,10 +494,13 @@ func (te *test) startServer(ts testpb.TestServiceServer) { if te.streamServerInt != nil { sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt)) } + if te.unknownHandler != nil { + sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler)) + } la := "localhost:0" switch te.e.network { case "unix": - la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now()) + la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano()) syscall.Unlink(la) } lis, err := net.Listen(te.e.network, la) @@ -1234,6 +1238,33 @@ func testHealthCheckOff(t *testing.T, e env) { } } +func TestUnknownHandler(t *testing.T) { + defer leakCheck(t)() + // An example unknownHandler that returns a different code and a different method, making sure that we do not + // expose what methods are implemented to a client that is not authenticated. + unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { + return grpc.Errorf(codes.Unauthenticated, "user unauthenticated") + } + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. 
+ if e.name == "handler-tls" { + continue + } + testUnknownHandler(t, e, unknownHandler) + } +} + +func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) { + te := newTest(t, e) + te.unknownHandler = unknownHandler + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + want := grpc.Errorf(codes.Unauthenticated, "user unauthenticated") + if _, err := healthCheck(1*time.Second, te.clientConn(), ""); !equalErrors(err, want) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, %v", err, want) + } +} + func TestHealthCheckServingStatus(t *testing.T) { defer leakCheck(t)() for _, e := range listTestEnv() { @@ -1440,6 +1471,43 @@ func testExceedMsgLimit(t *testing.T, e env) { } } +func TestPeerClientSide(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testPeerClientSide(t, e) + } +} + +func testPeerClientSide(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + peer := new(peer.Peer) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(peer), grpc.FailFast(false)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + pa := peer.Addr.String() + if e.network == "unix" { + if pa != te.srvAddr { + t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr) + } + return + } + _, pp, err := net.SplitHostPort(pa) + if err != nil { + t.Fatalf("Failed to parse address from peer.") + } + _, sp, err := net.SplitHostPort(te.srvAddr) + if err != nil { + t.Fatalf("Failed to parse address of test server.") + } + if pp != sp { + t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp) + } +} + func TestMetadataUnaryRPC(t *testing.T) { defer leakCheck(t)() for _, e := range listTestEnv() { @@ -2634,6 +2702,48 @@ func testExceedMaxStreamsLimit(t *testing.T, e env) { } } +const defaultMaxStreamsClient = 100 + +func TestExceedDefaultMaxStreamsLimit(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testExceedDefaultMaxStreamsLimit(t, e) + } +} + +func testExceedDefaultMaxStreamsLimit(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise( + "http2Client.notifyError got notified that the client transport was broken", + "Conn.resetTransport failed to create client transport", + "grpc: the connection is closing", + ) + // When masStream is set to 0 the server doesn't send a settings frame for + // MaxConcurrentStreams, essentially allowing infinite (math.MaxInt32) streams. + // In such a case, there should be a default cap on the client-side. + te.maxStream = 0 + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + // Create as many streams as a client can. + for i := 0; i < defaultMaxStreamsClient; i++ { + if _, err := tc.StreamingInputCall(te.ctx); err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + } + + // Trying to create one more should timeout. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + _, err := tc.StreamingInputCall(ctx) + if err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded) + } +} + func TestStreamsQuotaRecovery(t *testing.T) { defer leakCheck(t)() for _, e := range listTestEnv() { diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index 2586cba46..33de7b60b 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -44,8 +44,9 @@ const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection + initialWindowSize = defaultWindowSize // for an RPC + initialConnWindowSize = defaultWindowSize * 16 // for a connection + defaultMaxStreamsClient = 100 ) // The following defines various control items which could flow through diff --git a/vendor/google.golang.org/grpc/transport/handler_server_test.go b/vendor/google.golang.org/grpc/transport/handler_server_test.go index 9843d36b6..44adf2eed 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server_test.go +++ b/vendor/google.golang.org/grpc/transport/handler_server_test.go @@ -188,7 +188,7 @@ func TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { }, RequestURI: "/service/foo.bar", }, - wantErr: `stream error: code = 13 desc = "malformed time-out: transport: timeout unit is not recognized: \"tomorrow\""`, + wantErr: `stream error: code = Internal desc = "malformed time-out: transport: timeout unit is not recognized: \"tomorrow\""`, }, { name: "with metadata", diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 892f8ba67..001522bdb 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -208,7 +208,8 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( state: reachable, activeStreams: make(map[uint32]*Stream), creds: opts.PerRPCCredentials, - maxStreams: math.MaxInt32, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), streamSendQuota: defaultWindowSize, statsHandler: opts.StatsHandler, } @@ -337,21 +338,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.mu.Unlock() return nil, ErrConnClosing } - checkStreamsQuota := t.streamsQuota != nil t.mu.Unlock() - if checkStreamsQuota { - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) - if err != nil { - return nil, err - } - // Returns the quota balance back. - if sq > 1 { - t.streamsQuota.add(sq - 1) - } + sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) } if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { // Return the quota back now because there is no stream returned to the caller. 
- if _, ok := err.(StreamError); ok && checkStreamsQuota { + if _, ok := err.(StreamError); ok { t.streamsQuota.add(1) } return nil, err @@ -359,9 +357,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.mu.Lock() if t.state == draining { t.mu.Unlock() - if checkStreamsQuota { - t.streamsQuota.add(1) - } + t.streamsQuota.add(1) // Need to make t writable again so that the rpc in flight can still proceed. t.writableChan <- 0 return nil, ErrStreamDrain @@ -374,16 +370,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea s.clientStatsCtx = userCtx t.activeStreams[s.id] = s - // This stream is not counted when applySetings(...) initialize t.streamsQuota. - // Reset t.streamsQuota to the right value. - var reset bool - if !checkStreamsQuota && t.streamsQuota != nil { - reset = true - } t.mu.Unlock() - if reset { - t.streamsQuota.add(-1) - } // HPACK encodes various headers. Note that once WriteField(...) is // called, the corresponding headers/continuation frame has to be sent @@ -491,15 +478,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. func (t *http2Client) CloseStream(s *Stream, err error) { - var updateStreams bool t.mu.Lock() if t.activeStreams == nil { t.mu.Unlock() return } - if t.streamsQuota != nil { - updateStreams = true - } delete(t.activeStreams, s.id) if t.state == draining && len(t.activeStreams) == 0 { // The transport is draining and s is the last live stream on t. @@ -508,10 +491,25 @@ func (t *http2Client) CloseStream(s *Stream, err error) { return } t.mu.Unlock() - if updateStreams { - t.streamsQuota.add(1) - } + // rstStream is true in case the stream is being closed at the client-side + // and the server needs to be intimated about it by sending a RST_STREAM + // frame. + // To make sure this frame is written to the wire before the headers of the + // next stream waiting for streamsQuota, we add to streamsQuota pool only + // after having acquired the writableChan to send RST_STREAM out (look at + // the controller() routine). + var rstStream bool + defer func() { + // In case, the client doesn't have to send RST_STREAM to server + // we can safely add back to streamsQuota pool now. 
+ if !rstStream { + t.streamsQuota.add(1) + return + } + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) + }() s.mu.Lock() + rstStream = s.rstStream if q := s.fc.resetPendingData(); q > 0 { if n := t.fc.onRead(q); n > 0 { t.controlBuf.put(&windowUpdate{0, n}) @@ -528,7 +526,7 @@ func (t *http2Client) CloseStream(s *Stream, err error) { s.state = streamDone s.mu.Unlock() if se, ok := err.(StreamError); ok && se.Code != codes.DeadlineExceeded { - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) + rstStream = true } } @@ -769,10 +767,10 @@ func (t *http2Client) handleData(f *http2.DataFrame) { s.state = streamDone s.statusCode = codes.Internal s.statusDesc = err.Error() + s.rstStream = true close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } s.mu.Unlock() @@ -1043,16 +1041,10 @@ func (t *http2Client) applySettings(ss []http2.Setting) { s.Val = math.MaxInt32 } t.mu.Lock() - reset := t.streamsQuota != nil - if !reset { - t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) - } ms := t.maxStreams t.maxStreams = int(s.Val) t.mu.Unlock() - if reset { - t.streamsQuota.add(int(s.Val) - ms) - } + t.streamsQuota.add(int(s.Val) - ms) case http2.SettingInitialWindowSize: t.mu.Lock() for _, stream := range t.activeStreams { @@ -1085,6 +1077,12 @@ func (t *http2Client) controller() { t.framer.writeSettings(true, i.ss...) } case *resetStream: + // If the server needs to be to intimated about stream closing, + // then we need to make sure the RST_STREAM frame is written to + // the wire before the headers of the next stream waiting on + // streamQuota. We ensure this by adding to the streamsQuota pool + // only after having acquired the writableChan to send RST_STREAM. + t.streamsQuota.add(1) t.framer.writeRSTStream(true, i.streamID, i.code) case *flushIO: t.framer.flushWrite() diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index d46599182..aed75d593 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -213,6 +213,9 @@ type Stream struct { // the status received from the server. statusCode codes.Code statusDesc string + // rstStream indicates whether a RST_STREAM frame needs to be sent + // to the server to signify that this stream is closing. + rstStream bool } // RecvCompress returns the compression algorithm applied to the inbound @@ -374,6 +377,9 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string + // Authority is the :authority pseudo-header to use. This field has no effect if + // TransportCredentials is set. + Authority string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. @@ -565,7 +571,7 @@ type StreamError struct { } func (e StreamError) Error() string { - return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc) + return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } // ContextErr converts the error from context package into a StreamError. 
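The two Error() changes above (rpcError in rpc_util.go and StreamError in transport.go) switch the formatting verb from %d to %s, so status codes are now printed by name. A minimal, self-contained sketch of what that looks like from caller code, assuming nothing beyond the grpc and codes packages vendored here:

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func main() {
	err := grpc.Errorf(codes.NotFound, "no such thing")
	// With the old format this printed "code = 5"; after this update it reads
	// "rpc error: code = NotFound desc = no such thing".
	fmt.Println(err)
	// grpc.Code still recovers the typed code for comparisons.
	fmt.Println(grpc.Code(err) == codes.NotFound)
}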
diff --git a/vendor/google.golang.org/grpc/transport/transport_test.go b/vendor/google.golang.org/grpc/transport/transport_test.go index 1ca6eb1a6..e91fc6ed9 100644 --- a/vendor/google.golang.org/grpc/transport/transport_test.go +++ b/vendor/google.golang.org/grpc/transport/transport_test.go @@ -507,7 +507,10 @@ func TestMaxStreams(t *testing.T) { case <-cc.streamsQuota.acquire(): t.Fatalf("streamsQuota.acquire() becomes readable mistakenly.") default: - if cc.streamsQuota.quota != 0 { + cc.streamsQuota.mu.Lock() + quota := cc.streamsQuota.quota + cc.streamsQuota.mu.Unlock() + if quota != 0 { t.Fatalf("streamsQuota.quota got non-zero quota mistakenly.") } }
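For reference, the grpc.UnknownServiceHandler server option introduced in the server.go hunk above can be wired up as follows. This is only a sketch under assumed names (the listener address and the PermissionDenied policy are made up); a real deployment would put proxying or auth logic in the handler.

package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	// Any request for an unregistered service or method is routed to this
	// bidi-streaming handler instead of being answered with codes.Unimplemented.
	unknown := func(srv interface{}, stream grpc.ServerStream) error {
		return grpc.Errorf(codes.PermissionDenied, "method not allowed")
	}
	s := grpc.NewServer(grpc.UnknownServiceHandler(unknown))
	if err := s.Serve(lis); err != nil {
		grpclog.Fatalf("serve: %v", err)
	}
}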
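The new grpc.Peer call option (added to rpc_util.go and exercised by TestPeerClientSide above) lets a client inspect which address actually served a unary RPC. A hedged sketch, reusing the health-checking stubs that ship with gRPC so the example stays self-contained; the target address is an assumption:

package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/peer"
)

func main() {
	conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// grpc.Peer fills in the remote address (and auth info, if any) once the
	// RPC completes, in the same way grpc.Header and grpc.Trailer work.
	p := new(peer.Peer)
	hc := healthpb.NewHealthClient(conn)
	if _, err := hc.Check(ctx, &healthpb.HealthCheckRequest{}, grpc.Peer(p)); err != nil {
		grpclog.Fatalf("health check: %v", err)
	}
	grpclog.Printf("RPC served by %v", p.Addr)
}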
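Finally, the expanded doc comments on metadata.New and metadata.Pairs spell out two behaviors worth illustrating: keys are lower-cased, and values of keys ending in "-bin" are base64-encoded so binary data can be carried in headers. A small sketch, with made-up header names and using metadata.NewContext as it exists in this vendored revision:

package main

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/metadata"
)

func main() {
	// "X-Request-Id" is stored under "x-request-id"; the "trace-bin" value is
	// base64-encoded per the doc comments added in this update.
	md := metadata.Pairs(
		"X-Request-Id", "42",
		"trace-bin", string([]byte{0x01, 0x02, 0x03}),
	)
	grpclog.Printf("outgoing metadata: %v", md)

	// Attach the metadata to the context of an outgoing RPC.
	ctx := metadata.NewContext(context.Background(), md)
	_ = ctx
}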